From 9c8835bd790732c61d0da189f65ea7f2fc50f808 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 24 Jul 2024 16:42:09 +0300 Subject: [PATCH 001/155] remove subnet IDs from uptime pkg --- snow/uptime/locked_calculator.go | 12 +- snow/uptime/locked_calculator_test.go | 19 +- snow/uptime/manager.go | 109 ++++------ snow/uptime/manager_test.go | 291 +++++++++++--------------- snow/uptime/no_op_calculator.go | 6 +- snow/uptime/state.go | 21 +- snow/uptime/test_state.go | 25 +-- 7 files changed, 200 insertions(+), 283 deletions(-) diff --git a/snow/uptime/locked_calculator.go b/snow/uptime/locked_calculator.go index 884878ab24f6..a1e9537a5e2f 100644 --- a/snow/uptime/locked_calculator.go +++ b/snow/uptime/locked_calculator.go @@ -35,7 +35,7 @@ func NewLockedCalculator() LockedCalculator { return &lockedCalculator{} } -func (c *lockedCalculator) CalculateUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Duration, time.Time, error) { +func (c *lockedCalculator) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { c.lock.RLock() defer c.lock.RUnlock() @@ -46,10 +46,10 @@ func (c *lockedCalculator) CalculateUptime(nodeID ids.NodeID, subnetID ids.ID) ( c.calculatorLock.Lock() defer c.calculatorLock.Unlock() - return c.c.CalculateUptime(nodeID, subnetID) + return c.c.CalculateUptime(nodeID) } -func (c *lockedCalculator) CalculateUptimePercent(nodeID ids.NodeID, subnetID ids.ID) (float64, error) { +func (c *lockedCalculator) CalculateUptimePercent(nodeID ids.NodeID) (float64, error) { c.lock.RLock() defer c.lock.RUnlock() @@ -60,10 +60,10 @@ func (c *lockedCalculator) CalculateUptimePercent(nodeID ids.NodeID, subnetID id c.calculatorLock.Lock() defer c.calculatorLock.Unlock() - return c.c.CalculateUptimePercent(nodeID, subnetID) + return c.c.CalculateUptimePercent(nodeID) } -func (c *lockedCalculator) CalculateUptimePercentFrom(nodeID ids.NodeID, subnetID ids.ID, startTime time.Time) (float64, error) { +func (c *lockedCalculator) CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.Time) (float64, error) { c.lock.RLock() defer c.lock.RUnlock() @@ -74,7 +74,7 @@ func (c *lockedCalculator) CalculateUptimePercentFrom(nodeID ids.NodeID, subnetI c.calculatorLock.Lock() defer c.calculatorLock.Unlock() - return c.c.CalculateUptimePercentFrom(nodeID, subnetID, startTime) + return c.c.CalculateUptimePercentFrom(nodeID, startTime) } func (c *lockedCalculator) SetCalculator(isBootstrapped *utils.Atomic[bool], lock sync.Locker, newC Calculator) { diff --git a/snow/uptime/locked_calculator_test.go b/snow/uptime/locked_calculator_test.go index 966722f6457d..9e5edaad8c63 100644 --- a/snow/uptime/locked_calculator_test.go +++ b/snow/uptime/locked_calculator_test.go @@ -24,14 +24,13 @@ func TestLockedCalculator(t *testing.T) { // Should still error because ctx is nil nodeID := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() - _, _, err := lc.CalculateUptime(nodeID, subnetID) + _, _, err := lc.CalculateUptime(nodeID) require.ErrorIs(err, errStillBootstrapping) - _, err = lc.CalculateUptimePercent(nodeID, subnetID) + _, err = lc.CalculateUptimePercent(nodeID) require.ErrorIs(err, errStillBootstrapping) - _, err = lc.CalculateUptimePercentFrom(nodeID, subnetID, time.Now()) + _, err = lc.CalculateUptimePercentFrom(nodeID, time.Now()) require.ErrorIs(err, errStillBootstrapping) var isBootstrapped utils.Atomic[bool] @@ -39,27 +38,27 @@ func TestLockedCalculator(t *testing.T) { // Should still error because ctx is not bootstrapped lc.SetCalculator(&isBootstrapped, &sync.Mutex{}, 
mockCalc) - _, _, err = lc.CalculateUptime(nodeID, subnetID) + _, _, err = lc.CalculateUptime(nodeID) require.ErrorIs(err, errStillBootstrapping) - _, err = lc.CalculateUptimePercent(nodeID, subnetID) + _, err = lc.CalculateUptimePercent(nodeID) require.ErrorIs(err, errStillBootstrapping) - _, err = lc.CalculateUptimePercentFrom(nodeID, subnetID, time.Now()) + _, err = lc.CalculateUptimePercentFrom(nodeID, time.Now()) require.ErrorIs(err, errStillBootstrapping) isBootstrapped.Set(true) // Should return the value from the mocked inner calculator mockCalc.EXPECT().CalculateUptime(gomock.Any(), gomock.Any()).AnyTimes().Return(time.Duration(0), time.Time{}, errTest) - _, _, err = lc.CalculateUptime(nodeID, subnetID) + _, _, err = lc.CalculateUptime(nodeID) require.ErrorIs(err, errTest) mockCalc.EXPECT().CalculateUptimePercent(gomock.Any(), gomock.Any()).AnyTimes().Return(float64(0), errTest) - _, err = lc.CalculateUptimePercent(nodeID, subnetID) + _, err = lc.CalculateUptimePercent(nodeID) require.ErrorIs(err, errTest) mockCalc.EXPECT().CalculateUptimePercentFrom(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(float64(0), errTest) - _, err = lc.CalculateUptimePercentFrom(nodeID, subnetID, time.Now()) + _, err = lc.CalculateUptimePercentFrom(nodeID, time.Now()) require.ErrorIs(err, errTest) } diff --git a/snow/uptime/manager.go b/snow/uptime/manager.go index a64b71ca62de..2a915da3fb6c 100644 --- a/snow/uptime/manager.go +++ b/snow/uptime/manager.go @@ -8,7 +8,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -20,42 +19,41 @@ type Manager interface { } type Tracker interface { - StartTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error - StopTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error + StartTracking(nodeIDs []ids.NodeID) error + StopTracking(nodeIDs []ids.NodeID) error - Connect(nodeID ids.NodeID, subnetID ids.ID) error - IsConnected(nodeID ids.NodeID, subnetID ids.ID) bool + Connect(nodeID ids.NodeID) error + IsConnected(nodeID ids.NodeID) bool Disconnect(nodeID ids.NodeID) error } type Calculator interface { - CalculateUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Duration, time.Time, error) - CalculateUptimePercent(nodeID ids.NodeID, subnetID ids.ID) (float64, error) + CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) + CalculateUptimePercent(nodeID ids.NodeID) (float64, error) // CalculateUptimePercentFrom expects [startTime] to be truncated (floored) to the nearest second - CalculateUptimePercentFrom(nodeID ids.NodeID, subnetID ids.ID, startTime time.Time) (float64, error) + CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.Time) (float64, error) } type manager struct { // Used to get time. Useful for faking time during tests. 
clock *mockable.Clock - state State - connections map[ids.NodeID]map[ids.ID]time.Time // nodeID -> subnetID -> time - trackedSubnets set.Set[ids.ID] + state State + connections map[ids.NodeID]time.Time // nodeID -> time } func NewManager(state State, clk *mockable.Clock) Manager { return &manager{ clock: clk, state: state, - connections: make(map[ids.NodeID]map[ids.ID]time.Time), + connections: make(map[ids.NodeID]time.Time), } } -func (m *manager) StartTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error { +func (m *manager) StartTracking(nodeIDs []ids.NodeID) error { now := m.clock.UnixTime() for _, nodeID := range nodeIDs { - upDuration, lastUpdated, err := m.state.GetUptime(nodeID, subnetID) + upDuration, lastUpdated, err := m.state.GetUptime(nodeID) if err != nil { return err } @@ -68,33 +66,30 @@ func (m *manager) StartTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error { durationOffline := now.Sub(lastUpdated) newUpDuration := upDuration + durationOffline - if err := m.state.SetUptime(nodeID, subnetID, newUpDuration, now); err != nil { + if err := m.state.SetUptime(nodeID, newUpDuration, now); err != nil { return err } } - m.trackedSubnets.Add(subnetID) return nil } -func (m *manager) StopTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error { +func (m *manager) StopTracking(nodeIDs []ids.NodeID) error { now := m.clock.UnixTime() for _, nodeID := range nodeIDs { - connectedSubnets := m.connections[nodeID] - // If the node is already connected to this subnet, then we can just + // If the node is already connected, then we can just // update the uptime in the state and remove the connection - if _, isConnected := connectedSubnets[subnetID]; isConnected { - if err := m.updateSubnetUptime(nodeID, subnetID); err != nil { - delete(connectedSubnets, subnetID) + if _, isConnected := m.connections[nodeID]; isConnected { + err := m.updateUptime(nodeID) + delete(m.connections, nodeID) + if err != nil { return err } - delete(connectedSubnets, subnetID) continue } - // if the node is not connected to this subnet, then we need to update - // the uptime in the state from the last time the node was connected to - // this subnet to now. - upDuration, lastUpdated, err := m.state.GetUptime(nodeID, subnetID) + // if the node is not connected, then we need to update + // the uptime in the state from the last time the node was connected to current time. 
+ upDuration, lastUpdated, err := m.state.GetUptime(nodeID) if err != nil { return err } @@ -105,41 +100,34 @@ func (m *manager) StopTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error { continue } - if err := m.state.SetUptime(nodeID, subnetID, upDuration, now); err != nil { + if err := m.state.SetUptime(nodeID, upDuration, now); err != nil { return err } } return nil } -func (m *manager) Connect(nodeID ids.NodeID, subnetID ids.ID) error { - subnetConnections, ok := m.connections[nodeID] - if !ok { - subnetConnections = make(map[ids.ID]time.Time) - m.connections[nodeID] = subnetConnections - } - subnetConnections[subnetID] = m.clock.UnixTime() +func (m *manager) Connect(nodeID ids.NodeID) error { + m.connections[nodeID] = m.clock.UnixTime() return nil } -func (m *manager) IsConnected(nodeID ids.NodeID, subnetID ids.ID) bool { - _, connected := m.connections[nodeID][subnetID] +func (m *manager) IsConnected(nodeID ids.NodeID) bool { + _, connected := m.connections[nodeID] return connected } func (m *manager) Disconnect(nodeID ids.NodeID) error { - // Update every subnet that this node was connected to - for subnetID := range m.connections[nodeID] { - if err := m.updateSubnetUptime(nodeID, subnetID); err != nil { - return err - } + if err := m.updateUptime(nodeID); err != nil { + return err } + // TODO: shall we delete the connection regardless of the error? delete(m.connections, nodeID) return nil } -func (m *manager) CalculateUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Duration, time.Time, error) { - upDuration, lastUpdated, err := m.state.GetUptime(nodeID, subnetID) +func (m *manager) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { + upDuration, lastUpdated, err := m.state.GetUptime(nodeID) if err != nil { return 0, time.Time{}, err } @@ -151,13 +139,7 @@ func (m *manager) CalculateUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Dura return upDuration, lastUpdated, nil } - if !m.trackedSubnets.Contains(subnetID) { - durationOffline := now.Sub(lastUpdated) - newUpDuration := upDuration + durationOffline - return newUpDuration, now, nil - } - - timeConnected, isConnected := m.connections[nodeID][subnetID] + timeConnected, isConnected := m.connections[nodeID] if !isConnected { return upDuration, now, nil } @@ -181,16 +163,16 @@ func (m *manager) CalculateUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Dura return newUpDuration, now, nil } -func (m *manager) CalculateUptimePercent(nodeID ids.NodeID, subnetID ids.ID) (float64, error) { - startTime, err := m.state.GetStartTime(nodeID, subnetID) +func (m *manager) CalculateUptimePercent(nodeID ids.NodeID) (float64, error) { + startTime, err := m.state.GetStartTime(nodeID) if err != nil { return 0, err } - return m.CalculateUptimePercentFrom(nodeID, subnetID, startTime) + return m.CalculateUptimePercentFrom(nodeID, startTime) } -func (m *manager) CalculateUptimePercentFrom(nodeID ids.NodeID, subnetID ids.ID, startTime time.Time) (float64, error) { - upDuration, now, err := m.CalculateUptime(nodeID, subnetID) +func (m *manager) CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.Time) (float64, error) { + upDuration, now, err := m.CalculateUptime(nodeID) if err != nil { return 0, err } @@ -202,15 +184,10 @@ func (m *manager) CalculateUptimePercentFrom(nodeID ids.NodeID, subnetID ids.ID, return uptime, nil } -// updateSubnetUptime updates the subnet uptime of the node on the state by the amount -// of time that the node has been connected to the subnet. 
-func (m *manager) updateSubnetUptime(nodeID ids.NodeID, subnetID ids.ID) error { - // we're not tracking this subnet, skip updating it. - if !m.trackedSubnets.Contains(subnetID) { - return nil - } - - newDuration, newLastUpdated, err := m.CalculateUptime(nodeID, subnetID) +// updateUptime updates the uptime of the node on the state by the amount +// of time that the node has been connected. +func (m *manager) updateUptime(nodeID ids.NodeID) error { + newDuration, newLastUpdated, err := m.CalculateUptime(nodeID) if err == database.ErrNotFound { // If a non-validator disconnects, we don't care return nil @@ -219,5 +196,5 @@ func (m *manager) updateSubnetUptime(nodeID ids.NodeID, subnetID ids.ID) error { return err } - return m.state.SetUptime(nodeID, subnetID, newDuration, newLastUpdated) + return m.state.SetUptime(nodeID, newDuration, newLastUpdated) } diff --git a/snow/uptime/manager_test.go b/snow/uptime/manager_test.go index e04fcc3a9fbe..ef3de35070b9 100644 --- a/snow/uptime/manager_test.go +++ b/snow/uptime/manager_test.go @@ -21,11 +21,10 @@ func TestStartTracking(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) @@ -33,9 +32,9 @@ func TestStartTracking(t *testing.T) { currentTime := startTime.Add(time.Second) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(time.Second, duration) require.Equal(clk.UnixTime(), lastUpdated) @@ -45,12 +44,11 @@ func TestStartTrackingDBError(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() s.dbWriteError = errTest - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) @@ -58,7 +56,7 @@ func TestStartTrackingDBError(t *testing.T) { currentTime := startTime.Add(time.Second) clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) + err := up.StartTracking([]ids.NodeID{nodeID0}) require.ErrorIs(err, errTest) } @@ -70,9 +68,8 @@ func TestStartTrackingNonValidator(t *testing.T) { up := NewManager(s, &clk) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) + err := up.StartTracking([]ids.NodeID{nodeID0}) require.ErrorIs(err, database.ErrNotFound) } @@ -80,11 +77,10 @@ func TestStartTrackingInThePast(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) @@ -92,9 +88,9 @@ func TestStartTrackingInThePast(t *testing.T) { currentTime := startTime.Add(-time.Second) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) 
require.Equal(time.Duration(0), duration) require.Equal(startTime.Truncate(time.Second), lastUpdated) @@ -104,29 +100,28 @@ func TestStopTrackingDecreasesUptime(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) currentTime = startTime.Add(time.Second) clk.Set(currentTime) - require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) up = NewManager(s, &clk) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(clk.UnixTime(), lastUpdated) @@ -136,31 +131,30 @@ func TestStopTrackingIncreasesUptime(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - require.NoError(up.Connect(nodeID0, subnetID)) + require.NoError(up.Connect(nodeID0)) currentTime = startTime.Add(time.Second) clk.Set(currentTime) - require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) up = NewManager(s, &clk) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(time.Second, duration) require.Equal(clk.UnixTime(), lastUpdated) @@ -170,15 +164,14 @@ func TestStopTrackingDisconnectedNonValidator(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() s := NewTestState() clk := mockable.Clock{} up := NewManager(s, &clk) - require.NoError(up.StartTracking(nil, subnetID)) + require.NoError(up.StartTracking(nil)) - err := up.StopTracking([]ids.NodeID{nodeID0}, subnetID) + err := up.StopTracking([]ids.NodeID{nodeID0}) require.ErrorIs(err, database.ErrNotFound) } @@ -186,20 +179,19 @@ func TestStopTrackingConnectedDBError(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) - require.NoError(up.StartTracking(nil, subnetID)) + require.NoError(up.StartTracking(nil)) - require.NoError(up.Connect(nodeID0, subnetID)) + require.NoError(up.Connect(nodeID0)) s.dbReadError = errTest - err := up.StopTracking([]ids.NodeID{nodeID0}, subnetID) + err := up.StopTracking([]ids.NodeID{nodeID0}) require.ErrorIs(err, errTest) } @@ 
-207,24 +199,23 @@ func TestStopTrackingNonConnectedPast(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) currentTime = currentTime.Add(-time.Second) clk.Set(currentTime) - require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) - duration, lastUpdated, err := s.GetUptime(nodeID0, subnetID) + duration, lastUpdated, err := s.GetUptime(nodeID0) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(startTime.Truncate(time.Second), lastUpdated) @@ -234,131 +225,104 @@ func TestStopTrackingNonConnectedDBError(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) currentTime = currentTime.Add(time.Second) clk.Set(currentTime) s.dbWriteError = errTest - err := up.StopTracking([]ids.NodeID{nodeID0}, subnetID) + err := up.StopTracking([]ids.NodeID{nodeID0}) require.ErrorIs(err, errTest) } func TestConnectAndDisconnect(t *testing.T) { - tests := []struct { - name string - subnetIDs []ids.ID - }{ - { - name: "Single Subnet", - subnetIDs: []ids.ID{ids.GenerateTestID()}, - }, - { - name: "Multiple Subnets", - subnetIDs: []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - - nodeID0 := ids.GenerateTestNodeID() - currentTime := time.Now() - startTime := currentTime - - s := NewTestState() - clk := mockable.Clock{} - up := NewManager(s, &clk) - clk.Set(currentTime) - - for _, subnetID := range tt.subnetIDs { - s.AddNode(nodeID0, subnetID, startTime) - - connected := up.IsConnected(nodeID0, subnetID) - require.False(connected) - - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) - - connected = up.IsConnected(nodeID0, subnetID) - require.False(connected) - - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) - require.NoError(err) - require.Equal(time.Duration(0), duration) - require.Equal(clk.UnixTime(), lastUpdated) - - require.NoError(up.Connect(nodeID0, subnetID)) - - connected = up.IsConnected(nodeID0, subnetID) - require.True(connected) - } - - currentTime = currentTime.Add(time.Second) - clk.Set(currentTime) - - for _, subnetID := range tt.subnetIDs { - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) - require.NoError(err) - require.Equal(time.Second, duration) - require.Equal(clk.UnixTime(), lastUpdated) - } - - require.NoError(up.Disconnect(nodeID0)) - - for _, subnetID := range tt.subnetIDs { - connected := up.IsConnected(nodeID0, subnetID) - require.False(connected) - } - - currentTime = currentTime.Add(time.Second) - clk.Set(currentTime) - - for _, subnetID := range tt.subnetIDs { - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) - 
require.NoError(err) - require.Equal(time.Second, duration) - require.Equal(clk.UnixTime(), lastUpdated) - } - }) - } + require := require.New(t) + + nodeID0 := ids.GenerateTestNodeID() + currentTime := time.Now() + startTime := currentTime + + s := NewTestState() + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) + + s.AddNode(nodeID0, startTime) + + connected := up.IsConnected(nodeID0) + require.False(connected) + + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + + connected = up.IsConnected(nodeID0) + require.False(connected) + + duration, lastUpdated, err := up.CalculateUptime(nodeID0) + require.NoError(err) + require.Equal(time.Duration(0), duration) + require.Equal(clk.UnixTime(), lastUpdated) + + require.NoError(up.Connect(nodeID0)) + + connected = up.IsConnected(nodeID0) + require.True(connected) + + currentTime = currentTime.Add(time.Second) + clk.Set(currentTime) + + duration, lastUpdated, err = up.CalculateUptime(nodeID0) + require.NoError(err) + require.Equal(time.Second, duration) + require.Equal(clk.UnixTime(), lastUpdated) + + require.NoError(up.Disconnect(nodeID0)) + + connected = up.IsConnected(nodeID0) + require.False(connected) + + currentTime = currentTime.Add(time.Second) + clk.Set(currentTime) + + duration, lastUpdated, err = up.CalculateUptime(nodeID0) + require.NoError(err) + require.Equal(time.Second, duration) + require.Equal(clk.UnixTime(), lastUpdated) } func TestConnectAndDisconnectBeforeTracking(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) currentTime = currentTime.Add(time.Second) clk.Set(currentTime) - require.NoError(up.Connect(nodeID0, subnetID)) + require.NoError(up.Connect(nodeID0)) currentTime = currentTime.Add(time.Second) clk.Set(currentTime) require.NoError(up.Disconnect(nodeID0)) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(2*time.Second, duration) require.Equal(clk.UnixTime(), lastUpdated) @@ -368,33 +332,32 @@ func TestUnrelatedNodeDisconnect(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() nodeID1 := ids.GenerateTestNodeID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(clk.UnixTime(), lastUpdated) - require.NoError(up.Connect(nodeID0, subnetID)) + require.NoError(up.Connect(nodeID0)) - require.NoError(up.Connect(nodeID1, subnetID)) + require.NoError(up.Connect(nodeID1)) currentTime = currentTime.Add(time.Second) clk.Set(currentTime) - duration, lastUpdated, err = up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err = 
up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(time.Second, duration) require.Equal(clk.UnixTime(), lastUpdated) @@ -404,7 +367,7 @@ func TestUnrelatedNodeDisconnect(t *testing.T) { currentTime = currentTime.Add(time.Second) clk.Set(currentTime) - duration, lastUpdated, err = up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err = up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(2*time.Second, duration) require.Equal(clk.UnixTime(), lastUpdated) @@ -414,11 +377,10 @@ func TestCalculateUptimeWhenNeverTracked(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) @@ -426,12 +388,12 @@ func TestCalculateUptimeWhenNeverTracked(t *testing.T) { currentTime := startTime.Add(time.Second) clk.Set(currentTime) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(time.Second, duration) require.Equal(clk.UnixTime(), lastUpdated) - uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime.Truncate(time.Second)) + uptime, err := up.CalculateUptimePercentFrom(nodeID0, startTime.Truncate(time.Second)) require.NoError(err) require.Equal(float64(1), uptime) } @@ -440,7 +402,6 @@ func TestCalculateUptimeWhenNeverConnected(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() @@ -448,19 +409,19 @@ func TestCalculateUptimeWhenNeverConnected(t *testing.T) { clk := mockable.Clock{} up := NewManager(s, &clk) - require.NoError(up.StartTracking([]ids.NodeID{}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{})) - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) currentTime := startTime.Add(time.Second) clk.Set(currentTime) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(clk.UnixTime(), lastUpdated) - uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime) + uptime, err := up.CalculateUptimePercentFrom(nodeID0, startTime) require.NoError(err) require.Equal(float64(0), uptime) } @@ -469,28 +430,27 @@ func TestCalculateUptimeWhenConnectedBeforeTracking(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) clk.Set(currentTime) - require.NoError(up.Connect(nodeID0, subnetID)) + require.NoError(up.Connect(nodeID0)) currentTime = currentTime.Add(time.Second) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) currentTime = currentTime.Add(time.Second) clk.Set(currentTime) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(2*time.Second, duration) require.Equal(clk.UnixTime(), lastUpdated) @@ -500,28 +460,27 @@ func TestCalculateUptimeWhenConnectedInFuture(t 
*testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) currentTime = currentTime.Add(2 * time.Second) clk.Set(currentTime) - require.NoError(up.Connect(nodeID0, subnetID)) + require.NoError(up.Connect(nodeID0)) currentTime = currentTime.Add(-time.Second) clk.Set(currentTime) - duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(clk.UnixTime(), lastUpdated) @@ -531,7 +490,6 @@ func TestCalculateUptimeNonValidator(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() @@ -539,7 +497,7 @@ func TestCalculateUptimeNonValidator(t *testing.T) { clk := mockable.Clock{} up := NewManager(s, &clk) - _, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime) + _, err := up.CalculateUptimePercentFrom(nodeID0, startTime) require.ErrorIs(err, database.ErrNotFound) } @@ -547,18 +505,17 @@ func TestCalculateUptimePercentageDivBy0(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) clk.Set(currentTime) - uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime.Truncate(time.Second)) + uptime, err := up.CalculateUptimePercentFrom(nodeID0, startTime.Truncate(time.Second)) require.NoError(err) require.Equal(float64(1), uptime) } @@ -568,21 +525,20 @@ func TestCalculateUptimePercentage(t *testing.T) { nodeID0 := ids.GenerateTestNodeID() currentTime := time.Now() - subnetID := ids.GenerateTestID() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) currentTime = currentTime.Add(time.Second) clk.Set(currentTime) - uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime.Truncate(time.Second)) + uptime, err := up.CalculateUptimePercentFrom(nodeID0, startTime.Truncate(time.Second)) require.NoError(err) require.Equal(float64(0), uptime) } @@ -592,24 +548,23 @@ func TestStopTrackingUnixTimeRegression(t *testing.T) { nodeID0 := ids.GenerateTestNodeID() currentTime := time.Now() - subnetID := ids.GenerateTestID() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, subnetID, startTime) + s.AddNode(nodeID0, startTime) clk := mockable.Clock{} up := NewManager(s, &clk) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - require.NoError(up.Connect(nodeID0, subnetID)) + require.NoError(up.Connect(nodeID0)) currentTime = startTime.Add(time.Second) clk.Set(currentTime) - require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) + 
require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) currentTime = startTime.Add(time.Second) clk.Set(currentTime) @@ -619,14 +574,14 @@ func TestStopTrackingUnixTimeRegression(t *testing.T) { currentTime = startTime.Add(time.Second) clk.Set(currentTime) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - require.NoError(up.Connect(nodeID0, subnetID)) + require.NoError(up.Connect(nodeID0)) currentTime = startTime.Add(time.Second) clk.Set(currentTime) - perc, err := up.CalculateUptimePercent(nodeID0, subnetID) + perc, err := up.CalculateUptimePercent(nodeID0) require.NoError(err) require.GreaterOrEqual(float64(1), perc) } diff --git a/snow/uptime/no_op_calculator.go b/snow/uptime/no_op_calculator.go index fb308f4f6030..2f715d799e70 100644 --- a/snow/uptime/no_op_calculator.go +++ b/snow/uptime/no_op_calculator.go @@ -13,14 +13,14 @@ var NoOpCalculator Calculator = noOpCalculator{} type noOpCalculator struct{} -func (noOpCalculator) CalculateUptime(ids.NodeID, ids.ID) (time.Duration, time.Time, error) { +func (noOpCalculator) CalculateUptime(ids.NodeID) (time.Duration, time.Time, error) { return 0, time.Time{}, nil } -func (noOpCalculator) CalculateUptimePercent(ids.NodeID, ids.ID) (float64, error) { +func (noOpCalculator) CalculateUptimePercent(ids.NodeID) (float64, error) { return 0, nil } -func (noOpCalculator) CalculateUptimePercentFrom(ids.NodeID, ids.ID, time.Time) (float64, error) { +func (noOpCalculator) CalculateUptimePercentFrom(ids.NodeID, time.Time) (float64, error) { return 0, nil } diff --git a/snow/uptime/state.go b/snow/uptime/state.go index f9edeb76a3ee..59a720c897b2 100644 --- a/snow/uptime/state.go +++ b/snow/uptime/state.go @@ -10,34 +10,25 @@ import ( ) type State interface { - // GetUptime returns [upDuration] and [lastUpdated] of [nodeID] on - // [subnetID]. - // Returns [database.ErrNotFound] if [nodeID] isn't currently a validator of - // the subnet. + // GetUptime returns [upDuration] and [lastUpdated] of [nodeID] + // Returns [database.ErrNotFound] if [nodeID] isn't currently a validator. GetUptime( nodeID ids.NodeID, - subnetID ids.ID, ) (upDuration time.Duration, lastUpdated time.Time, err error) - // SetUptime updates [upDuration] and [lastUpdated] of [nodeID] on - // [subnetID]. - // Returns [database.ErrNotFound] if [nodeID] isn't currently a validator of - // the subnet. + // SetUptime updates [upDuration] and [lastUpdated] of [nodeID] + // Returns [database.ErrNotFound] if [nodeID] isn't currently a validator // Invariant: expects [lastUpdated] to be truncated (floored) to the nearest // second. SetUptime( nodeID ids.NodeID, - subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time, ) error - // GetStartTime returns the time that [nodeID] started validating - // [subnetID]. - // Returns [database.ErrNotFound] if [nodeID] isn't currently a validator of - // the subnet. + // GetStartTime returns the time that [nodeID] started validating. + // Returns [database.ErrNotFound] if [nodeID] isn't currently a validator. 
GetStartTime( nodeID ids.NodeID, - subnetID ids.ID, ) (startTime time.Time, err error) } diff --git a/snow/uptime/test_state.go b/snow/uptime/test_state.go index 23879b5cb3a9..213b591bc71c 100644 --- a/snow/uptime/test_state.go +++ b/snow/uptime/test_state.go @@ -21,38 +21,33 @@ type uptime struct { type TestState struct { dbReadError error dbWriteError error - nodes map[ids.NodeID]map[ids.ID]*uptime + nodes map[ids.NodeID]*uptime } func NewTestState() *TestState { return &TestState{ - nodes: make(map[ids.NodeID]map[ids.ID]*uptime), + nodes: make(map[ids.NodeID]*uptime), } } -func (s *TestState) AddNode(nodeID ids.NodeID, subnetID ids.ID, startTime time.Time) { - subnetUptimes, ok := s.nodes[nodeID] - if !ok { - subnetUptimes = make(map[ids.ID]*uptime) - s.nodes[nodeID] = subnetUptimes - } +func (s *TestState) AddNode(nodeID ids.NodeID, startTime time.Time) { st := time.Unix(startTime.Unix(), 0) - subnetUptimes[subnetID] = &uptime{ + s.nodes[nodeID] = &uptime{ lastUpdated: st, startTime: st, } } -func (s *TestState) GetUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Duration, time.Time, error) { - up, exists := s.nodes[nodeID][subnetID] +func (s *TestState) GetUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { + up, exists := s.nodes[nodeID] if !exists { return 0, time.Time{}, database.ErrNotFound } return up.upDuration, up.lastUpdated, s.dbReadError } -func (s *TestState) SetUptime(nodeID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { - up, exists := s.nodes[nodeID][subnetID] +func (s *TestState) SetUptime(nodeID ids.NodeID, upDuration time.Duration, lastUpdated time.Time) error { + up, exists := s.nodes[nodeID] if !exists { return database.ErrNotFound } @@ -61,8 +56,8 @@ func (s *TestState) SetUptime(nodeID ids.NodeID, subnetID ids.ID, upDuration tim return s.dbWriteError } -func (s *TestState) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { - up, exists := s.nodes[nodeID][subnetID] +func (s *TestState) GetStartTime(nodeID ids.NodeID) (time.Time, error) { + up, exists := s.nodes[nodeID] if !exists { return time.Time{}, database.ErrNotFound } From 733d5fd3370d3c6ad736df488404cd5c3b024508 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 25 Jul 2024 21:04:58 +0300 Subject: [PATCH 002/155] remove subnet uptimes from platforvm --- vms/platformvm/api/static_service.go | 4 +- vms/platformvm/block/builder/helpers_test.go | 2 +- vms/platformvm/block/executor/block_test.go | 6 +-- vms/platformvm/block/executor/helpers_test.go | 2 +- vms/platformvm/block/executor/options.go | 2 - .../block/executor/proposal_block_test.go | 4 +- .../client_permissionless_validator.go | 2 +- vms/platformvm/service.go | 45 ++++++++++--------- vms/platformvm/state/mock_state.go | 24 +++++----- vms/platformvm/state/state.go | 12 ++++- vms/platformvm/state/state_test.go | 33 +------------- vms/platformvm/txs/executor/helpers_test.go | 4 +- vms/platformvm/vm.go | 20 ++------- 13 files changed, 63 insertions(+), 97 deletions(-) diff --git a/vms/platformvm/api/static_service.go b/vms/platformvm/api/static_service.go index 418b447114c7..326e5fb5e8a8 100644 --- a/vms/platformvm/api/static_service.go +++ b/vms/platformvm/api/static_service.go @@ -121,7 +121,7 @@ type PermissionlessValidator struct { DelegationFee json.Float32 `json:"delegationFee"` ExactDelegationFee *json.Uint32 `json:"exactDelegationFee,omitempty"` Uptime *json.Float32 `json:"uptime,omitempty"` - Connected bool `json:"connected"` + Connected *bool `json:"connected,omitempty"` 
Staked []UTXO `json:"staked,omitempty"` Signer *signer.ProofOfPossession `json:"signer,omitempty"` @@ -145,7 +145,7 @@ type GenesisPermissionlessValidator struct { type PermissionedValidator struct { Staker // The owner the staking reward, if applicable, will go to - Connected bool `json:"connected"` + Connected *bool `json:"connected,omitempty"` Uptime *json.Float32 `json:"uptime,omitempty"` } diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index df2d620c6eed..b492a8fdac13 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -221,7 +221,7 @@ func newEnvironment(t *testing.T, f fork) *environment { //nolint:unparam if res.isBootstrapped.Get() { validatorIDs := res.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - require.NoError(res.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + require.NoError(res.uptimes.StopTracking(validatorIDs)) require.NoError(res.state.Commit()) } diff --git a/vms/platformvm/block/executor/block_test.go b/vms/platformvm/block/executor/block_test.go index dfccc413e29b..bf020e164629 100644 --- a/vms/platformvm/block/executor/block_test.go +++ b/vms/platformvm/block/executor/block_test.go @@ -287,7 +287,7 @@ func TestBlockOptions(t *testing.T) { state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) uptimes := uptime.NewMockCalculator(ctrl) - uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(0.0, database.ErrNotFound) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, primaryNetworkValidatorStartTime).Return(0.0, database.ErrNotFound) manager := &manager{ backend: &backend{ @@ -405,7 +405,7 @@ func TestBlockOptions(t *testing.T) { state.EXPECT().GetSubnetTransformation(subnetID).Return(transformSubnetTx, nil) uptimes := uptime.NewMockCalculator(ctrl) - uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(.5, nil) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, primaryNetworkValidatorStartTime).Return(.5, nil) manager := &manager{ backend: &backend{ @@ -467,7 +467,7 @@ func TestBlockOptions(t *testing.T) { state.EXPECT().GetSubnetTransformation(subnetID).Return(transformSubnetTx, nil) uptimes := uptime.NewMockCalculator(ctrl) - uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(.5, nil) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, primaryNetworkValidatorStartTime).Return(.5, nil) manager := &manager{ backend: &backend{ diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index 80dca6745fcc..b23a34b8264b 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ b/vms/platformvm/block/executor/helpers_test.go @@ -237,7 +237,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller, f fork) *environment if res.isBootstrapped.Get() { validatorIDs := res.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - require.NoError(res.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + require.NoError(res.uptimes.StopTracking(validatorIDs)) require.NoError(res.state.Commit()) } diff --git a/vms/platformvm/block/executor/options.go b/vms/platformvm/block/executor/options.go index f2071c8e13cb..7487ea449335 100644 --- a/vms/platformvm/block/executor/options.go +++ 
b/vms/platformvm/block/executor/options.go @@ -175,10 +175,8 @@ func (o *options) prefersCommit(tx *txs.Tx) (bool, error) { expectedUptimePercentage = float64(transformSubnet.UptimeRequirement) / reward.PercentDenominator } - // TODO: calculate subnet uptimes uptime, err := o.uptimes.CalculateUptimePercentFrom( nodeID, - constants.PrimaryNetworkID, primaryNetworkValidator.StartTime, ) if err != nil { diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index f0037754d06a..0b914aff4c98 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -105,7 +105,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetCurrentSupply(constants.PrimaryNetworkID).Return(uint64(1000), nil).AnyTimes() onParentAccept.EXPECT().GetDelegateeReward(constants.PrimaryNetworkID, utx.NodeID()).Return(uint64(0), nil).AnyTimes() - env.mockedState.EXPECT().GetUptime(gomock.Any(), constants.PrimaryNetworkID).Return( + env.mockedState.EXPECT().GetUptime(gomock.Any()).Return( time.Microsecond, /*upDuration*/ time.Time{}, /*lastUpdated*/ nil, /*err*/ @@ -219,7 +219,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { pendingStakersIt.EXPECT().Release().AnyTimes() onParentAccept.EXPECT().GetPendingStakerIterator().Return(pendingStakersIt, nil).AnyTimes() - env.mockedState.EXPECT().GetUptime(gomock.Any(), gomock.Any()).Return( + env.mockedState.EXPECT().GetUptime(gomock.Any).Return( time.Microsecond, /*upDuration*/ time.Time{}, /*lastUpdated*/ nil, /*err*/ diff --git a/vms/platformvm/client_permissionless_validator.go b/vms/platformvm/client_permissionless_validator.go index 3974f770658d..e9cdfa6ccbeb 100644 --- a/vms/platformvm/client_permissionless_validator.go +++ b/vms/platformvm/client_permissionless_validator.go @@ -133,7 +133,7 @@ func getClientPermissionlessValidators(validatorsSliceIntf []interface{}) ([]Cli AccruedDelegateeReward: (*uint64)(apiValidator.AccruedDelegateeReward), DelegationFee: float32(apiValidator.DelegationFee), Uptime: (*float32)(apiValidator.Uptime), - Connected: &apiValidator.Connected, + Connected: apiValidator.Connected, Signer: apiValidator.Signer, DelegatorCount: (*uint64)(apiValidator.DelegatorCount), DelegatorWeight: (*uint64)(apiValidator.DelegatorWeight), diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 0299192b6677..1bb82e1a1203 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -830,13 +830,20 @@ func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidato shares := attr.shares delegationFee := avajson.Float32(100 * float32(shares) / float32(reward.PercentDenominator)) - - uptime, err := s.getAPIUptime(currentStaker) - if err != nil { - return err + var uptime *avajson.Float32 + var connected *bool + // Only calculate uptime for primary network validators + // TODO: decide whether we want to keep connected for subnet validators + // it should be available at this point + if args.SubnetID == constants.PrimaryNetworkID { + currentUptime, isConnected, err := s.getAPIUptime(currentStaker) + if err != nil { + return err + } + connected = &isConnected + uptime = ¤tUptime } - connected := s.vm.uptimeManager.IsConnected(nodeID, args.SubnetID) var ( validationRewardOwner *platformapi.Owner delegationRewardOwner *platformapi.Owner @@ -896,15 +903,8 @@ func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidato 
vdrToDelegators[delegator.NodeID] = append(vdrToDelegators[delegator.NodeID], delegator) case txs.SubnetPermissionedValidatorCurrentPriority: - uptime, err := s.getAPIUptime(currentStaker) - if err != nil { - return err - } - connected := s.vm.uptimeManager.IsConnected(nodeID, args.SubnetID) reply.Validators = append(reply.Validators, platformapi.PermissionedValidator{ - Staker: apiStaker, - Connected: connected, - Uptime: uptime, + Staker: apiStaker, }) default: @@ -1828,20 +1828,21 @@ func (s *Service) GetBlockByHeight(_ *http.Request, args *api.GetBlockByHeightAr return err } -func (s *Service) getAPIUptime(staker *state.Staker) (*avajson.Float32, error) { - // Only report uptimes that we have been actively tracking. - if constants.PrimaryNetworkID != staker.SubnetID && !s.vm.TrackedSubnets.Contains(staker.SubnetID) { - return nil, nil - } - - rawUptime, err := s.vm.uptimeManager.CalculateUptimePercentFrom(staker.NodeID, staker.SubnetID, staker.StartTime) +// Returns: +// 1) the uptime of a validator in the API format +// 2) whether the validator is currently connected +// 3) an error if one occurred +func (s *Service) getAPIUptime(staker *state.Staker) (avajson.Float32, bool, error) { + rawUptime, err := s.vm.uptimeManager.CalculateUptimePercentFrom(staker.NodeID, staker.StartTime) if err != nil { - return nil, err + return 0, false, err } + connected := s.vm.uptimeManager.IsConnected(staker.NodeID) + // Transform this to a percentage (0-100) to make it consistent // with observedUptime in info.peers API uptime := avajson.Float32(rawUptime * 100) - return &uptime, nil + return uptime, connected, nil } func (s *Service) getAPIOwner(owner *secp256k1fx.OutputOwners) (*platformapi.Owner, error) { diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index c1321567e6a9..36087cd934e6 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -1381,18 +1381,18 @@ func (mr *MockStateMockRecorder) GetRewardUTXOs(arg0 any) *gomock.Call { } // GetStartTime mocks base method. -func (m *MockState) GetStartTime(arg0 ids.NodeID, arg1 ids.ID) (time.Time, error) { +func (m *MockState) GetStartTime(arg0 ids.NodeID) (time.Time, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStartTime", arg0, arg1) + ret := m.ctrl.Call(m, "GetStartTime", arg0) ret0, _ := ret[0].(time.Time) ret1, _ := ret[1].(error) return ret0, ret1 } // GetStartTime indicates an expected call of GetStartTime. -func (mr *MockStateMockRecorder) GetStartTime(arg0, arg1 any) *gomock.Call { +func (mr *MockStateMockRecorder) GetStartTime(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStartTime", reflect.TypeOf((*MockState)(nil).GetStartTime), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStartTime", reflect.TypeOf((*MockState)(nil).GetStartTime), arg0) } // GetStatelessBlock mocks base method. @@ -1501,9 +1501,9 @@ func (mr *MockStateMockRecorder) GetUTXO(arg0 any) *gomock.Call { } // GetUptime mocks base method. 
-func (m *MockState) GetUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Duration, time.Time, error) { +func (m *MockState) GetUptime(arg0 ids.NodeID) (time.Duration, time.Time, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUptime", arg0, arg1) + ret := m.ctrl.Call(m, "GetUptime", arg0) ret0, _ := ret[0].(time.Duration) ret1, _ := ret[1].(time.Time) ret2, _ := ret[2].(error) @@ -1511,9 +1511,9 @@ func (m *MockState) GetUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Duration, time } // GetUptime indicates an expected call of GetUptime. -func (mr *MockStateMockRecorder) GetUptime(arg0, arg1 any) *gomock.Call { +func (mr *MockStateMockRecorder) GetUptime(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0) } // PutCurrentDelegator mocks base method. @@ -1653,17 +1653,17 @@ func (mr *MockStateMockRecorder) SetTimestamp(arg0 any) *gomock.Call { } // SetUptime mocks base method. -func (m *MockState) SetUptime(arg0 ids.NodeID, arg1 ids.ID, arg2 time.Duration, arg3 time.Time) error { +func (m *MockState) SetUptime(arg0 ids.NodeID, arg1 time.Duration, arg2 time.Time) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetUptime", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SetUptime", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // SetUptime indicates an expected call of SetUptime. -func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), arg0, arg1, arg2) } // UTXOIDs mocks base method. 
diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index a6266a480858..d8e9ed3ea3bc 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -989,8 +989,8 @@ func (s *state) DeleteUTXO(utxoID ids.ID) { s.modifiedUTXOs[utxoID] = nil } -func (s *state) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { - staker, err := s.currentStakers.GetValidator(subnetID, nodeID) +func (s *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { + staker, err := s.currentStakers.GetValidator(constants.PrimaryNetworkID, nodeID) if err != nil { return time.Time{}, err } @@ -2440,3 +2440,11 @@ func (s *state) ReindexBlocks(lock sync.Locker, log logging.Logger) error { return s.Commit() } + +func (s *state) GetUptime(vdrID ids.NodeID) (time.Duration, time.Time, error) { + return s.validatorState.GetUptime(vdrID, constants.PrimaryNetworkID) +} + +func (s *state) SetUptime(vdrID ids.NodeID, upDuration time.Duration, lastUpdated time.Time) error { + return s.validatorState.SetUptime(vdrID, constants.PrimaryNetworkID, upDuration, lastUpdated) +} diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index c6241ddc8cc4..517981552042 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -106,9 +106,6 @@ func TestPersistStakers(t *testing.T) { // with the right weight and showing the BLS key checkValidatorsSet func(*require.Assertions, *state, *Staker) - // Check that node duly track stakers uptimes - checkValidatorUptimes func(*require.Assertions, *state, *Staker) - // Check whether weight/bls keys diffs are duly stored checkDiffs func(*require.Assertions, *state, *Staker, uint64) }{ @@ -159,12 +156,6 @@ func TestPersistStakers(t *testing.T) { Weight: staker.Weight, }, valOut) }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - upDuration, lastUpdated, err := s.GetUptime(staker.NodeID, staker.SubnetID) - r.NoError(err) - r.Equal(upDuration, time.Duration(0)) - r.Equal(lastUpdated, staker.StartTime) - }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.NoError(err) @@ -261,7 +252,6 @@ func TestPersistStakers(t *testing.T) { r.Equal(valOut.NodeID, staker.NodeID) r.Equal(valOut.Weight, val.Weight+staker.Weight) }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { // validator's weight must increase of delegator's weight amount weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) @@ -313,11 +303,6 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - // pending validators uptime is not tracked - _, _, err := s.GetUptime(staker.NodeID, staker.SubnetID) - r.ErrorIs(err, database.ErrNotFound) - }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { // pending validators weight diff and bls diffs are not stored _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) @@ -388,8 +373,7 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, - checkValidatorUptimes: 
func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, + checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, }, "delete current validator": { storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { @@ -435,11 +419,6 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - // uptimes of delete validators are dropped - _, _, err := s.GetUptime(staker.NodeID, staker.SubnetID) - r.ErrorIs(err, database.ErrNotFound) - }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.NoError(err) @@ -536,7 +515,6 @@ func TestPersistStakers(t *testing.T) { r.Equal(valOut.NodeID, staker.NodeID) r.Equal(valOut.Weight, val.Weight) }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { // validator's weight must decrease of delegator's weight amount weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) @@ -590,10 +568,6 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - _, _, err := s.GetUptime(staker.NodeID, staker.SubnetID) - r.ErrorIs(err, database.ErrNotFound) - }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.ErrorIs(err, database.ErrNotFound) @@ -661,8 +635,7 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, + checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, }, } @@ -680,7 +653,6 @@ func TestPersistStakers(t *testing.T) { // check all relevant data are stored test.checkStakerInState(require, state, staker) test.checkValidatorsSet(require, state, staker) - test.checkValidatorUptimes(require, state, staker) test.checkDiffs(require, state, staker, 0 /*height*/) // rebuild the state @@ -694,7 +666,6 @@ func TestPersistStakers(t *testing.T) { // check again that all relevant data are still available in rebuilt state test.checkStakerInState(require, state, staker) test.checkValidatorsSet(require, state, staker) - test.checkValidatorUptimes(require, state, staker) test.checkDiffs(require, state, staker, 0 /*height*/) }) } diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index a3b3033df44a..69321d539a65 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -187,12 +187,12 @@ func newEnvironment(t *testing.T, f fork) *environment { if env.isBootstrapped.Get() { validatorIDs := env.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - require.NoError(env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + require.NoError(env.uptimes.StopTracking(validatorIDs)) for subnetID := range env.config.TrackedSubnets { validatorIDs := 
env.config.Validators.GetValidatorIDs(subnetID) - require.NoError(env.uptimes.StopTracking(validatorIDs, subnetID)) + require.NoError(env.uptimes.StopTracking(validatorIDs)) } env.state.SetHeight(math.MaxUint64) require.NoError(env.state.Commit()) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 203688d23136..6690488ef291 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -349,7 +349,7 @@ func (vm *VM) onNormalOperationsStarted() error { } primaryVdrIDs := vm.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - if err := vm.uptimeManager.StartTracking(primaryVdrIDs, constants.PrimaryNetworkID); err != nil { + if err := vm.uptimeManager.StartTracking(primaryVdrIDs); err != nil { return err } @@ -357,11 +357,6 @@ func (vm *VM) onNormalOperationsStarted() error { vm.Validators.RegisterSetCallbackListener(constants.PrimaryNetworkID, vl) for subnetID := range vm.TrackedSubnets { - vdrIDs := vm.Validators.GetValidatorIDs(subnetID) - if err := vm.uptimeManager.StartTracking(vdrIDs, subnetID); err != nil { - return err - } - vl := validators.NewLogger(vm.ctx.Log, subnetID, vm.ctx.NodeID) vm.Validators.RegisterSetCallbackListener(subnetID, vl) } @@ -397,17 +392,10 @@ func (vm *VM) Shutdown(context.Context) error { if vm.bootstrapped.Get() { primaryVdrIDs := vm.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - if err := vm.uptimeManager.StopTracking(primaryVdrIDs, constants.PrimaryNetworkID); err != nil { + if err := vm.uptimeManager.StopTracking(primaryVdrIDs); err != nil { return err } - for subnetID := range vm.TrackedSubnets { - vdrIDs := vm.Validators.GetValidatorIDs(subnetID) - if err := vm.uptimeManager.StopTracking(vdrIDs, subnetID); err != nil { - return err - } - } - if err := vm.state.Commit(); err != nil { return err } @@ -473,14 +461,14 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { } func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { - if err := vm.uptimeManager.Connect(nodeID, constants.PrimaryNetworkID); err != nil { + if err := vm.uptimeManager.Connect(nodeID); err != nil { return err } return vm.Network.Connected(ctx, nodeID, version) } func (vm *VM) ConnectedSubnet(_ context.Context, nodeID ids.NodeID, subnetID ids.ID) error { - return vm.uptimeManager.Connect(nodeID, subnetID) + return nil } func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { From 10d8e868affdac0a0f5319718975a74d817ccc7e Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 25 Jul 2024 21:05:35 +0300 Subject: [PATCH 003/155] remove subnet uptimes from p2p --- message/messages_test.go | 11 +- message/mock_outbound_message_builder.go | 8 +- message/outbound_msg_builder.go | 5 +- network/metrics.go | 52 +- network/network.go | 39 +- network/peer/info.go | 21 +- network/peer/message_queue_test.go | 11 +- network/peer/peer.go | 136 +--- network/peer/peer_test.go | 126 +--- network/peer/set_test.go | 22 +- proto/p2p/p2p.proto | 11 +- proto/pb/p2p/p2p.pb.go | 833 ++++++++++------------- 12 files changed, 469 insertions(+), 806 deletions(-) diff --git a/message/messages_test.go b/message/messages_test.go index 8314fa805e38..7d4ace78b5bc 100644 --- a/message/messages_test.go +++ b/message/messages_test.go @@ -54,7 +54,7 @@ func TestMessage(t *testing.T) { bytesSaved bool // if true, outbound message saved bytes must be non-zero }{ { - desc: "ping message with no compression no subnet uptimes", + desc: "ping message with no compression no uptime", op: PingOp, msg: 
&p2p.Message{ Message: &p2p.Message_Ping{ @@ -78,17 +78,12 @@ func TestMessage(t *testing.T) { bytesSaved: false, }, { - desc: "ping message with no compression and subnet uptimes", + desc: "ping message with no compression and uptime", op: PingOp, msg: &p2p.Message{ Message: &p2p.Message_Ping{ Ping: &p2p.Ping{ - SubnetUptimes: []*p2p.SubnetUptime{ - { - SubnetId: testID[:], - Uptime: 100, - }, - }, + Uptime: 100, }, }, }, diff --git a/message/mock_outbound_message_builder.go b/message/mock_outbound_message_builder.go index 917d764028fe..c480ca1d1c65 100644 --- a/message/mock_outbound_message_builder.go +++ b/message/mock_outbound_message_builder.go @@ -314,18 +314,18 @@ func (mr *MockOutboundMsgBuilderMockRecorder) PeerList(arg0, arg1 any) *gomock.C } // Ping mocks base method. -func (m *MockOutboundMsgBuilder) Ping(arg0 uint32, arg1 []*p2p.SubnetUptime) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) Ping(arg0 uint32) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Ping", arg0, arg1) + ret := m.ctrl.Call(m, "Ping", arg0) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // Ping indicates an expected call of Ping. -func (mr *MockOutboundMsgBuilderMockRecorder) Ping(arg0, arg1 any) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Ping(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Ping), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Ping), arg0) } // Pong mocks base method. diff --git a/message/outbound_msg_builder.go b/message/outbound_msg_builder.go index 78aacdce3e08..208b04902ae1 100644 --- a/message/outbound_msg_builder.go +++ b/message/outbound_msg_builder.go @@ -49,7 +49,6 @@ type OutboundMsgBuilder interface { Ping( primaryUptime uint32, - subnetUptimes []*p2p.SubnetUptime, ) (OutboundMessage, error) Pong() (OutboundMessage, error) @@ -198,14 +197,12 @@ func newOutboundBuilder(compressionType compression.Type, builder *msgBuilder) O func (b *outMsgBuilder) Ping( primaryUptime uint32, - subnetUptimes []*p2p.SubnetUptime, ) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_Ping{ Ping: &p2p.Ping{ - Uptime: primaryUptime, - SubnetUptimes: subnetUptimes, + Uptime: primaryUptime, }, }, }, diff --git a/network/metrics.go b/network/metrics.go index 9702e821b246..fda3c0bf5f85 100644 --- a/network/metrics.go +++ b/network/metrics.go @@ -19,24 +19,22 @@ type metrics struct { // trackedSubnets does not include the primary network ID trackedSubnets set.Set[ids.ID] - numTracked prometheus.Gauge - numPeers prometheus.Gauge - numSubnetPeers *prometheus.GaugeVec - timeSinceLastMsgSent prometheus.Gauge - timeSinceLastMsgReceived prometheus.Gauge - sendFailRate prometheus.Gauge - connected prometheus.Counter - disconnected prometheus.Counter - acceptFailed prometheus.Counter - inboundConnRateLimited prometheus.Counter - inboundConnAllowed prometheus.Counter - tlsConnRejected prometheus.Counter - numUselessPeerListBytes prometheus.Counter - nodeUptimeWeightedAverage prometheus.Gauge - nodeUptimeRewardingStake prometheus.Gauge - nodeSubnetUptimeWeightedAverage *prometheus.GaugeVec - nodeSubnetUptimeRewardingStake *prometheus.GaugeVec - peerConnectedLifetimeAverage prometheus.Gauge + numTracked prometheus.Gauge + numPeers prometheus.Gauge + numSubnetPeers *prometheus.GaugeVec + 
timeSinceLastMsgSent prometheus.Gauge + timeSinceLastMsgReceived prometheus.Gauge + sendFailRate prometheus.Gauge + connected prometheus.Counter + disconnected prometheus.Counter + acceptFailed prometheus.Counter + inboundConnRateLimited prometheus.Counter + inboundConnAllowed prometheus.Counter + tlsConnRejected prometheus.Counter + numUselessPeerListBytes prometheus.Counter + nodeUptimeWeightedAverage prometheus.Gauge + nodeUptimeRewardingStake prometheus.Gauge + peerConnectedLifetimeAverage prometheus.Gauge lock sync.RWMutex peerConnectedStartTimes map[ids.NodeID]float64 @@ -112,20 +110,6 @@ func newMetrics( Name: "node_uptime_rewarding_stake", Help: "The percentage of total stake which thinks this node is eligible for rewards", }), - nodeSubnetUptimeWeightedAverage: prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "node_subnet_uptime_weighted_average", - Help: "This node's subnet uptime averages weighted by observing subnet peer stakes", - }, - []string{"subnetID"}, - ), - nodeSubnetUptimeRewardingStake: prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "node_subnet_uptime_rewarding_stake", - Help: "The percentage of subnet's total stake which thinks this node is eligible for subnet's rewards", - }, - []string{"subnetID"}, - ), peerConnectedLifetimeAverage: prometheus.NewGauge( prometheus.GaugeOpts{ Name: "peer_connected_duration_average", @@ -151,8 +135,6 @@ func newMetrics( registerer.Register(m.inboundConnRateLimited), registerer.Register(m.nodeUptimeWeightedAverage), registerer.Register(m.nodeUptimeRewardingStake), - registerer.Register(m.nodeSubnetUptimeWeightedAverage), - registerer.Register(m.nodeSubnetUptimeRewardingStake), registerer.Register(m.peerConnectedLifetimeAverage), ) @@ -161,8 +143,6 @@ func newMetrics( // initialize to 0 subnetIDStr := subnetID.String() m.numSubnetPeers.WithLabelValues(subnetIDStr).Set(0) - m.nodeSubnetUptimeWeightedAverage.WithLabelValues(subnetIDStr).Set(0) - m.nodeSubnetUptimeRewardingStake.WithLabelValues(subnetIDStr).Set(0) } return m, err diff --git a/network/network.go b/network/network.go index 2aee13a910d9..e92bc248a218 100644 --- a/network/network.go +++ b/network/network.go @@ -85,9 +85,9 @@ type Network interface { // info about the peers in [nodeIDs] that have finished the handshake. PeerInfo(nodeIDs []ids.NodeID) []peer.Info - // NodeUptime returns given node's [subnetID] UptimeResults in the view of + // NodeUptime returns given node's primary network UptimeResults in the view of // this node's peer validators. 
- NodeUptime(subnetID ids.ID) (UptimeResult, error) + NodeUptime() (UptimeResult, error) } type UptimeResult struct { @@ -1098,19 +1098,15 @@ func (n *network) StartClose() { }) } -func (n *network) NodeUptime(subnetID ids.ID) (UptimeResult, error) { - if subnetID != constants.PrimaryNetworkID && !n.config.TrackedSubnets.Contains(subnetID) { - return UptimeResult{}, errNotTracked - } - - myStake := n.config.Validators.GetWeight(subnetID, n.config.MyNodeID) +func (n *network) NodeUptime() (UptimeResult, error) { + myStake := n.config.Validators.GetWeight(constants.PrimaryNetworkID, n.config.MyNodeID) if myStake == 0 { return UptimeResult{}, errNotValidator } - totalWeightInt, err := n.config.Validators.TotalWeight(subnetID) + totalWeightInt, err := n.config.Validators.TotalWeight(constants.PrimaryNetworkID) if err != nil { - return UptimeResult{}, fmt.Errorf("error while fetching weight for subnet %s: %w", subnetID, err) + return UptimeResult{}, fmt.Errorf("error while fetching weight for primary network %w", err) } var ( @@ -1126,22 +1122,18 @@ func (n *network) NodeUptime(subnetID ids.ID) (UptimeResult, error) { peer, _ := n.connectedPeers.GetByIndex(i) nodeID := peer.ID() - weight := n.config.Validators.GetWeight(subnetID, nodeID) + weight := n.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) if weight == 0 { // this is not a validator skip it. continue } - observedUptime, exist := peer.ObservedUptime(subnetID) - if !exist { - observedUptime = 0 - } + observedUptime := peer.ObservedUptime() percent := float64(observedUptime) weightFloat := float64(weight) totalWeightedPercent += percent * weightFloat // if this peer thinks we're above requirement add the weight - // TODO: use subnet-specific uptime requirements if percent/100 >= n.config.UptimeRequirement { rewardingStake += weightFloat } @@ -1177,7 +1169,7 @@ func (n *network) runTimers() { n.peerConfig.Log.Debug("reset ip tracker bloom filter") } case <-updateUptimes.C: - primaryUptime, err := n.NodeUptime(constants.PrimaryNetworkID) + primaryUptime, err := n.NodeUptime() if err != nil { n.peerConfig.Log.Debug("failed to get primary network uptime", zap.Error(err), @@ -1185,19 +1177,6 @@ func (n *network) runTimers() { } n.metrics.nodeUptimeWeightedAverage.Set(primaryUptime.WeightedAveragePercentage) n.metrics.nodeUptimeRewardingStake.Set(primaryUptime.RewardingStakePercentage) - - for subnetID := range n.config.TrackedSubnets { - result, err := n.NodeUptime(subnetID) - if err != nil { - n.peerConfig.Log.Debug("failed to get subnet uptime", - zap.Stringer("subnetID", subnetID), - zap.Error(err), - ) - } - subnetIDStr := subnetID.String() - n.metrics.nodeSubnetUptimeWeightedAverage.WithLabelValues(subnetIDStr).Set(result.WeightedAveragePercentage) - n.metrics.nodeSubnetUptimeRewardingStake.WithLabelValues(subnetIDStr).Set(result.RewardingStakePercentage) - } } } } diff --git a/network/peer/info.go b/network/peer/info.go index 928c47ff26ee..71d2c78420f9 100644 --- a/network/peer/info.go +++ b/network/peer/info.go @@ -13,15 +13,14 @@ import ( ) type Info struct { - IP netip.AddrPort `json:"ip"` - PublicIP netip.AddrPort `json:"publicIP,omitempty"` - ID ids.NodeID `json:"nodeID"` - Version string `json:"version"` - LastSent time.Time `json:"lastSent"` - LastReceived time.Time `json:"lastReceived"` - ObservedUptime json.Uint32 `json:"observedUptime"` - ObservedSubnetUptimes map[ids.ID]json.Uint32 `json:"observedSubnetUptimes"` - TrackedSubnets set.Set[ids.ID] `json:"trackedSubnets"` - SupportedACPs set.Set[uint32] 
`json:"supportedACPs"` - ObjectedACPs set.Set[uint32] `json:"objectedACPs"` + IP netip.AddrPort `json:"ip"` + PublicIP netip.AddrPort `json:"publicIP,omitempty"` + ID ids.NodeID `json:"nodeID"` + Version string `json:"version"` + LastSent time.Time `json:"lastSent"` + LastReceived time.Time `json:"lastReceived"` + ObservedUptime json.Uint32 `json:"observedUptime"` + TrackedSubnets set.Set[ids.ID] `json:"trackedSubnets"` + SupportedACPs set.Set[uint32] `json:"supportedACPs"` + ObjectedACPs set.Set[uint32] `json:"objectedACPs"` } diff --git a/network/peer/message_queue_test.go b/network/peer/message_queue_test.go index 4b9b63f4c449..a4cce461abab 100644 --- a/network/peer/message_queue_test.go +++ b/network/peer/message_queue_test.go @@ -9,9 +9,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -33,14 +31,7 @@ func TestMessageQueue(t *testing.T) { // Assert that the messages are popped in the same order they were pushed for i := 0; i < numToSend; i++ { - testID := ids.GenerateTestID() - testID2 := ids.GenerateTestID() - m, err := mc.Ping( - uint32(i), - []*p2p.SubnetUptime{ - {SubnetId: testID[:], Uptime: uint32(i)}, - {SubnetId: testID2[:], Uptime: uint32(i)}, - }) + m, err := mc.Ping(uint32(i)) require.NoError(err) msgs = append(msgs, m) } diff --git a/network/peer/peer.go b/network/peer/peer.go index a92791ff72ee..e3161cfc0eab 100644 --- a/network/peer/peer.go +++ b/network/peer/peer.go @@ -94,10 +94,10 @@ type Peer interface { // be called after [Ready] returns true. TrackedSubnets() set.Set[ids.ID] - // ObservedUptime returns the local node's subnet uptime according to the + // ObservedUptime returns the local node's primary network uptime according to the // peer. The value ranges from [0, 100]. It should only be called after // [Ready] returns true. - ObservedUptime(subnetID ids.ID) (uint32, bool) + ObservedUptime() uint32 // Send attempts to send [msg] to the peer. The peer takes ownership of // [msg] for reference counting. This returns false if the message is @@ -158,10 +158,8 @@ type peer struct { // this can only be accessed by the message sender goroutine. txIDOfVerifiedBLSKey ids.ID - observedUptimesLock sync.RWMutex - // [observedUptimesLock] must be held while accessing [observedUptime] - // Subnet ID --> Our uptime for the given subnet as perceived by the peer - observedUptimes map[ids.ID]uint32 + // Our primary network uptime perceived by the peer + observedUptime utils.Atomic[uint32] // True if this peer has sent us a valid Handshake message and // is running a compatible version. 
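
[Editorial note, not part of the patch] The hunk above collapses the per-peer uptime bookkeeping from a subnetID-keyed map guarded by an RWMutex down to a single atomic value, since only the primary-network uptime remains. The following is a minimal standalone sketch contrasting the two concurrency patterns; it uses sync/atomic from the standard library rather than the repo's utils.Atomic (assumed here to be a typed wrapper with equivalent Get/Set semantics), and the type and field names are illustrative only.

// Sketch only: contrasts the old map-plus-lock bookkeeping with the single
// atomic value the patch switches to. Names are hypothetical stand-ins.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// before: one observed uptime per subnet, guarded by a lock
type multiUptime struct {
	mu      sync.RWMutex
	uptimes map[string]uint32 // subnetID -> [0, 100]
}

func (m *multiUptime) observe(subnetID string, uptime uint32) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.uptimes[subnetID] = uptime
}

func (m *multiUptime) get(subnetID string) (uint32, bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	u, ok := m.uptimes[subnetID]
	return u, ok
}

// after: only the primary-network uptime is tracked, so a single atomic suffices
type primaryUptime struct {
	observed atomic.Uint32 // [0, 100]
}

func (p *primaryUptime) observe(uptime uint32) { p.observed.Store(uptime) }
func (p *primaryUptime) get() uint32           { return p.observed.Load() }

func main() {
	old := &multiUptime{uptimes: map[string]uint32{}}
	old.observe("primary", 97)
	u, _ := old.get("primary")
	fmt.Println(u) // 97

	nu := &primaryUptime{}
	nu.observe(97)
	fmt.Println(nu.get()) // 97
}
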
@@ -221,7 +219,6 @@ func Start( onClosingCtx: onClosingCtx, onClosingCtxCancel: onClosingCtxCancel, onClosed: make(chan struct{}), - observedUptimes: make(map[ids.ID]uint32), getPeerListChan: make(chan struct{}, 1), } @@ -270,33 +267,20 @@ func (p *peer) AwaitReady(ctx context.Context) error { } func (p *peer) Info() Info { - uptimes := make(map[ids.ID]json.Uint32, p.MySubnets.Len()) - for subnetID := range p.MySubnets { - uptime, exist := p.ObservedUptime(subnetID) - if !exist { - continue - } - uptimes[subnetID] = json.Uint32(uptime) - } - - primaryUptime, exist := p.ObservedUptime(constants.PrimaryNetworkID) - if !exist { - primaryUptime = 0 - } + primaryUptime := p.ObservedUptime() ip, _ := ips.ParseAddrPort(p.conn.RemoteAddr().String()) return Info{ - IP: ip, - PublicIP: p.ip.AddrPort, - ID: p.id, - Version: p.version.String(), - LastSent: p.LastSent(), - LastReceived: p.LastReceived(), - ObservedUptime: json.Uint32(primaryUptime), - ObservedSubnetUptimes: uptimes, - TrackedSubnets: p.trackedSubnets, - SupportedACPs: p.supportedACPs, - ObjectedACPs: p.objectedACPs, + IP: ip, + PublicIP: p.ip.AddrPort, + ID: p.id, + Version: p.version.String(), + LastSent: p.LastSent(), + LastReceived: p.LastReceived(), + ObservedUptime: json.Uint32(primaryUptime), + TrackedSubnets: p.trackedSubnets, + SupportedACPs: p.supportedACPs, + ObjectedACPs: p.objectedACPs, } } @@ -312,12 +296,8 @@ func (p *peer) TrackedSubnets() set.Set[ids.ID] { return p.trackedSubnets } -func (p *peer) ObservedUptime(subnetID ids.ID) (uint32, bool) { - p.observedUptimesLock.RLock() - defer p.observedUptimesLock.RUnlock() - - uptime, exist := p.observedUptimes[subnetID] - return uptime, exist +func (p *peer) ObservedUptime() uint32 { + return p.observedUptime.Get() } func (p *peer) Send(ctx context.Context, msg message.OutboundMessage) bool { @@ -670,8 +650,8 @@ func (p *peer) sendNetworkMessages() { return } - primaryUptime, subnetUptimes := p.getUptimes() - pingMessage, err := p.MessageCreator.Ping(primaryUptime, subnetUptimes) + primaryUptime := p.getUptime() + pingMessage, err := p.MessageCreator.Ping(primaryUptime) if err != nil { p.Log.Error(failedToCreateMessageLog, zap.Stringer("nodeID", p.id), @@ -782,45 +762,7 @@ func (p *peer) handlePing(msg *p2p.Ping) { p.StartClose() return } - p.observeUptime(constants.PrimaryNetworkID, msg.Uptime) - - for _, subnetUptime := range msg.SubnetUptimes { - subnetID, err := ids.ToID(subnetUptime.SubnetId) - if err != nil { - p.Log.Debug(malformedMessageLog, - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PingOp), - zap.String("field", "subnetID"), - zap.Error(err), - ) - p.StartClose() - return - } - - if !p.MySubnets.Contains(subnetID) { - p.Log.Debug(malformedMessageLog, - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PingOp), - zap.Stringer("subnetID", subnetID), - zap.String("reason", "not tracking subnet"), - ) - p.StartClose() - return - } - - uptime := subnetUptime.Uptime - if uptime > 100 { - p.Log.Debug(malformedMessageLog, - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PingOp), - zap.Stringer("subnetID", subnetID), - zap.Uint32("uptime", uptime), - ) - p.StartClose() - return - } - p.observeUptime(subnetID, uptime) - } + p.observedUptime.Set(msg.Uptime) pongMessage, err := p.MessageCreator.Pong() if err != nil { @@ -836,10 +778,9 @@ func (p *peer) handlePing(msg *p2p.Ping) { p.Send(p.onClosingCtx, pongMessage) } -func (p *peer) getUptimes() (uint32, []*p2p.SubnetUptime) { +func (p *peer) getUptime() uint32 { 
primaryUptime, err := p.UptimeCalculator.CalculateUptimePercent( p.id, - constants.PrimaryNetworkID, ) if err != nil { p.Log.Debug(failedToGetUptimeLog, @@ -850,45 +791,12 @@ func (p *peer) getUptimes() (uint32, []*p2p.SubnetUptime) { primaryUptime = 0 } - subnetUptimes := make([]*p2p.SubnetUptime, 0, p.MySubnets.Len()) - for subnetID := range p.MySubnets { - if !p.trackedSubnets.Contains(subnetID) { - continue - } - - subnetUptime, err := p.UptimeCalculator.CalculateUptimePercent(p.id, subnetID) - if err != nil { - p.Log.Debug(failedToGetUptimeLog, - zap.Stringer("nodeID", p.id), - zap.Stringer("subnetID", subnetID), - zap.Error(err), - ) - continue - } - - subnetID := subnetID - subnetUptimes = append(subnetUptimes, &p2p.SubnetUptime{ - SubnetId: subnetID[:], - Uptime: uint32(subnetUptime * 100), - }) - } - primaryUptimePercent := uint32(primaryUptime * 100) - return primaryUptimePercent, subnetUptimes + return primaryUptimePercent } func (*peer) handlePong(*p2p.Pong) {} -// Record that the given peer perceives our uptime for the given [subnetID] -// to be [uptime]. -// Assumes [uptime] is in the range [0, 100] and [subnetID] is a valid ID of a -// subnet this peer tracks. -func (p *peer) observeUptime(subnetID ids.ID, uptime uint32) { - p.observedUptimesLock.Lock() - p.observedUptimes[subnetID] = uptime // [0, 100] percentage - p.observedUptimesLock.Unlock() -} - func (p *peer) handleHandshake(msg *p2p.Handshake) { if p.gotHandshake.Get() { p.Log.Debug(malformedMessageLog, diff --git a/network/peer/peer_test.go b/network/peer/peer_test.go index e29edbe17ba6..ae2f86a74fc1 100644 --- a/network/peer/peer_test.go +++ b/network/peer/peer_test.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/network/throttling" - "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" @@ -211,122 +210,35 @@ func TestSend(t *testing.T) { } func TestPingUptimes(t *testing.T) { - trackedSubnetID := ids.GenerateTestID() - untrackedSubnetID := ids.GenerateTestID() - sharedConfig := newConfig(t) - sharedConfig.MySubnets = set.Of(trackedSubnetID) - - testCases := []struct { - name string - msg message.OutboundMessage - shouldClose bool - assertFn func(*require.Assertions, *testPeer) - }{ - { - name: "primary network only", - msg: func() message.OutboundMessage { - pingMsg, err := sharedConfig.MessageCreator.Ping(1, nil) - require.NoError(t, err) - return pingMsg - }(), - shouldClose: false, - assertFn: func(require *require.Assertions, peer *testPeer) { - uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID) - require.True(ok) - require.Equal(uint32(1), uptime) - - uptime, ok = peer.ObservedUptime(trackedSubnetID) - require.False(ok) - require.Zero(uptime) - }, - }, - { - name: "primary network and subnet", - msg: func() message.OutboundMessage { - pingMsg, err := sharedConfig.MessageCreator.Ping( - 1, - []*p2p.SubnetUptime{ - { - SubnetId: trackedSubnetID[:], - Uptime: 1, - }, - }, - ) - require.NoError(t, err) - return pingMsg - }(), - shouldClose: false, - assertFn: func(require *require.Assertions, peer *testPeer) { - uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID) - require.True(ok) - require.Equal(uint32(1), uptime) - - uptime, ok = peer.ObservedUptime(trackedSubnetID) - require.True(ok) - require.Equal(uint32(1), uptime) - }, - }, - { - name: 
"primary network and non tracked subnet", - msg: func() message.OutboundMessage { - pingMsg, err := sharedConfig.MessageCreator.Ping( - 1, - []*p2p.SubnetUptime{ - { - // Providing the untrackedSubnetID here should cause - // the remote peer to disconnect from us. - SubnetId: untrackedSubnetID[:], - Uptime: 1, - }, - { - SubnetId: trackedSubnetID[:], - Uptime: 1, - }, - }, - ) - require.NoError(t, err) - return pingMsg - }(), - shouldClose: true, - assertFn: nil, - }, - } // The raw peers are generated outside of the test cases to avoid generating // many TLS keys. rawPeer0 := newRawTestPeer(t, sharedConfig) rawPeer1 := newRawTestPeer(t, sharedConfig) - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - require := require.New(t) - - peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) - awaitReady(t, peer0, peer1) - defer func() { - peer1.StartClose() - peer0.StartClose() - require.NoError(peer0.AwaitClosed(context.Background())) - require.NoError(peer1.AwaitClosed(context.Background())) - }() - - require.True(peer0.Send(context.Background(), tc.msg)) + require := require.New(t) - if tc.shouldClose { - require.NoError(peer1.AwaitClosed(context.Background())) - return - } + peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) + awaitReady(t, peer0, peer1) + defer func() { + peer1.StartClose() + peer0.StartClose() + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) + }() + pingMsg, err := sharedConfig.MessageCreator.Ping(0) + require.NoError(err) + require.True(peer0.Send(context.Background(), pingMsg)) - // we send Get message after ping to ensure Ping is handled by the - // time Get is handled. This is because Get is routed to the handler - // whereas Ping is handled by the peer directly. We have no way to - // know when the peer has handled the Ping message. - sendAndFlush(t, peer0, peer1) + // we send Get message after ping to ensure Ping is handled by the + // time Get is handled. This is because Get is routed to the handler + // whereas Ping is handled by the peer directly. We have no way to + // know when the peer has handled the Ping message. 
+ sendAndFlush(t, peer0, peer1) - tc.assertFn(require, peer1) - }) - } + uptime := peer1.ObservedUptime() + require.Equal(uint32(0), uptime) } func TestTrackedSubnets(t *testing.T) { diff --git a/network/peer/set_test.go b/network/peer/set_test.go index c28c1ce7ea98..fb84b5e67d74 100644 --- a/network/peer/set_test.go +++ b/network/peer/set_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils" ) func TestSet(t *testing.T) { @@ -18,12 +18,12 @@ func TestSet(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.BuildTestNodeID([]byte{0x01}), - observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 0}, + id: ids.BuildTestNodeID([]byte{0x01}), + observedUptime: *utils.NewAtomic[uint32](0), } updatedPeer1 := &peer{ - id: ids.BuildTestNodeID([]byte{0x01}), - observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 1}, + id: ids.BuildTestNodeID([]byte{0x01}), + observedUptime: *utils.NewAtomic[uint32](1), } peer2 := &peer{ id: ids.BuildTestNodeID([]byte{0x02}), @@ -42,8 +42,8 @@ func TestSet(t *testing.T) { set.Add(peer1) retrievedPeer1, peer1Found := set.GetByID(peer1.id) require.True(peer1Found) - observed1, _ := peer1.ObservedUptime(constants.PrimaryNetworkID) - observed2, _ := retrievedPeer1.ObservedUptime(constants.PrimaryNetworkID) + observed1 := peer1.ObservedUptime() + observed2 := retrievedPeer1.ObservedUptime() require.Equal(observed1, observed2) require.Equal(1, set.Len()) @@ -51,8 +51,8 @@ func TestSet(t *testing.T) { set.Add(updatedPeer1) retrievedPeer1, peer1Found = set.GetByID(peer1.id) require.True(peer1Found) - observed1, _ = updatedPeer1.ObservedUptime(constants.PrimaryNetworkID) - observed2, _ = retrievedPeer1.ObservedUptime(constants.PrimaryNetworkID) + observed1 = updatedPeer1.ObservedUptime() + observed2 = retrievedPeer1.ObservedUptime() require.Equal(observed1, observed2) require.Equal(1, set.Len()) @@ -60,8 +60,8 @@ func TestSet(t *testing.T) { set.Add(peer2) retrievedPeer2, peer2Found := set.GetByID(peer2.id) require.True(peer2Found) - observed1, _ = peer2.ObservedUptime(constants.PrimaryNetworkID) - observed2, _ = retrievedPeer2.ObservedUptime(constants.PrimaryNetworkID) + observed1 = peer2.ObservedUptime() + observed2 = retrievedPeer2.ObservedUptime() require.Equal(observed1, observed2) require.Equal(2, set.Len()) diff --git a/proto/p2p/p2p.proto b/proto/p2p/p2p.proto index 53c0c84de303..5d0a3a31a1eb 100644 --- a/proto/p2p/p2p.proto +++ b/proto/p2p/p2p.proto @@ -64,16 +64,7 @@ message Message { message Ping { // Uptime percentage on the primary network [0, 100] uint32 uptime = 1; - // Uptime percentage on subnets - repeated SubnetUptime subnet_uptimes = 2; -} - -// SubnetUptime is a descriptor for a peer's perceived uptime on a subnet. -message SubnetUptime { - // Subnet the peer is validating - bytes subnet_id = 1; - // Uptime percentage on the subnet [0, 100] - uint32 uptime = 2; + reserved 2; // Until E upgrade is activated. } // Pong is sent in response to a Ping. 
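
[Editorial note, not part of the patch] With the proto change above, SubnetUptime is removed and field 2 of Ping is reserved, so a Ping now carries only the sender's primary-network uptime percentage. Below is a minimal sketch of the simplified message and the receiver-side bounds check that the patch keeps in network/peer/peer.go; the Ping struct here is a local stand-in for the generated p2p.Ping type, not the generated code itself.

// Sketch only: mirrors the simplified Ping wire format after this patch.
// Ping is a hypothetical stand-in for the generated p2p.Ping; the old
// subnet_uptimes field (tag 2) is reserved and no longer present.
package main

import (
	"errors"
	"fmt"
)

// Ping carries only the sender's primary-network uptime, as a percentage.
type Ping struct {
	Uptime uint32 // [0, 100]
}

// handlePing mimics the receiver-side validation retained by the patch:
// an uptime above 100 is malformed and causes the peer to be dropped.
func handlePing(msg Ping) (uint32, error) {
	if msg.Uptime > 100 {
		return 0, errors.New("malformed ping: uptime out of range")
	}
	return msg.Uptime, nil
}

func main() {
	observed, err := handlePing(Ping{Uptime: 97})
	fmt.Println(observed, err) // 97 <nil>

	_, err = handlePing(Ping{Uptime: 250})
	fmt.Println(err) // malformed ping: uptime out of range
}
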
diff --git a/proto/pb/p2p/p2p.pb.go b/proto/pb/p2p/p2p.pb.go index bd55ad703546..ecb33f05a920 100644 --- a/proto/pb/p2p/p2p.pb.go +++ b/proto/pb/p2p/p2p.pb.go @@ -498,8 +498,6 @@ type Ping struct { // Uptime percentage on the primary network [0, 100] Uptime uint32 `protobuf:"varint,1,opt,name=uptime,proto3" json:"uptime,omitempty"` - // Uptime percentage on subnets - SubnetUptimes []*SubnetUptime `protobuf:"bytes,2,rep,name=subnet_uptimes,json=subnetUptimes,proto3" json:"subnet_uptimes,omitempty"` } func (x *Ping) Reset() { @@ -541,71 +539,6 @@ func (x *Ping) GetUptime() uint32 { return 0 } -func (x *Ping) GetSubnetUptimes() []*SubnetUptime { - if x != nil { - return x.SubnetUptimes - } - return nil -} - -// SubnetUptime is a descriptor for a peer's perceived uptime on a subnet. -type SubnetUptime struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Subnet the peer is validating - SubnetId []byte `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` - // Uptime percentage on the subnet [0, 100] - Uptime uint32 `protobuf:"varint,2,opt,name=uptime,proto3" json:"uptime,omitempty"` -} - -func (x *SubnetUptime) Reset() { - *x = SubnetUptime{} - if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SubnetUptime) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubnetUptime) ProtoMessage() {} - -func (x *SubnetUptime) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubnetUptime.ProtoReflect.Descriptor instead. -func (*SubnetUptime) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{2} -} - -func (x *SubnetUptime) GetSubnetId() []byte { - if x != nil { - return x.SubnetId - } - return nil -} - -func (x *SubnetUptime) GetUptime() uint32 { - if x != nil { - return x.Uptime - } - return 0 -} - // Pong is sent in response to a Ping. type Pong struct { state protoimpl.MessageState @@ -616,7 +549,7 @@ type Pong struct { func (x *Pong) Reset() { *x = Pong{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[3] + mi := &file_p2p_p2p_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -629,7 +562,7 @@ func (x *Pong) String() string { func (*Pong) ProtoMessage() {} func (x *Pong) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[3] + mi := &file_p2p_p2p_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -642,7 +575,7 @@ func (x *Pong) ProtoReflect() protoreflect.Message { // Deprecated: Use Pong.ProtoReflect.Descriptor instead. 
func (*Pong) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{3} + return file_p2p_p2p_proto_rawDescGZIP(), []int{2} } // Handshake is the first outbound message sent to a peer when a connection is @@ -684,7 +617,7 @@ type Handshake struct { func (x *Handshake) Reset() { *x = Handshake{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[4] + mi := &file_p2p_p2p_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -697,7 +630,7 @@ func (x *Handshake) String() string { func (*Handshake) ProtoMessage() {} func (x *Handshake) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[4] + mi := &file_p2p_p2p_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -710,7 +643,7 @@ func (x *Handshake) ProtoReflect() protoreflect.Message { // Deprecated: Use Handshake.ProtoReflect.Descriptor instead. func (*Handshake) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{4} + return file_p2p_p2p_proto_rawDescGZIP(), []int{3} } func (x *Handshake) GetNetworkId() uint32 { @@ -814,7 +747,7 @@ type Client struct { func (x *Client) Reset() { *x = Client{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[5] + mi := &file_p2p_p2p_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -827,7 +760,7 @@ func (x *Client) String() string { func (*Client) ProtoMessage() {} func (x *Client) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[5] + mi := &file_p2p_p2p_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -840,7 +773,7 @@ func (x *Client) ProtoReflect() protoreflect.Message { // Deprecated: Use Client.ProtoReflect.Descriptor instead. func (*Client) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{5} + return file_p2p_p2p_proto_rawDescGZIP(), []int{4} } func (x *Client) GetName() string { @@ -884,7 +817,7 @@ type BloomFilter struct { func (x *BloomFilter) Reset() { *x = BloomFilter{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[6] + mi := &file_p2p_p2p_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -897,7 +830,7 @@ func (x *BloomFilter) String() string { func (*BloomFilter) ProtoMessage() {} func (x *BloomFilter) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[6] + mi := &file_p2p_p2p_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -910,7 +843,7 @@ func (x *BloomFilter) ProtoReflect() protoreflect.Message { // Deprecated: Use BloomFilter.ProtoReflect.Descriptor instead. 
func (*BloomFilter) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{6} + return file_p2p_p2p_proto_rawDescGZIP(), []int{5} } func (x *BloomFilter) GetFilter() []byte { @@ -950,7 +883,7 @@ type ClaimedIpPort struct { func (x *ClaimedIpPort) Reset() { *x = ClaimedIpPort{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[7] + mi := &file_p2p_p2p_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -963,7 +896,7 @@ func (x *ClaimedIpPort) String() string { func (*ClaimedIpPort) ProtoMessage() {} func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[7] + mi := &file_p2p_p2p_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -976,7 +909,7 @@ func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { // Deprecated: Use ClaimedIpPort.ProtoReflect.Descriptor instead. func (*ClaimedIpPort) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{7} + return file_p2p_p2p_proto_rawDescGZIP(), []int{6} } func (x *ClaimedIpPort) GetX509Certificate() []byte { @@ -1038,7 +971,7 @@ type GetPeerList struct { func (x *GetPeerList) Reset() { *x = GetPeerList{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[8] + mi := &file_p2p_p2p_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1051,7 +984,7 @@ func (x *GetPeerList) String() string { func (*GetPeerList) ProtoMessage() {} func (x *GetPeerList) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[8] + mi := &file_p2p_p2p_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1064,7 +997,7 @@ func (x *GetPeerList) ProtoReflect() protoreflect.Message { // Deprecated: Use GetPeerList.ProtoReflect.Descriptor instead. func (*GetPeerList) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{8} + return file_p2p_p2p_proto_rawDescGZIP(), []int{7} } func (x *GetPeerList) GetKnownPeers() *BloomFilter { @@ -1094,7 +1027,7 @@ type PeerList struct { func (x *PeerList) Reset() { *x = PeerList{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[9] + mi := &file_p2p_p2p_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1107,7 +1040,7 @@ func (x *PeerList) String() string { func (*PeerList) ProtoMessage() {} func (x *PeerList) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[9] + mi := &file_p2p_p2p_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1120,7 +1053,7 @@ func (x *PeerList) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerList.ProtoReflect.Descriptor instead. 
func (*PeerList) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{9} + return file_p2p_p2p_proto_rawDescGZIP(), []int{8} } func (x *PeerList) GetClaimedIpPorts() []*ClaimedIpPort { @@ -1148,7 +1081,7 @@ type GetStateSummaryFrontier struct { func (x *GetStateSummaryFrontier) Reset() { *x = GetStateSummaryFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[10] + mi := &file_p2p_p2p_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1161,7 +1094,7 @@ func (x *GetStateSummaryFrontier) String() string { func (*GetStateSummaryFrontier) ProtoMessage() {} func (x *GetStateSummaryFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[10] + mi := &file_p2p_p2p_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1174,7 +1107,7 @@ func (x *GetStateSummaryFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryFrontier.ProtoReflect.Descriptor instead. func (*GetStateSummaryFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{10} + return file_p2p_p2p_proto_rawDescGZIP(), []int{9} } func (x *GetStateSummaryFrontier) GetChainId() []byte { @@ -1215,7 +1148,7 @@ type StateSummaryFrontier struct { func (x *StateSummaryFrontier) Reset() { *x = StateSummaryFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[11] + mi := &file_p2p_p2p_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1228,7 +1161,7 @@ func (x *StateSummaryFrontier) String() string { func (*StateSummaryFrontier) ProtoMessage() {} func (x *StateSummaryFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[11] + mi := &file_p2p_p2p_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1241,7 +1174,7 @@ func (x *StateSummaryFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryFrontier.ProtoReflect.Descriptor instead. func (*StateSummaryFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{11} + return file_p2p_p2p_proto_rawDescGZIP(), []int{10} } func (x *StateSummaryFrontier) GetChainId() []byte { @@ -1285,7 +1218,7 @@ type GetAcceptedStateSummary struct { func (x *GetAcceptedStateSummary) Reset() { *x = GetAcceptedStateSummary{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[12] + mi := &file_p2p_p2p_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1298,7 +1231,7 @@ func (x *GetAcceptedStateSummary) String() string { func (*GetAcceptedStateSummary) ProtoMessage() {} func (x *GetAcceptedStateSummary) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[12] + mi := &file_p2p_p2p_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1311,7 +1244,7 @@ func (x *GetAcceptedStateSummary) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAcceptedStateSummary.ProtoReflect.Descriptor instead. 
func (*GetAcceptedStateSummary) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{12} + return file_p2p_p2p_proto_rawDescGZIP(), []int{11} } func (x *GetAcceptedStateSummary) GetChainId() []byte { @@ -1359,7 +1292,7 @@ type AcceptedStateSummary struct { func (x *AcceptedStateSummary) Reset() { *x = AcceptedStateSummary{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[13] + mi := &file_p2p_p2p_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1372,7 +1305,7 @@ func (x *AcceptedStateSummary) String() string { func (*AcceptedStateSummary) ProtoMessage() {} func (x *AcceptedStateSummary) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[13] + mi := &file_p2p_p2p_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1385,7 +1318,7 @@ func (x *AcceptedStateSummary) ProtoReflect() protoreflect.Message { // Deprecated: Use AcceptedStateSummary.ProtoReflect.Descriptor instead. func (*AcceptedStateSummary) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{13} + return file_p2p_p2p_proto_rawDescGZIP(), []int{12} } func (x *AcceptedStateSummary) GetChainId() []byte { @@ -1428,7 +1361,7 @@ type GetAcceptedFrontier struct { func (x *GetAcceptedFrontier) Reset() { *x = GetAcceptedFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[14] + mi := &file_p2p_p2p_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1441,7 +1374,7 @@ func (x *GetAcceptedFrontier) String() string { func (*GetAcceptedFrontier) ProtoMessage() {} func (x *GetAcceptedFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[14] + mi := &file_p2p_p2p_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1454,7 +1387,7 @@ func (x *GetAcceptedFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAcceptedFrontier.ProtoReflect.Descriptor instead. func (*GetAcceptedFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{14} + return file_p2p_p2p_proto_rawDescGZIP(), []int{13} } func (x *GetAcceptedFrontier) GetChainId() []byte { @@ -1497,7 +1430,7 @@ type AcceptedFrontier struct { func (x *AcceptedFrontier) Reset() { *x = AcceptedFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[15] + mi := &file_p2p_p2p_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1510,7 +1443,7 @@ func (x *AcceptedFrontier) String() string { func (*AcceptedFrontier) ProtoMessage() {} func (x *AcceptedFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[15] + mi := &file_p2p_p2p_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1523,7 +1456,7 @@ func (x *AcceptedFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use AcceptedFrontier.ProtoReflect.Descriptor instead. 
func (*AcceptedFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{15} + return file_p2p_p2p_proto_rawDescGZIP(), []int{14} } func (x *AcceptedFrontier) GetChainId() []byte { @@ -1569,7 +1502,7 @@ type GetAccepted struct { func (x *GetAccepted) Reset() { *x = GetAccepted{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[16] + mi := &file_p2p_p2p_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1582,7 +1515,7 @@ func (x *GetAccepted) String() string { func (*GetAccepted) ProtoMessage() {} func (x *GetAccepted) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[16] + mi := &file_p2p_p2p_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1595,7 +1528,7 @@ func (x *GetAccepted) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAccepted.ProtoReflect.Descriptor instead. func (*GetAccepted) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{16} + return file_p2p_p2p_proto_rawDescGZIP(), []int{15} } func (x *GetAccepted) GetChainId() []byte { @@ -1646,7 +1579,7 @@ type Accepted struct { func (x *Accepted) Reset() { *x = Accepted{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[17] + mi := &file_p2p_p2p_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1659,7 +1592,7 @@ func (x *Accepted) String() string { func (*Accepted) ProtoMessage() {} func (x *Accepted) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[17] + mi := &file_p2p_p2p_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1672,7 +1605,7 @@ func (x *Accepted) ProtoReflect() protoreflect.Message { // Deprecated: Use Accepted.ProtoReflect.Descriptor instead. func (*Accepted) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{17} + return file_p2p_p2p_proto_rawDescGZIP(), []int{16} } func (x *Accepted) GetChainId() []byte { @@ -1719,7 +1652,7 @@ type GetAncestors struct { func (x *GetAncestors) Reset() { *x = GetAncestors{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[18] + mi := &file_p2p_p2p_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1732,7 +1665,7 @@ func (x *GetAncestors) String() string { func (*GetAncestors) ProtoMessage() {} func (x *GetAncestors) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[18] + mi := &file_p2p_p2p_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1745,7 +1678,7 @@ func (x *GetAncestors) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestors.ProtoReflect.Descriptor instead. 
func (*GetAncestors) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{18} + return file_p2p_p2p_proto_rawDescGZIP(), []int{17} } func (x *GetAncestors) GetChainId() []byte { @@ -1803,7 +1736,7 @@ type Ancestors struct { func (x *Ancestors) Reset() { *x = Ancestors{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[19] + mi := &file_p2p_p2p_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1816,7 +1749,7 @@ func (x *Ancestors) String() string { func (*Ancestors) ProtoMessage() {} func (x *Ancestors) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[19] + mi := &file_p2p_p2p_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1829,7 +1762,7 @@ func (x *Ancestors) ProtoReflect() protoreflect.Message { // Deprecated: Use Ancestors.ProtoReflect.Descriptor instead. func (*Ancestors) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{19} + return file_p2p_p2p_proto_rawDescGZIP(), []int{18} } func (x *Ancestors) GetChainId() []byte { @@ -1874,7 +1807,7 @@ type Get struct { func (x *Get) Reset() { *x = Get{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[20] + mi := &file_p2p_p2p_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1887,7 +1820,7 @@ func (x *Get) String() string { func (*Get) ProtoMessage() {} func (x *Get) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[20] + mi := &file_p2p_p2p_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1900,7 +1833,7 @@ func (x *Get) ProtoReflect() protoreflect.Message { // Deprecated: Use Get.ProtoReflect.Descriptor instead. func (*Get) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{20} + return file_p2p_p2p_proto_rawDescGZIP(), []int{19} } func (x *Get) GetChainId() []byte { @@ -1948,7 +1881,7 @@ type Put struct { func (x *Put) Reset() { *x = Put{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[21] + mi := &file_p2p_p2p_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1961,7 +1894,7 @@ func (x *Put) String() string { func (*Put) ProtoMessage() {} func (x *Put) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[21] + mi := &file_p2p_p2p_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1974,7 +1907,7 @@ func (x *Put) ProtoReflect() protoreflect.Message { // Deprecated: Use Put.ProtoReflect.Descriptor instead. 
func (*Put) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{21} + return file_p2p_p2p_proto_rawDescGZIP(), []int{20} } func (x *Put) GetChainId() []byte { @@ -2021,7 +1954,7 @@ type PushQuery struct { func (x *PushQuery) Reset() { *x = PushQuery{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[22] + mi := &file_p2p_p2p_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2034,7 +1967,7 @@ func (x *PushQuery) String() string { func (*PushQuery) ProtoMessage() {} func (x *PushQuery) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[22] + mi := &file_p2p_p2p_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2047,7 +1980,7 @@ func (x *PushQuery) ProtoReflect() protoreflect.Message { // Deprecated: Use PushQuery.ProtoReflect.Descriptor instead. func (*PushQuery) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{22} + return file_p2p_p2p_proto_rawDescGZIP(), []int{21} } func (x *PushQuery) GetChainId() []byte { @@ -2108,7 +2041,7 @@ type PullQuery struct { func (x *PullQuery) Reset() { *x = PullQuery{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[23] + mi := &file_p2p_p2p_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2121,7 +2054,7 @@ func (x *PullQuery) String() string { func (*PullQuery) ProtoMessage() {} func (x *PullQuery) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[23] + mi := &file_p2p_p2p_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2134,7 +2067,7 @@ func (x *PullQuery) ProtoReflect() protoreflect.Message { // Deprecated: Use PullQuery.ProtoReflect.Descriptor instead. func (*PullQuery) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{23} + return file_p2p_p2p_proto_rawDescGZIP(), []int{22} } func (x *PullQuery) GetChainId() []byte { @@ -2194,7 +2127,7 @@ type Chits struct { func (x *Chits) Reset() { *x = Chits{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[24] + mi := &file_p2p_p2p_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2207,7 +2140,7 @@ func (x *Chits) String() string { func (*Chits) ProtoMessage() {} func (x *Chits) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[24] + mi := &file_p2p_p2p_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2220,7 +2153,7 @@ func (x *Chits) ProtoReflect() protoreflect.Message { // Deprecated: Use Chits.ProtoReflect.Descriptor instead. 
func (*Chits) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{24} + return file_p2p_p2p_proto_rawDescGZIP(), []int{23} } func (x *Chits) GetChainId() []byte { @@ -2280,7 +2213,7 @@ type AppRequest struct { func (x *AppRequest) Reset() { *x = AppRequest{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[25] + mi := &file_p2p_p2p_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2293,7 +2226,7 @@ func (x *AppRequest) String() string { func (*AppRequest) ProtoMessage() {} func (x *AppRequest) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[25] + mi := &file_p2p_p2p_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2306,7 +2239,7 @@ func (x *AppRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequest.ProtoReflect.Descriptor instead. func (*AppRequest) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{25} + return file_p2p_p2p_proto_rawDescGZIP(), []int{24} } func (x *AppRequest) GetChainId() []byte { @@ -2354,7 +2287,7 @@ type AppResponse struct { func (x *AppResponse) Reset() { *x = AppResponse{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[26] + mi := &file_p2p_p2p_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2367,7 +2300,7 @@ func (x *AppResponse) String() string { func (*AppResponse) ProtoMessage() {} func (x *AppResponse) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[26] + mi := &file_p2p_p2p_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2380,7 +2313,7 @@ func (x *AppResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AppResponse.ProtoReflect.Descriptor instead. func (*AppResponse) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{26} + return file_p2p_p2p_proto_rawDescGZIP(), []int{25} } func (x *AppResponse) GetChainId() []byte { @@ -2423,7 +2356,7 @@ type AppError struct { func (x *AppError) Reset() { *x = AppError{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[27] + mi := &file_p2p_p2p_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2436,7 +2369,7 @@ func (x *AppError) String() string { func (*AppError) ProtoMessage() {} func (x *AppError) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[27] + mi := &file_p2p_p2p_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2449,7 +2382,7 @@ func (x *AppError) ProtoReflect() protoreflect.Message { // Deprecated: Use AppError.ProtoReflect.Descriptor instead. 
func (*AppError) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{27} + return file_p2p_p2p_proto_rawDescGZIP(), []int{26} } func (x *AppError) GetChainId() []byte { @@ -2495,7 +2428,7 @@ type AppGossip struct { func (x *AppGossip) Reset() { *x = AppGossip{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[28] + mi := &file_p2p_p2p_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2508,7 +2441,7 @@ func (x *AppGossip) String() string { func (*AppGossip) ProtoMessage() {} func (x *AppGossip) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[28] + mi := &file_p2p_p2p_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2521,7 +2454,7 @@ func (x *AppGossip) ProtoReflect() protoreflect.Message { // Deprecated: Use AppGossip.ProtoReflect.Descriptor instead. func (*AppGossip) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{28} + return file_p2p_p2p_proto_rawDescGZIP(), []int{27} } func (x *AppGossip) GetChainId() []byte { @@ -2629,240 +2562,232 @@ var file_p2p_p2p_proto_rawDesc = []byte{ 0x6f, 0x72, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x08, 0x61, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4a, 0x04, - 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x24, 0x10, 0x25, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x69, + 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x24, 0x10, 0x25, 0x22, 0x24, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, - 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, - 0x64, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x6e, - 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xb3, 0x03, - 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x79, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, - 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, - 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x26, 0x0a, 
0x0f, 0x69, 0x70, 0x5f, 0x73, 0x69, 0x67, 0x6e, - 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, - 0x69, 0x70, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x23, 0x0a, - 0x0e, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x70, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x53, - 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x73, 0x75, - 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, - 0x70, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0d, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x41, 0x63, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x41, 0x63, 0x70, 0x73, 0x12, 0x31, 0x0a, 0x0b, - 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, - 0x1c, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x62, 0x6c, 0x73, 0x5f, 0x73, 0x69, 0x67, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x69, 0x70, 0x42, 0x6c, 0x73, 0x53, 0x69, 0x67, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x5e, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, - 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, 0x61, - 0x74, 0x63, 0x68, 0x22, 0x39, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, - 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x22, 0xbd, - 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, - 0x12, 0x29, 0x0a, 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, - 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, - 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 
0x74, 0x12, 0x1c, 0x0a, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, - 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x22, 0x40, - 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x31, 0x0a, - 0x0b, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, - 0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, - 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, - 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x61, 0x69, - 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x6f, 0x0a, 0x17, 0x47, 0x65, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, - 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, - 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x6a, 0x0a, 0x14, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, - 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, - 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, - 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, - 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, - 
0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, 0x14, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, 0x22, 0x71, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, - 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x6f, 0x0a, 0x10, 0x41, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, - 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8e, 0x01, 0x0a, 0x0b, 0x47, - 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, + 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, + 0x22, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x22, 0xb3, 0x03, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, + 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, + 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x26, 0x0a, 0x0f, + 0x69, 0x70, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x69, 0x70, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0e, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x69, 0x64, 0x5f, 0x73, 
0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x70, + 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x53, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, + 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6e, 0x65, + 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, + 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x70, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x0d, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x41, 0x63, 0x70, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x70, 0x73, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x41, + 0x63, 0x70, 0x73, 0x12, 0x31, 0x0a, 0x0b, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x70, 0x65, 0x65, + 0x72, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x42, + 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x62, 0x6c, 0x73, + 0x5f, 0x73, 0x69, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x69, 0x70, 0x42, 0x6c, + 0x73, 0x53, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x5e, 0x0a, 0x06, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, + 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0x39, 0x0a, 0x0b, 0x42, 0x6c, + 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x73, 0x61, 0x6c, 0x74, 0x22, 0xbd, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, + 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, + 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x74, 0x78, 0x49, 0x64, 0x22, 0x40, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x0b, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x70, 0x65, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, + 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x69, + 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, + 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, + 0x73, 0x22, 0x6f, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, + 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, + 0x6e, 0x65, 0x22, 0x6a, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x89, + 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x69, 0x0a, 0x08, 0x41, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 
0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, - 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x22, 0x65, 0x0a, 0x09, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, - 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x03, 0x47, 0x65, - 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, 0x14, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, 0x22, 0x71, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, + 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 
0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x22, 0x6f, 0x0a, 0x10, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, + 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x22, 0x8e, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, - 0x22, 0x5d, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x22, - 0xb0, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, - 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 
0x65, 0x69, 0x67, 0x68, 0x74, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x22, 0xb5, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xba, 0x01, 0x0a, 0x05, 0x43, - 0x68, 0x69, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x22, 0x69, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, + 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xb9, 0x01, + 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, + 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, + 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, + 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x65, 0x0a, 0x09, 0x41, 0x6e, 0x63, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 
0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, + 0x22, 0x84, 0x01, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x5d, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, + 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x22, 0xb0, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, - 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, - 0x49, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, - 0x69, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x13, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x41, - 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 
- 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, - 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, - 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xb5, 0x01, 0x0a, 0x09, 0x50, 0x75, + 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x88, - 0x01, 0x0a, 0x08, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, + 0x06, 0x22, 0xba, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, - 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 
0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, - 0x0a, 0x0a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, - 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, - 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, - 0x48, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4e, 0x4f, 0x57, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, - 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, - 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, + 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x7f, + 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, + 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, + 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, + 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, + 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x08, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x12, 0x19, 0x0a, 
0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, + 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, 0x0a, 0x0a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x48, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, + 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4e, 0x4f, 0x57, 0x4d, + 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, + 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2878,75 +2803,73 @@ func file_p2p_p2p_proto_rawDescGZIP() []byte { } var file_p2p_p2p_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 28) var file_p2p_p2p_proto_goTypes = []interface{}{ (EngineType)(0), // 0: p2p.EngineType (*Message)(nil), // 1: p2p.Message (*Ping)(nil), // 2: p2p.Ping - (*SubnetUptime)(nil), // 3: p2p.SubnetUptime - (*Pong)(nil), // 4: p2p.Pong - (*Handshake)(nil), // 5: p2p.Handshake - (*Client)(nil), // 6: p2p.Client - (*BloomFilter)(nil), // 7: p2p.BloomFilter - (*ClaimedIpPort)(nil), // 8: p2p.ClaimedIpPort - (*GetPeerList)(nil), // 9: p2p.GetPeerList - (*PeerList)(nil), // 10: p2p.PeerList - (*GetStateSummaryFrontier)(nil), // 11: p2p.GetStateSummaryFrontier - (*StateSummaryFrontier)(nil), // 12: p2p.StateSummaryFrontier - (*GetAcceptedStateSummary)(nil), // 13: p2p.GetAcceptedStateSummary - (*AcceptedStateSummary)(nil), // 14: p2p.AcceptedStateSummary - (*GetAcceptedFrontier)(nil), // 15: p2p.GetAcceptedFrontier - (*AcceptedFrontier)(nil), // 16: p2p.AcceptedFrontier - (*GetAccepted)(nil), // 17: p2p.GetAccepted - (*Accepted)(nil), // 18: p2p.Accepted - (*GetAncestors)(nil), // 19: p2p.GetAncestors - (*Ancestors)(nil), // 20: p2p.Ancestors - (*Get)(nil), // 21: p2p.Get - (*Put)(nil), // 22: p2p.Put - (*PushQuery)(nil), // 23: p2p.PushQuery - (*PullQuery)(nil), // 
24: p2p.PullQuery - (*Chits)(nil), // 25: p2p.Chits - (*AppRequest)(nil), // 26: p2p.AppRequest - (*AppResponse)(nil), // 27: p2p.AppResponse - (*AppError)(nil), // 28: p2p.AppError - (*AppGossip)(nil), // 29: p2p.AppGossip + (*Pong)(nil), // 3: p2p.Pong + (*Handshake)(nil), // 4: p2p.Handshake + (*Client)(nil), // 5: p2p.Client + (*BloomFilter)(nil), // 6: p2p.BloomFilter + (*ClaimedIpPort)(nil), // 7: p2p.ClaimedIpPort + (*GetPeerList)(nil), // 8: p2p.GetPeerList + (*PeerList)(nil), // 9: p2p.PeerList + (*GetStateSummaryFrontier)(nil), // 10: p2p.GetStateSummaryFrontier + (*StateSummaryFrontier)(nil), // 11: p2p.StateSummaryFrontier + (*GetAcceptedStateSummary)(nil), // 12: p2p.GetAcceptedStateSummary + (*AcceptedStateSummary)(nil), // 13: p2p.AcceptedStateSummary + (*GetAcceptedFrontier)(nil), // 14: p2p.GetAcceptedFrontier + (*AcceptedFrontier)(nil), // 15: p2p.AcceptedFrontier + (*GetAccepted)(nil), // 16: p2p.GetAccepted + (*Accepted)(nil), // 17: p2p.Accepted + (*GetAncestors)(nil), // 18: p2p.GetAncestors + (*Ancestors)(nil), // 19: p2p.Ancestors + (*Get)(nil), // 20: p2p.Get + (*Put)(nil), // 21: p2p.Put + (*PushQuery)(nil), // 22: p2p.PushQuery + (*PullQuery)(nil), // 23: p2p.PullQuery + (*Chits)(nil), // 24: p2p.Chits + (*AppRequest)(nil), // 25: p2p.AppRequest + (*AppResponse)(nil), // 26: p2p.AppResponse + (*AppError)(nil), // 27: p2p.AppError + (*AppGossip)(nil), // 28: p2p.AppGossip } var file_p2p_p2p_proto_depIdxs = []int32{ 2, // 0: p2p.Message.ping:type_name -> p2p.Ping - 4, // 1: p2p.Message.pong:type_name -> p2p.Pong - 5, // 2: p2p.Message.handshake:type_name -> p2p.Handshake - 9, // 3: p2p.Message.get_peer_list:type_name -> p2p.GetPeerList - 10, // 4: p2p.Message.peer_list:type_name -> p2p.PeerList - 11, // 5: p2p.Message.get_state_summary_frontier:type_name -> p2p.GetStateSummaryFrontier - 12, // 6: p2p.Message.state_summary_frontier:type_name -> p2p.StateSummaryFrontier - 13, // 7: p2p.Message.get_accepted_state_summary:type_name -> p2p.GetAcceptedStateSummary - 14, // 8: p2p.Message.accepted_state_summary:type_name -> p2p.AcceptedStateSummary - 15, // 9: p2p.Message.get_accepted_frontier:type_name -> p2p.GetAcceptedFrontier - 16, // 10: p2p.Message.accepted_frontier:type_name -> p2p.AcceptedFrontier - 17, // 11: p2p.Message.get_accepted:type_name -> p2p.GetAccepted - 18, // 12: p2p.Message.accepted:type_name -> p2p.Accepted - 19, // 13: p2p.Message.get_ancestors:type_name -> p2p.GetAncestors - 20, // 14: p2p.Message.ancestors:type_name -> p2p.Ancestors - 21, // 15: p2p.Message.get:type_name -> p2p.Get - 22, // 16: p2p.Message.put:type_name -> p2p.Put - 23, // 17: p2p.Message.push_query:type_name -> p2p.PushQuery - 24, // 18: p2p.Message.pull_query:type_name -> p2p.PullQuery - 25, // 19: p2p.Message.chits:type_name -> p2p.Chits - 26, // 20: p2p.Message.app_request:type_name -> p2p.AppRequest - 27, // 21: p2p.Message.app_response:type_name -> p2p.AppResponse - 29, // 22: p2p.Message.app_gossip:type_name -> p2p.AppGossip - 28, // 23: p2p.Message.app_error:type_name -> p2p.AppError - 3, // 24: p2p.Ping.subnet_uptimes:type_name -> p2p.SubnetUptime - 6, // 25: p2p.Handshake.client:type_name -> p2p.Client - 7, // 26: p2p.Handshake.known_peers:type_name -> p2p.BloomFilter - 7, // 27: p2p.GetPeerList.known_peers:type_name -> p2p.BloomFilter - 8, // 28: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort - 0, // 29: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType - 30, // [30:30] is the sub-list for method output_type - 30, // [30:30] is the sub-list for 
method input_type - 30, // [30:30] is the sub-list for extension type_name - 30, // [30:30] is the sub-list for extension extendee - 0, // [0:30] is the sub-list for field type_name + 3, // 1: p2p.Message.pong:type_name -> p2p.Pong + 4, // 2: p2p.Message.handshake:type_name -> p2p.Handshake + 8, // 3: p2p.Message.get_peer_list:type_name -> p2p.GetPeerList + 9, // 4: p2p.Message.peer_list:type_name -> p2p.PeerList + 10, // 5: p2p.Message.get_state_summary_frontier:type_name -> p2p.GetStateSummaryFrontier + 11, // 6: p2p.Message.state_summary_frontier:type_name -> p2p.StateSummaryFrontier + 12, // 7: p2p.Message.get_accepted_state_summary:type_name -> p2p.GetAcceptedStateSummary + 13, // 8: p2p.Message.accepted_state_summary:type_name -> p2p.AcceptedStateSummary + 14, // 9: p2p.Message.get_accepted_frontier:type_name -> p2p.GetAcceptedFrontier + 15, // 10: p2p.Message.accepted_frontier:type_name -> p2p.AcceptedFrontier + 16, // 11: p2p.Message.get_accepted:type_name -> p2p.GetAccepted + 17, // 12: p2p.Message.accepted:type_name -> p2p.Accepted + 18, // 13: p2p.Message.get_ancestors:type_name -> p2p.GetAncestors + 19, // 14: p2p.Message.ancestors:type_name -> p2p.Ancestors + 20, // 15: p2p.Message.get:type_name -> p2p.Get + 21, // 16: p2p.Message.put:type_name -> p2p.Put + 22, // 17: p2p.Message.push_query:type_name -> p2p.PushQuery + 23, // 18: p2p.Message.pull_query:type_name -> p2p.PullQuery + 24, // 19: p2p.Message.chits:type_name -> p2p.Chits + 25, // 20: p2p.Message.app_request:type_name -> p2p.AppRequest + 26, // 21: p2p.Message.app_response:type_name -> p2p.AppResponse + 28, // 22: p2p.Message.app_gossip:type_name -> p2p.AppGossip + 27, // 23: p2p.Message.app_error:type_name -> p2p.AppError + 5, // 24: p2p.Handshake.client:type_name -> p2p.Client + 6, // 25: p2p.Handshake.known_peers:type_name -> p2p.BloomFilter + 6, // 26: p2p.GetPeerList.known_peers:type_name -> p2p.BloomFilter + 7, // 27: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort + 0, // 28: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 29, // [29:29] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_p2p_p2p_proto_init() } @@ -2980,18 +2903,6 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SubnetUptime); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_p2p_p2p_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Pong); i { case 0: return &v.state @@ -3003,7 +2914,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Handshake); i { case 0: return &v.state @@ -3015,7 +2926,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Client); i { case 0: return &v.state @@ -3027,7 +2938,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[6].Exporter = func(v interface{}, i int) 
interface{} { + file_p2p_p2p_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BloomFilter); i { case 0: return &v.state @@ -3039,7 +2950,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ClaimedIpPort); i { case 0: return &v.state @@ -3051,7 +2962,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetPeerList); i { case 0: return &v.state @@ -3063,7 +2974,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PeerList); i { case 0: return &v.state @@ -3075,7 +2986,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStateSummaryFrontier); i { case 0: return &v.state @@ -3087,7 +2998,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSummaryFrontier); i { case 0: return &v.state @@ -3099,7 +3010,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAcceptedStateSummary); i { case 0: return &v.state @@ -3111,7 +3022,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AcceptedStateSummary); i { case 0: return &v.state @@ -3123,7 +3034,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAcceptedFrontier); i { case 0: return &v.state @@ -3135,7 +3046,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AcceptedFrontier); i { case 0: return &v.state @@ -3147,7 +3058,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAccepted); i { case 0: return &v.state @@ -3159,7 +3070,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Accepted); i { case 0: return &v.state @@ -3171,7 +3082,7 @@ func file_p2p_p2p_proto_init() 
{ return nil } } - file_p2p_p2p_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAncestors); i { case 0: return &v.state @@ -3183,7 +3094,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Ancestors); i { case 0: return &v.state @@ -3195,7 +3106,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Get); i { case 0: return &v.state @@ -3207,7 +3118,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Put); i { case 0: return &v.state @@ -3219,7 +3130,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PushQuery); i { case 0: return &v.state @@ -3231,7 +3142,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PullQuery); i { case 0: return &v.state @@ -3243,7 +3154,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Chits); i { case 0: return &v.state @@ -3255,7 +3166,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppRequest); i { case 0: return &v.state @@ -3267,7 +3178,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppResponse); i { case 0: return &v.state @@ -3279,7 +3190,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppError); i { case 0: return &v.state @@ -3291,7 +3202,7 @@ func file_p2p_p2p_proto_init() { return nil } } - file_p2p_p2p_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_p2p_p2p_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppGossip); i { case 0: return &v.state @@ -3337,7 +3248,7 @@ func file_p2p_p2p_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2p_p2p_proto_rawDesc, NumEnums: 1, - NumMessages: 29, + NumMessages: 28, NumExtensions: 0, NumServices: 0, }, From a2f69d07c143400add24c2fc8ebc04e914e96769 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 25 Jul 
2024 21:06:06 +0300 Subject: [PATCH 004/155] remove subnet uptimes from api --- api/info/client.go | 8 +++----- api/info/service.go | 9 ++------- api/info/service.md | 14 +------------- 3 files changed, 6 insertions(+), 25 deletions(-) diff --git a/api/info/client.go b/api/info/client.go index 15812cd5c213..b4c6c58d31c6 100644 --- a/api/info/client.go +++ b/api/info/client.go @@ -27,7 +27,7 @@ type Client interface { Peers(context.Context, ...rpc.Option) ([]Peer, error) IsBootstrapped(context.Context, string, ...rpc.Option) (bool, error) GetTxFee(context.Context, ...rpc.Option) (*GetTxFeeResponse, error) - Uptime(context.Context, ids.ID, ...rpc.Option) (*UptimeResponse, error) + Uptime(context.Context, ...rpc.Option) (*UptimeResponse, error) GetVMs(context.Context, ...rpc.Option) (map[ids.ID][]string, error) } @@ -101,11 +101,9 @@ func (c *client) GetTxFee(ctx context.Context, options ...rpc.Option) (*GetTxFee return res, err } -func (c *client) Uptime(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (*UptimeResponse, error) { +func (c *client) Uptime(ctx context.Context, options ...rpc.Option) (*UptimeResponse, error) { res := &UptimeResponse{} - err := c.requester.SendRequest(ctx, "info.uptime", &UptimeRequest{ - SubnetID: subnetID, - }, res, options...) + err := c.requester.SendRequest(ctx, "info.uptime", struct{}{}, res, options...) return res, err } diff --git a/api/info/service.go b/api/info/service.go index fd0117c5a088..5f9e4b73934d 100644 --- a/api/info/service.go +++ b/api/info/service.go @@ -307,18 +307,13 @@ type UptimeResponse struct { WeightedAveragePercentage json.Float64 `json:"weightedAveragePercentage"` } -type UptimeRequest struct { - // if omitted, defaults to primary network - SubnetID ids.ID `json:"subnetID"` -} - -func (i *Info) Uptime(_ *http.Request, args *UptimeRequest, reply *UptimeResponse) error { +func (i *Info) Uptime(_ *http.Request, _ *struct{}, reply *UptimeResponse) error { i.log.Debug("API called", zap.String("service", "info"), zap.String("method", "uptime"), ) - result, err := i.networking.NodeUptime(args.SubnetID) + result, err := i.networking.NodeUptime() if err != nil { return fmt.Errorf("couldn't get node uptime: %w", err) } diff --git a/api/info/service.md b/api/info/service.md index d7e70e269dff..f3d56bdf1061 100644 --- a/api/info/service.md +++ b/api/info/service.md @@ -526,7 +526,6 @@ info.peers({ lastReceived: string, benched: string[], observedUptime: int, - observedSubnetUptime: map[string]int, } } ``` @@ -542,7 +541,6 @@ info.peers({ - `lastReceived` is the timestamp of last message received from the peer. - `benched` shows chain IDs that the peer is being benched. - `observedUptime` is this node's primary network uptime, observed by the peer. -- `observedSubnetUptime` is a map of Subnet IDs to this node's Subnet uptimes, observed by the peer. 
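For reference, a caller-side sketch of how Go code consuming the info API adapts to this patch (the endpoint and `info.NewClient` are assumptions for illustration; only the `Peers`/`Uptime` signatures and the removed subnet parameters come from the diffs above):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ava-labs/avalanchego/api/info"
)

func main() {
	// Assumed: a locally running node exposing the info API on the default port.
	client := info.NewClient("http://127.0.0.1:9650")
	ctx := context.Background()

	// Peers no longer reports per-subnet observed uptimes; only the
	// primary-network observedUptime remains on each peer.
	peers, err := client.Peers(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected peers:", len(peers))

	// Uptime no longer takes a subnetID; it always reports the node's
	// primary-network uptime.
	uptime, err := client.Uptime(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("weighted average uptime %:", uptime.WeightedAveragePercentage)
}
```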
**Example Call:** @@ -575,7 +573,6 @@ curl -X POST --data '{ "lastReceived": "2020-06-01T15:22:57Z", "benched": [], "observedUptime": "99", - "observedSubnetUptimes": {}, "trackedSubnets": [], "benched": [] }, @@ -588,9 +585,6 @@ curl -X POST --data '{ "lastReceived": "2020-06-01T15:22:34Z", "benched": [], "observedUptime": "75", - "observedSubnetUptimes": { - "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL": "100" - }, "trackedSubnets": [ "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL" ], @@ -605,7 +599,6 @@ curl -X POST --data '{ "lastReceived": "2020-06-01T15:22:55Z", "benched": [], "observedUptime": "95", - "observedSubnetUptimes": {}, "trackedSubnets": [], "benched": [] } @@ -623,18 +616,13 @@ Other sources may be using data gathered with incomplete (limited) information. **Signature:** ```sh -info.uptime({ - subnetID: string // optional -}) -> +info.uptime() -> { rewardingStakePercentage: float64, weightedAveragePercentage: float64 } ``` -- `subnetID` is the Subnet to get the uptime of. If not provided, returns the uptime of the node on - the primary network. - - `rewardingStakePercentage` is the percent of stake which thinks this node is above the uptime requirement. - `weightedAveragePercentage` is the stake-weighted average of all observed uptimes for this node. From ee287c6d0868df860b9542fc3a40f5512ffb6f4b Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 25 Jul 2024 21:06:16 +0300 Subject: [PATCH 005/155] add tracked bool --- snow/uptime/locked_calculator_test.go | 6 +++--- snow/uptime/manager.go | 18 ++++++++++++++++++ snow/uptime/mock_calculator.go | 24 ++++++++++++------------ 3 files changed, 33 insertions(+), 15 deletions(-) diff --git a/snow/uptime/locked_calculator_test.go b/snow/uptime/locked_calculator_test.go index 9e5edaad8c63..c65fb084cddb 100644 --- a/snow/uptime/locked_calculator_test.go +++ b/snow/uptime/locked_calculator_test.go @@ -50,15 +50,15 @@ func TestLockedCalculator(t *testing.T) { isBootstrapped.Set(true) // Should return the value from the mocked inner calculator - mockCalc.EXPECT().CalculateUptime(gomock.Any(), gomock.Any()).AnyTimes().Return(time.Duration(0), time.Time{}, errTest) + mockCalc.EXPECT().CalculateUptime(gomock.Any()).AnyTimes().Return(time.Duration(0), time.Time{}, errTest) _, _, err = lc.CalculateUptime(nodeID) require.ErrorIs(err, errTest) - mockCalc.EXPECT().CalculateUptimePercent(gomock.Any(), gomock.Any()).AnyTimes().Return(float64(0), errTest) + mockCalc.EXPECT().CalculateUptimePercent(gomock.Any()).AnyTimes().Return(float64(0), errTest) _, err = lc.CalculateUptimePercent(nodeID) require.ErrorIs(err, errTest) - mockCalc.EXPECT().CalculateUptimePercentFrom(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(float64(0), errTest) + mockCalc.EXPECT().CalculateUptimePercentFrom(gomock.Any(), gomock.Any()).AnyTimes().Return(float64(0), errTest) _, err = lc.CalculateUptimePercentFrom(nodeID, time.Now()) require.ErrorIs(err, errTest) } diff --git a/snow/uptime/manager.go b/snow/uptime/manager.go index 2a915da3fb6c..93aaa4a1b139 100644 --- a/snow/uptime/manager.go +++ b/snow/uptime/manager.go @@ -40,6 +40,7 @@ type manager struct { state State connections map[ids.NodeID]time.Time // nodeID -> time + tracked bool } func NewManager(state State, clk *mockable.Clock) Manager { @@ -70,10 +71,18 @@ func (m *manager) StartTracking(nodeIDs []ids.NodeID) error { return err } } + m.tracked = true return nil } func (m *manager) StopTracking(nodeIDs []ids.NodeID) error { + // TODO: this was not here before, should we add it? 
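+ // If tracking never started there is nothing to persist, so return early
+ // rather than writing uptimes that were never tracked.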
+ if !m.tracked { + return nil + } + defer func() { + m.tracked = false + }() now := m.clock.UnixTime() for _, nodeID := range nodeIDs { // If the node is already connected, then we can just @@ -139,6 +148,12 @@ func (m *manager) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, return upDuration, lastUpdated, nil } + if !m.tracked { + durationOffline := now.Sub(lastUpdated) + newUpDuration := upDuration + durationOffline + return newUpDuration, now, nil + } + timeConnected, isConnected := m.connections[nodeID] if !isConnected { return upDuration, now, nil @@ -187,6 +202,9 @@ func (m *manager) CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.T // updateUptime updates the uptime of the node on the state by the amount // of time that the node has been connected. func (m *manager) updateUptime(nodeID ids.NodeID) error { + if !m.tracked { + return nil + } newDuration, newLastUpdated, err := m.CalculateUptime(nodeID) if err == database.ErrNotFound { // If a non-validator disconnects, we don't care diff --git a/snow/uptime/mock_calculator.go b/snow/uptime/mock_calculator.go index cc5b5942e639..5e2485cc6ddb 100644 --- a/snow/uptime/mock_calculator.go +++ b/snow/uptime/mock_calculator.go @@ -41,9 +41,9 @@ func (m *MockCalculator) EXPECT() *MockCalculatorMockRecorder { } // CalculateUptime mocks base method. -func (m *MockCalculator) CalculateUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Duration, time.Time, error) { +func (m *MockCalculator) CalculateUptime(arg0 ids.NodeID) (time.Duration, time.Time, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CalculateUptime", arg0, arg1) + ret := m.ctrl.Call(m, "CalculateUptime", arg0) ret0, _ := ret[0].(time.Duration) ret1, _ := ret[1].(time.Time) ret2, _ := ret[2].(error) @@ -51,37 +51,37 @@ func (m *MockCalculator) CalculateUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Dur } // CalculateUptime indicates an expected call of CalculateUptime. -func (mr *MockCalculatorMockRecorder) CalculateUptime(arg0, arg1 any) *gomock.Call { +func (mr *MockCalculatorMockRecorder) CalculateUptime(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptime", reflect.TypeOf((*MockCalculator)(nil).CalculateUptime), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptime", reflect.TypeOf((*MockCalculator)(nil).CalculateUptime), arg0) } // CalculateUptimePercent mocks base method. -func (m *MockCalculator) CalculateUptimePercent(arg0 ids.NodeID, arg1 ids.ID) (float64, error) { +func (m *MockCalculator) CalculateUptimePercent(arg0 ids.NodeID) (float64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CalculateUptimePercent", arg0, arg1) + ret := m.ctrl.Call(m, "CalculateUptimePercent", arg0) ret0, _ := ret[0].(float64) ret1, _ := ret[1].(error) return ret0, ret1 } // CalculateUptimePercent indicates an expected call of CalculateUptimePercent. -func (mr *MockCalculatorMockRecorder) CalculateUptimePercent(arg0, arg1 any) *gomock.Call { +func (mr *MockCalculatorMockRecorder) CalculateUptimePercent(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercent", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercent), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercent", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercent), arg0) } // CalculateUptimePercentFrom mocks base method. 
-func (m *MockCalculator) CalculateUptimePercentFrom(arg0 ids.NodeID, arg1 ids.ID, arg2 time.Time) (float64, error) { +func (m *MockCalculator) CalculateUptimePercentFrom(arg0 ids.NodeID, arg1 time.Time) (float64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CalculateUptimePercentFrom", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "CalculateUptimePercentFrom", arg0, arg1) ret0, _ := ret[0].(float64) ret1, _ := ret[1].(error) return ret0, ret1 } // CalculateUptimePercentFrom indicates an expected call of CalculateUptimePercentFrom. -func (mr *MockCalculatorMockRecorder) CalculateUptimePercentFrom(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockCalculatorMockRecorder) CalculateUptimePercentFrom(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercentFrom", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercentFrom), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercentFrom", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercentFrom), arg0, arg1) } From a42e48cff65d5538fd535ddda18591f4702a787b Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 26 Jul 2024 16:10:18 +0300 Subject: [PATCH 006/155] remove unnecessary err --- network/network.go | 1 - 1 file changed, 1 deletion(-) diff --git a/network/network.go b/network/network.go index e92bc248a218..6567454cf6c2 100644 --- a/network/network.go +++ b/network/network.go @@ -52,7 +52,6 @@ var ( _ Network = (*network)(nil) errNotValidator = errors.New("node is not a validator") - errNotTracked = errors.New("subnet is not tracked") errExpectedProxy = errors.New("expected proxy") errExpectedTCPProtocol = errors.New("expected TCP protocol") ) From 4c542af29714708692f2ca8342e89a26ba25c90e Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sat, 27 Jul 2024 00:02:49 +0300 Subject: [PATCH 007/155] remove connected subnet msg --- chains/manager.go | 11 +- message/internal_msg_builder.go | 26 ---- message/ops.go | 5 - snow/networking/handler/handler.go | 7 - snow/networking/handler/handler_test.go | 99 -------------- snow/networking/handler/health_test.go | 1 - snow/networking/handler/message_queue.go | 2 +- snow/networking/router/chain_router.go | 61 --------- snow/networking/router/chain_router_test.go | 123 ------------------ snow/networking/sender/sender_test.go | 3 - snow/validators/mock_subnet_connector.go | 55 -------- snow/validators/subnet_connector.go | 16 --- snow/validators/unhandled_subnet_connector.go | 23 ---- vms/platformvm/vm.go | 11 +- vms/platformvm/vm_test.go | 1 - 15 files changed, 5 insertions(+), 439 deletions(-) delete mode 100644 snow/validators/mock_subnet_connector.go delete mode 100644 snow/validators/subnet_connector.go delete mode 100644 snow/validators/unhandled_subnet_connector.go diff --git a/chains/manager.go b/chains/manager.go index bdc6d0ef0180..a87396d1b65e 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -897,7 +897,6 @@ func (m *manager) createAvalancheChain( m.FrontierPollFrequency, m.ConsensusAppConcurrency, m.ResourceTracker, - validators.UnhandledSubnetConnector, // avalanche chains don't use subnet connector sb, connectedValidators, peerTracker, @@ -1108,8 +1107,7 @@ func (m *manager) createSnowmanChain( } var ( - bootstrapFunc func() - subnetConnector = validators.UnhandledSubnetConnector + bootstrapFunc func() ) // If [m.validatorState] is nil then we are creating the P-Chain. 
Since the // P-Chain is the first chain to be created, we can use it to initialize @@ -1147,12 +1145,6 @@ func (m *manager) createSnowmanChain( bootstrapFunc = func() { close(m.unblockChainCreatorCh) } - - // Set up the subnet connector for the P-Chain - subnetConnector, ok = vm.(validators.SubnetConnector) - if !ok { - return nil, fmt.Errorf("expected validators.SubnetConnector but got %T", vm) - } } // Initialize the ProposerVM and the vm wrapped inside it @@ -1295,7 +1287,6 @@ func (m *manager) createSnowmanChain( m.FrontierPollFrequency, m.ConsensusAppConcurrency, m.ResourceTracker, - subnetConnector, sb, connectedValidators, peerTracker, diff --git a/message/internal_msg_builder.go b/message/internal_msg_builder.go index 141dabaae414..bd71b60bf4ca 100644 --- a/message/internal_msg_builder.go +++ b/message/internal_msg_builder.go @@ -493,32 +493,6 @@ func InternalConnected(nodeID ids.NodeID, nodeVersion *version.Application) Inbo } } -// ConnectedSubnet contains the subnet ID of the subnet that the node is -// connected to. -type ConnectedSubnet struct { - SubnetID ids.ID `json:"subnet_id,omitempty"` -} - -func (m *ConnectedSubnet) String() string { - return fmt.Sprintf( - "SubnetID: %s", - m.SubnetID, - ) -} - -// InternalConnectedSubnet returns a message that indicates the node with [nodeID] is -// connected to the subnet with the given [subnetID]. -func InternalConnectedSubnet(nodeID ids.NodeID, subnetID ids.ID) InboundMessage { - return &inboundMessage{ - nodeID: nodeID, - op: ConnectedSubnetOp, - message: &ConnectedSubnet{ - SubnetID: subnetID, - }, - expiration: mockable.MaxTime, - } -} - type Disconnected struct{} func (Disconnected) String() string { diff --git a/message/ops.go b/message/ops.go index 11c69087b5f8..4e6fff95426a 100644 --- a/message/ops.go +++ b/message/ops.go @@ -60,7 +60,6 @@ const ( CrossChainAppResponseOp // Internal: ConnectedOp - ConnectedSubnetOp DisconnectedOp NotifyOp GossipRequestOp @@ -120,7 +119,6 @@ var ( CrossChainAppErrorOp, CrossChainAppResponseOp, ConnectedOp, - ConnectedSubnetOp, DisconnectedOp, NotifyOp, GossipRequestOp, @@ -158,7 +156,6 @@ var ( ChitsOp, // Internal ConnectedOp, - ConnectedSubnetOp, DisconnectedOp, } @@ -281,8 +278,6 @@ func (op Op) String() string { // Internal case ConnectedOp: return "connected" - case ConnectedSubnetOp: - return "connected_subnet" case DisconnectedOp: return "disconnected" case NotifyOp: diff --git a/snow/networking/handler/handler.go b/snow/networking/handler/handler.go index 1eb42ca0dcdc..c9176ef92343 100644 --- a/snow/networking/handler/handler.go +++ b/snow/networking/handler/handler.go @@ -118,8 +118,6 @@ type handler struct { // Closed when this handler and [engine] are done shutting down closed chan struct{} - subnetConnector validators.SubnetConnector - subnet subnets.Subnet // Tracks the peers that are currently connected to this subnet @@ -136,7 +134,6 @@ func New( gossipFrequency time.Duration, threadPoolSize int, resourceTracker tracker.ResourceTracker, - subnetConnector validators.SubnetConnector, subnet subnets.Subnet, peerTracker commontracker.Peers, p2pTracker *p2p.PeerTracker, @@ -152,7 +149,6 @@ func New( closingChan: make(chan struct{}), closed: make(chan struct{}), resourceTracker: resourceTracker, - subnetConnector: subnetConnector, subnet: subnet, peerTracker: peerTracker, p2pTracker: p2pTracker, @@ -769,9 +765,6 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { h.p2pTracker.Connected(nodeID, msg.NodeVersion) return engine.Connected(ctx, nodeID, 
msg.NodeVersion) - case *message.ConnectedSubnet: - return h.subnetConnector.ConnectedSubnet(ctx, nodeID, msg.SubnetID) - case *message.Disconnected: err := h.peerTracker.Disconnected(ctx, nodeID) if err != nil { diff --git a/snow/networking/handler/handler_test.go b/snow/networking/handler/handler_test.go index 929c51780c24..095d09592cec 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -12,7 +12,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" @@ -73,7 +72,6 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, @@ -180,7 +178,6 @@ func TestHandlerClosesOnError(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, @@ -283,7 +280,6 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { 1, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, @@ -374,7 +370,6 @@ func TestHandlerDispatchInternal(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, @@ -423,98 +418,6 @@ func TestHandlerDispatchInternal(t *testing.T) { wg.Wait() } -func TestHandlerSubnetConnector(t *testing.T) { - require := require.New(t) - - snowCtx := snowtest.Context(t, snowtest.CChainID) - ctx := snowtest.ConsensusContext(snowCtx) - vdrs := validators.NewManager() - require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) - - resourceTracker, err := tracker.NewResourceTracker( - prometheus.NewRegistry(), - resource.NoUsage, - meter.ContinuousFactory{}, - time.Second, - ) - require.NoError(err) - ctrl := gomock.NewController(t) - connector := validators.NewMockSubnetConnector(ctrl) - - nodeID := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() - - peerTracker, err := p2p.NewPeerTracker( - logging.NoLog{}, - "", - prometheus.NewRegistry(), - nil, - version.CurrentApp, - ) - require.NoError(err) - - handler, err := New( - ctx, - vdrs, - nil, - time.Second, - testThreadPoolSize, - resourceTracker, - connector, - subnets.New(ctx.NodeID, subnets.Config{}), - commontracker.NewPeers(), - peerTracker, - prometheus.NewRegistry(), - ) - require.NoError(err) - - bootstrapper := &common.BootstrapperTest{ - EngineTest: common.EngineTest{ - T: t, - }, - } - bootstrapper.Default(false) - - engine := &common.EngineTest{T: t} - engine.Default(false) - engine.ContextF = func() *snow.ConsensusContext { - return ctx - } - - handler.SetEngineManager(&EngineManager{ - Snowman: &Engine{ - Bootstrapper: bootstrapper, - Consensus: engine, - }, - }) - ctx.State.Set(snow.EngineState{ - Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, - State: snow.NormalOp, // assumed bootstrap is done - }) - - bootstrapper.StartF = func(context.Context, uint32) error { - return nil - } - - handler.Start(context.Background(), false) - - // Handler should call subnet connector when ConnectedSubnet message is received - var wg sync.WaitGroup - 
connector.EXPECT().ConnectedSubnet(gomock.Any(), nodeID, subnetID).Do( - func(context.Context, ids.NodeID, ids.ID) { - wg.Done() - }) - - wg.Add(1) - defer wg.Wait() - - subnetInboundMessage := Message{ - InboundMessage: message.InternalConnectedSubnet(nodeID, subnetID), - EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, - } - handler.Push(context.Background(), subnetInboundMessage) -} - // Tests that messages are routed to the correct engine type func TestDynamicEngineTypeDispatch(t *testing.T) { tests := []struct { @@ -642,7 +545,6 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ids.EmptyNodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, @@ -725,7 +627,6 @@ func TestHandlerStartError(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - nil, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), peerTracker, diff --git a/snow/networking/handler/health_test.go b/snow/networking/handler/health_test.go index 789d3464187e..a1cce533beee 100644 --- a/snow/networking/handler/health_test.go +++ b/snow/networking/handler/health_test.go @@ -89,7 +89,6 @@ func TestHealthCheckSubnet(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, sb, peerTracker, p2pTracker, diff --git a/snow/networking/handler/message_queue.go b/snow/networking/handler/message_queue.go index 4d632c62d77e..fbf362c86f73 100644 --- a/snow/networking/handler/message_queue.go +++ b/snow/networking/handler/message_queue.go @@ -203,7 +203,7 @@ func (m *messageQueue) Shutdown() { // canPop will return true for at least one message in [m.msgs] func (m *messageQueue) canPop(msg message.InboundMessage) bool { // Always pop connected and disconnected messages. - if op := msg.Op(); op == message.ConnectedOp || op == message.DisconnectedOp || op == message.ConnectedSubnetOp { + if op := msg.Op(); op == message.ConnectedOp || op == message.DisconnectedOp { return true } diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index 6af0984afc3f..b125800d5ff8 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -50,9 +50,6 @@ type peer struct { version *version.Application // The subnets that this peer is currently tracking trackedSubnets set.Set[ids.ID] - // The subnets that this peer actually has a connection to. - // This is a subset of trackedSubnets. 
- connectedSubnets set.Set[ids.ID] } // ChainRouter routes incoming messages from the validator network @@ -467,11 +464,6 @@ func (cr *ChainRouter) AddChain(ctx context.Context, chain handler.Handler) { if _, benched := cr.benched[cr.myNodeID]; benched { return } - - myself := cr.peers[cr.myNodeID] - for subnetID := range myself.trackedSubnets { - cr.connectedSubnet(myself, cr.myNodeID, subnetID) - } } // Connected routes an incoming notification that a validator was just connected @@ -526,7 +518,6 @@ func (cr *ChainRouter) Connected(nodeID ids.NodeID, nodeVersion *version.Applica } } - cr.connectedSubnet(connectedPeer, nodeID, subnetID) } // Disconnected routes an incoming notification that a validator was connected @@ -603,8 +594,6 @@ func (cr *ChainRouter) Benched(chainID ids.ID, nodeID ids.NodeID) { }) } } - - peer.connectedSubnets.Clear() } // Unbenched routes an incoming notification that a validator was just unbenched @@ -647,13 +636,6 @@ func (cr *ChainRouter) Unbenched(chainID ids.ID, nodeID ids.NodeID) { }) } } - - // This will unbench the node from all its subnets. - // We handle this case separately because the node may have been benched on - // a subnet that has no chains. - for subnetID := range peer.trackedSubnets { - cr.connectedSubnet(peer, nodeID, subnetID) - } } // HealthCheck returns results of router health checks. Returns: @@ -758,46 +740,3 @@ func (cr *ChainRouter) clearRequest( cr.metrics.outstandingRequests.Set(float64(cr.timedRequests.Len())) return uniqueRequestID, &request } - -// connectedSubnet pushes an InternalSubnetConnected message with [nodeID] and -// [subnetID] to the P-chain. This should be called when a node is either first -// connecting to [subnetID] or when a node that was already connected is -// unbenched on [subnetID]. This is a noop if [subnetID] is the Primary Network -// or if the peer is already marked as connected to the subnet. -// Invariant: should be called after *message.Connected is pushed to the P-chain -// Invariant: should be called after the P-chain was provided in [AddChain] -func (cr *ChainRouter) connectedSubnet(peer *peer, nodeID ids.NodeID, subnetID ids.ID) { - // if connected to primary network, we can skip this - // because Connected has its own internal message - if subnetID == constants.PrimaryNetworkID { - return - } - - // peer already connected to this subnet - if peer.connectedSubnets.Contains(subnetID) { - return - } - - msg := message.InternalConnectedSubnet(nodeID, subnetID) - // We only push this message to the P-chain because it is the only chain - // that cares about the connectivity of all subnets. Others chains learn - // about the connectivity of their own subnet when they receive a - // *message.Connected. 
- platformChain, ok := cr.chainHandlers[constants.PlatformChainID] - if !ok { - cr.log.Error("trying to issue InternalConnectedSubnet message, but platform chain is not registered", - zap.Stringer("nodeID", nodeID), - zap.Stringer("subnetID", subnetID), - ) - return - } - platformChain.Push( - context.TODO(), - handler.Message{ - InboundMessage: msg, - EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, - }, - ) - - peer.connectedSubnets.Add(subnetID) -} diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 7472de1fc016..2039fa46c52c 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -25,7 +25,6 @@ import ( "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" @@ -109,7 +108,6 @@ func TestShutdown(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(chainCtx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, @@ -235,7 +233,6 @@ func TestConnectedAfterShutdownErrorLogRegression(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(chainCtx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, @@ -368,7 +365,6 @@ func TestShutdownTimesOut(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, @@ -538,7 +534,6 @@ func TestRouterTimeout(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, @@ -1121,7 +1116,6 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, sb, commontracker.NewPeers(), p2pTracker, @@ -1211,121 +1205,6 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { require.True(calledF) // should be called since this is a validator request } -func TestConnectedSubnet(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - tm, err := timeout.NewManager( - &timer.AdaptiveTimeoutConfig{ - InitialTimeout: 3 * time.Second, - MinimumTimeout: 3 * time.Second, - MaximumTimeout: 5 * time.Minute, - TimeoutCoefficient: 1, - TimeoutHalflife: 5 * time.Minute, - }, - benchlist.NewNoBenchlist(), - prometheus.NewRegistry(), - prometheus.NewRegistry(), - ) - require.NoError(err) - - go tm.Dispatch() - defer tm.Stop() - - // Create chain router - myNodeID := ids.GenerateTestNodeID() - peerNodeID := ids.GenerateTestNodeID() - subnetID0 := ids.GenerateTestID() - subnetID1 := ids.GenerateTestID() - trackedSubnets := set.Of(subnetID0, subnetID1) - chainRouter := ChainRouter{} - require.NoError(chainRouter.Initialize( - myNodeID, - logging.NoLog{}, - tm, - time.Millisecond, - set.Set[ids.ID]{}, - true, - trackedSubnets, - nil, - HealthConfig{}, - prometheus.NewRegistry(), - )) - - // Create bootstrapper, engine and handler - snowCtx := snowtest.Context(t, snowtest.PChainID) - ctx := snowtest.ConsensusContext(snowCtx) - ctx.Executing.Set(false) - ctx.State.Set(snow.EngineState{ - Type: 
engineType, - State: snow.NormalOp, - }) - - myConnectedMsg := handler.Message{ - InboundMessage: message.InternalConnected(myNodeID, version.CurrentApp), - EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, - } - mySubnetConnectedMsg0 := handler.Message{ - InboundMessage: message.InternalConnectedSubnet(myNodeID, subnetID0), - EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, - } - mySubnetConnectedMsg1 := handler.Message{ - InboundMessage: message.InternalConnectedSubnet(myNodeID, subnetID1), - EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, - } - - platformHandler := handler.NewMockHandler(ctrl) - platformHandler.EXPECT().Context().Return(ctx).AnyTimes() - platformHandler.EXPECT().SetOnStopped(gomock.Any()).AnyTimes() - platformHandler.EXPECT().Push(gomock.Any(), myConnectedMsg).Times(1) - platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg0).Times(1) - platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg1).Times(1) - - chainRouter.AddChain(context.Background(), platformHandler) - - peerConnectedMsg := handler.Message{ - InboundMessage: message.InternalConnected(peerNodeID, version.CurrentApp), - EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, - } - platformHandler.EXPECT().Push(gomock.Any(), peerConnectedMsg).Times(1) - chainRouter.Connected(peerNodeID, version.CurrentApp, constants.PrimaryNetworkID) - - peerSubnetConnectedMsg0 := handler.Message{ - InboundMessage: message.InternalConnectedSubnet(peerNodeID, subnetID0), - EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, - } - platformHandler.EXPECT().Push(gomock.Any(), peerSubnetConnectedMsg0).Times(1) - chainRouter.Connected(peerNodeID, version.CurrentApp, subnetID0) - - myDisconnectedMsg := handler.Message{ - InboundMessage: message.InternalDisconnected(myNodeID), - EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, - } - platformHandler.EXPECT().Push(gomock.Any(), myDisconnectedMsg).Times(1) - chainRouter.Benched(constants.PlatformChainID, myNodeID) - - peerDisconnectedMsg := handler.Message{ - InboundMessage: message.InternalDisconnected(peerNodeID), - EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, - } - platformHandler.EXPECT().Push(gomock.Any(), peerDisconnectedMsg).Times(1) - chainRouter.Benched(constants.PlatformChainID, peerNodeID) - - platformHandler.EXPECT().Push(gomock.Any(), myConnectedMsg).Times(1) - platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg0).Times(1) - platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg1).Times(1) - - chainRouter.Unbenched(constants.PlatformChainID, myNodeID) - - platformHandler.EXPECT().Push(gomock.Any(), peerConnectedMsg).Times(1) - platformHandler.EXPECT().Push(gomock.Any(), peerSubnetConnectedMsg0).Times(1) - - chainRouter.Unbenched(constants.PlatformChainID, peerNodeID) - - platformHandler.EXPECT().Push(gomock.Any(), peerDisconnectedMsg).Times(1) - chainRouter.Disconnected(peerNodeID) -} - func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { require := require.New(t) @@ -1402,7 +1281,6 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, sb, commontracker.NewPeers(), p2pTracker, @@ -1742,7 +1620,6 @@ func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, diff --git a/snow/networking/sender/sender_test.go 
b/snow/networking/sender/sender_test.go index 34f138f6db21..0402bb636505 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -128,7 +128,6 @@ func TestTimeout(t *testing.T) { time.Hour, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, @@ -405,7 +404,6 @@ func TestReliableMessages(t *testing.T) { 1, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, @@ -562,7 +560,6 @@ func TestReliableMessagesToMyself(t *testing.T) { time.Second, testThreadPoolSize, resourceTracker, - validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), p2pTracker, diff --git a/snow/validators/mock_subnet_connector.go b/snow/validators/mock_subnet_connector.go deleted file mode 100644 index b9f3ee0519b8..000000000000 --- a/snow/validators/mock_subnet_connector.go +++ /dev/null @@ -1,55 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: SubnetConnector) -// -// Generated by this command: -// -// mockgen -package=validators -destination=snow/validators/mock_subnet_connector.go github.com/ava-labs/avalanchego/snow/validators SubnetConnector -// - -// Package validators is a generated GoMock package. -package validators - -import ( - context "context" - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - gomock "go.uber.org/mock/gomock" -) - -// MockSubnetConnector is a mock of SubnetConnector interface. -type MockSubnetConnector struct { - ctrl *gomock.Controller - recorder *MockSubnetConnectorMockRecorder -} - -// MockSubnetConnectorMockRecorder is the mock recorder for MockSubnetConnector. -type MockSubnetConnectorMockRecorder struct { - mock *MockSubnetConnector -} - -// NewMockSubnetConnector creates a new mock instance. -func NewMockSubnetConnector(ctrl *gomock.Controller) *MockSubnetConnector { - mock := &MockSubnetConnector{ctrl: ctrl} - mock.recorder = &MockSubnetConnectorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSubnetConnector) EXPECT() *MockSubnetConnectorMockRecorder { - return m.recorder -} - -// ConnectedSubnet mocks base method. -func (m *MockSubnetConnector) ConnectedSubnet(arg0 context.Context, arg1 ids.NodeID, arg2 ids.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ConnectedSubnet", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ConnectedSubnet indicates an expected call of ConnectedSubnet. -func (mr *MockSubnetConnectorMockRecorder) ConnectedSubnet(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConnectedSubnet", reflect.TypeOf((*MockSubnetConnector)(nil).ConnectedSubnet), arg0, arg1, arg2) -} diff --git a/snow/validators/subnet_connector.go b/snow/validators/subnet_connector.go deleted file mode 100644 index 06b02ff90820..000000000000 --- a/snow/validators/subnet_connector.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package validators - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" -) - -// SubnetConnector represents a handler that is called when a connection is -// marked as connected to a subnet -type SubnetConnector interface { - ConnectedSubnet(ctx context.Context, nodeID ids.NodeID, subnetID ids.ID) error -} diff --git a/snow/validators/unhandled_subnet_connector.go b/snow/validators/unhandled_subnet_connector.go deleted file mode 100644 index 08447c4582ad..000000000000 --- a/snow/validators/unhandled_subnet_connector.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package validators - -import ( - "context" - "fmt" - - "github.com/ava-labs/avalanchego/ids" -) - -var UnhandledSubnetConnector SubnetConnector = &unhandledSubnetConnector{} - -type unhandledSubnetConnector struct{} - -func (unhandledSubnetConnector) ConnectedSubnet(_ context.Context, nodeID ids.NodeID, subnetID ids.ID) error { - return fmt.Errorf( - "unhandled ConnectedSubnet with nodeID=%q and subnetID=%q", - nodeID, - subnetID, - ) -} diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 6690488ef291..8244afe97e42 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -53,10 +53,9 @@ import ( ) var ( - _ snowmanblock.ChainVM = (*VM)(nil) - _ secp256k1fx.VM = (*VM)(nil) - _ validators.State = (*VM)(nil) - _ validators.SubnetConnector = (*VM)(nil) + _ snowmanblock.ChainVM = (*VM)(nil) + _ secp256k1fx.VM = (*VM)(nil) + _ validators.State = (*VM)(nil) ) type VM struct { @@ -467,10 +466,6 @@ func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version return vm.Network.Connected(ctx, nodeID, version) } -func (vm *VM) ConnectedSubnet(_ context.Context, nodeID ids.NodeID, subnetID ids.ID) error { - return nil -} - func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { if err := vm.uptimeManager.Disconnect(nodeID); err != nil { return err diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 277eed268013..5732b095e9f6 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -1542,7 +1542,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { time.Hour, 2, cpuTracker, - vm, subnets.New(ctx.NodeID, subnets.Config{}), tracker.NewPeers(), peerTracker, From 9161864c3d83f76c16d2bbe70d09ec017b44ef49 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sat, 27 Jul 2024 00:42:52 +0300 Subject: [PATCH 008/155] fix linter --- chains/manager.go | 4 +--- scripts/mocks.mockgen.txt | 1 - snow/networking/router/chain_router.go | 15 --------------- 3 files changed, 1 insertion(+), 19 deletions(-) diff --git a/chains/manager.go b/chains/manager.go index a87396d1b65e..590a88ae5ac9 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -1106,9 +1106,7 @@ func (m *manager) createSnowmanChain( messageSender = sender.Trace(messageSender, m.Tracer) } - var ( - bootstrapFunc func() - ) + var bootstrapFunc func() // If [m.validatorState] is nil then we are creating the P-Chain. 
Since the // P-Chain is the first chain to be created, we can use it to initialize // required interfaces for the other chains diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt index 139252666ab0..fb370aebff96 100644 --- a/scripts/mocks.mockgen.txt +++ b/scripts/mocks.mockgen.txt @@ -17,7 +17,6 @@ github.com/ava-labs/avalanchego/snow/networking/tracker=Targeter=snow/networking github.com/ava-labs/avalanchego/snow/networking/tracker=Tracker=snow/networking/tracker/mock_resource_tracker.go github.com/ava-labs/avalanchego/snow/uptime=Calculator=snow/uptime/mock_calculator.go github.com/ava-labs/avalanchego/snow/validators=State=snow/validators/mock_state.go -github.com/ava-labs/avalanchego/snow/validators=SubnetConnector=snow/validators/mock_subnet_connector.go github.com/ava-labs/avalanchego/utils/crypto/keychain=Ledger=utils/crypto/keychain/mock_ledger.go github.com/ava-labs/avalanchego/utils/filesystem=Reader=utils/filesystem/mock_io.go github.com/ava-labs/avalanchego/utils/hashing=Hasher=utils/hashing/mock_hasher.go diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index b125800d5ff8..01b0137407c4 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -450,20 +450,6 @@ func (cr *ChainRouter) AddChain(ctx context.Context, chain handler.Handler) { }, ) } - - // When we register the P-chain, we mark ourselves as connected on all of - // the subnets that we have tracked. - if chainID != constants.PlatformChainID { - return - } - - // If we have currently benched ourselves, we will mark ourselves as - // connected when we unbench. So skip connecting now. - // This is not "theoretically" possible, but keeping this here prevents us - // from keeping an invariant that we never bench ourselves. 
- if _, benched := cr.benched[cr.myNodeID]; benched { - return - } } // Connected routes an incoming notification that a validator was just connected @@ -517,7 +503,6 @@ func (cr *ChainRouter) Connected(nodeID ids.NodeID, nodeVersion *version.Applica } } } - } // Disconnected routes an incoming notification that a validator was connected From f275c15575ebbcd97c9507205b272ed91f6d65d2 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 31 Jul 2024 17:23:33 +0300 Subject: [PATCH 009/155] rework on tests and reviews --- network/peer/peer_test.go | 4 +-- snow/uptime/manager.go | 14 ++++---- vms/platformvm/service.go | 26 +++++--------- vms/platformvm/service_test.go | 10 ++++++ vms/platformvm/state/state_test.go | 38 +++++++++++++++++++-- vms/platformvm/txs/executor/helpers_test.go | 5 --- 6 files changed, 64 insertions(+), 33 deletions(-) diff --git a/network/peer/peer_test.go b/network/peer/peer_test.go index ae2f86a74fc1..11b7b5590565 100644 --- a/network/peer/peer_test.go +++ b/network/peer/peer_test.go @@ -227,7 +227,7 @@ func TestPingUptimes(t *testing.T) { require.NoError(peer0.AwaitClosed(context.Background())) require.NoError(peer1.AwaitClosed(context.Background())) }() - pingMsg, err := sharedConfig.MessageCreator.Ping(0) + pingMsg, err := sharedConfig.MessageCreator.Ping(1) require.NoError(err) require.True(peer0.Send(context.Background(), pingMsg)) @@ -238,7 +238,7 @@ func TestPingUptimes(t *testing.T) { sendAndFlush(t, peer0, peer1) uptime := peer1.ObservedUptime() - require.Equal(uint32(0), uptime) + require.Equal(uint32(1), uptime) } func TestTrackedSubnets(t *testing.T) { diff --git a/snow/uptime/manager.go b/snow/uptime/manager.go index 93aaa4a1b139..52d4b4c7a088 100644 --- a/snow/uptime/manager.go +++ b/snow/uptime/manager.go @@ -40,7 +40,9 @@ type manager struct { state State connections map[ids.NodeID]time.Time // nodeID -> time - tracked bool + // Whether we have started tracking the uptime of the nodes + // This is used to avoid setting the uptime before we have started tracking + startedTracking bool } func NewManager(state State, clk *mockable.Clock) Manager { @@ -71,17 +73,17 @@ func (m *manager) StartTracking(nodeIDs []ids.NodeID) error { return err } } - m.tracked = true + m.startedTracking = true return nil } func (m *manager) StopTracking(nodeIDs []ids.NodeID) error { // TODO: this was not here before, should we add it? - if !m.tracked { + if !m.startedTracking { return nil } defer func() { - m.tracked = false + m.startedTracking = false }() now := m.clock.UnixTime() for _, nodeID := range nodeIDs { @@ -148,7 +150,7 @@ func (m *manager) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, return upDuration, lastUpdated, nil } - if !m.tracked { + if !m.startedTracking { durationOffline := now.Sub(lastUpdated) newUpDuration := upDuration + durationOffline return newUpDuration, now, nil @@ -202,7 +204,7 @@ func (m *manager) CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.T // updateUptime updates the uptime of the node on the state by the amount // of time that the node has been connected. 
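+ // It is a no-op until tracking has started.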
func (m *manager) updateUptime(nodeID ids.NodeID) error { - if !m.tracked { + if !m.startedTracking { return nil } newDuration, newLastUpdated, err := m.CalculateUptime(nodeID) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 1bb82e1a1203..501cb743fc6b 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -836,10 +836,17 @@ func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidato // TODO: decide whether we want to keep connected for subnet validators // it should be available at this point if args.SubnetID == constants.PrimaryNetworkID { - currentUptime, isConnected, err := s.getAPIUptime(currentStaker) + rawUptime, err := s.vm.uptimeManager.CalculateUptimePercentFrom(currentStaker.NodeID, currentStaker.StartTime) if err != nil { return err } + // Transform this to a percentage (0-100) to make it consistent + // with observedUptime in info.peers API + currentUptime := avajson.Float32(rawUptime * 100) + if err != nil { + return err + } + isConnected := s.vm.uptimeManager.IsConnected(currentStaker.NodeID) connected = &isConnected uptime = ¤tUptime } @@ -1828,23 +1835,6 @@ func (s *Service) GetBlockByHeight(_ *http.Request, args *api.GetBlockByHeightAr return err } -// Returns: -// 1) the uptime of a validator in the API format -// 2) whether the validator is currently connected -// 3) an error if one occurred -func (s *Service) getAPIUptime(staker *state.Staker) (avajson.Float32, bool, error) { - rawUptime, err := s.vm.uptimeManager.CalculateUptimePercentFrom(staker.NodeID, staker.StartTime) - if err != nil { - return 0, false, err - } - connected := s.vm.uptimeManager.IsConnected(staker.NodeID) - - // Transform this to a percentage (0-100) to make it consistent - // with observedUptime in info.peers API - uptime := avajson.Float32(rawUptime * 100) - return uptime, connected, nil -} - func (s *Service) getAPIOwner(owner *secp256k1fx.OutputOwners) (*platformapi.Owner, error) { apiOwner := &platformapi.Owner{ Locktime: avajson.Uint64(owner.Locktime), diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index a3b2e743c9bc..1a688579b736 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -32,6 +32,8 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/signer" @@ -602,6 +604,12 @@ func TestGetCurrentValidators(t *testing.T) { args := GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} response := GetCurrentValidatorsReply{} + connectedIDs := set.NewSet[ids.NodeID](len(genesis.Validators) - 1) + for _, vdr := range genesis.Validators[:len(genesis.Validators)-1] { + connectedIDs.Add(vdr.NodeID) + require.NoError(service.vm.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + } + require.NoError(service.GetCurrentValidators(nil, &args, &response)) require.Len(response.Validators, len(genesis.Validators)) @@ -615,6 +623,8 @@ func TestGetCurrentValidators(t *testing.T) { require.Equal(vdr.EndTime, gotVdr.EndTime) require.Equal(vdr.StartTime, gotVdr.StartTime) + require.Equal(connectedIDs.Contains(vdr.NodeID), *gotVdr.Connected) + require.EqualValues(100, *gotVdr.Uptime) found = true } 
require.True(found, "expected validators to contain %s but didn't", vdr.NodeID) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 517981552042..c5243c63bf10 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -106,6 +106,9 @@ func TestPersistStakers(t *testing.T) { // with the right weight and showing the BLS key checkValidatorsSet func(*require.Assertions, *state, *Staker) + // Check that node duly track stakers uptimes + checkValidatorUptimes func(*require.Assertions, *state, *Staker) + // Check whether weight/bls keys diffs are duly stored checkDiffs func(*require.Assertions, *state, *Staker, uint64) }{ @@ -156,6 +159,17 @@ func TestPersistStakers(t *testing.T) { Weight: staker.Weight, }, valOut) }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + upDuration, lastUpdated, err := s.GetUptime(staker.NodeID) + if staker.SubnetID != constants.PrimaryNetworkID { + // only primary network validators have uptimes + r.ErrorIs(err, database.ErrNotFound) + } else { + r.NoError(err) + r.Equal(upDuration, time.Duration(0)) + r.Equal(lastUpdated, staker.StartTime) + } + }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.NoError(err) @@ -252,6 +266,7 @@ func TestPersistStakers(t *testing.T) { r.Equal(valOut.NodeID, staker.NodeID) r.Equal(valOut.Weight, val.Weight+staker.Weight) }, + checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { // validator's weight must increase of delegator's weight amount weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) @@ -303,6 +318,11 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + // pending validators uptime is not tracked + _, _, err := s.GetUptime(staker.NodeID) + r.ErrorIs(err, database.ErrNotFound) + }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { // pending validators weight diff and bls diffs are not stored _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) @@ -373,7 +393,8 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, + checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, + checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, }, "delete current validator": { storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { @@ -419,6 +440,11 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + // uptimes of delete validators are dropped + _, _, err := s.GetUptime(staker.NodeID) + r.ErrorIs(err, database.ErrNotFound) + }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.NoError(err) @@ -515,6 +541,7 @@ func TestPersistStakers(t *testing.T) { 
r.Equal(valOut.NodeID, staker.NodeID) r.Equal(valOut.Weight, val.Weight) }, + checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { // validator's weight must decrease of delegator's weight amount weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) @@ -568,6 +595,10 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + _, _, err := s.GetUptime(staker.NodeID) + r.ErrorIs(err, database.ErrNotFound) + }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.ErrorIs(err, database.ErrNotFound) @@ -635,7 +666,8 @@ func TestPersistStakers(t *testing.T) { valsMap := s.cfg.Validators.GetMap(staker.SubnetID) r.Empty(valsMap) }, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, + checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, + checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, }, } @@ -653,6 +685,7 @@ func TestPersistStakers(t *testing.T) { // check all relevant data are stored test.checkStakerInState(require, state, staker) test.checkValidatorsSet(require, state, staker) + test.checkValidatorUptimes(require, state, staker) test.checkDiffs(require, state, staker, 0 /*height*/) // rebuild the state @@ -666,6 +699,7 @@ func TestPersistStakers(t *testing.T) { // check again that all relevant data are still available in rebuilt state test.checkStakerInState(require, state, staker) test.checkValidatorsSet(require, state, staker) + test.checkValidatorUptimes(require, state, staker) test.checkDiffs(require, state, staker, 0 /*height*/) }) } diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index a5f2d6fcca59..430644571ae2 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -189,11 +189,6 @@ func newEnvironment(t *testing.T, f fork) *environment { require.NoError(env.uptimes.StopTracking(validatorIDs)) - for subnetID := range env.config.TrackedSubnets { - validatorIDs := env.config.Validators.GetValidatorIDs(subnetID) - - require.NoError(env.uptimes.StopTracking(validatorIDs)) - } env.state.SetHeight(math.MaxUint64) require.NoError(env.state.Commit()) } From d9355cc0f5a634738272228a564c6ac85addbf75 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 31 Jul 2024 17:45:07 +0300 Subject: [PATCH 010/155] fix linter --- vms/platformvm/service_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 1a688579b736..447e21bcb901 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -624,7 +624,7 @@ func TestGetCurrentValidators(t *testing.T) { require.Equal(vdr.EndTime, gotVdr.EndTime) require.Equal(vdr.StartTime, gotVdr.StartTime) require.Equal(connectedIDs.Contains(vdr.NodeID), *gotVdr.Connected) - require.EqualValues(100, *gotVdr.Uptime) + require.Equal(avajson.Float32(100), *gotVdr.Uptime) found = true } require.True(found, "expected validators to contain %s but didn't", vdr.NodeID) From 46501ad6a3778ce636059dd3b0a8236494b07eec Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 6 Aug 2024 11:00:08 +0300 
Subject: [PATCH 011/155] Update proto/p2p/p2p.proto Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- proto/p2p/p2p.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proto/p2p/p2p.proto b/proto/p2p/p2p.proto index 5d0a3a31a1eb..a309226973ee 100644 --- a/proto/p2p/p2p.proto +++ b/proto/p2p/p2p.proto @@ -64,7 +64,7 @@ message Message { message Ping { // Uptime percentage on the primary network [0, 100] uint32 uptime = 1; - reserved 2; // Until E upgrade is activated. + reserved 2; // Until Etna upgrade is activated. } // Pong is sent in response to a Ping. From b7459bdf40e5042a52a869fbfc8cf93c15038793 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 6 Aug 2024 11:01:35 +0300 Subject: [PATCH 012/155] fix comment Signed-off-by: Ceyhun Onur --- snow/uptime/manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snow/uptime/manager.go b/snow/uptime/manager.go index 52d4b4c7a088..284fcdea7614 100644 --- a/snow/uptime/manager.go +++ b/snow/uptime/manager.go @@ -39,7 +39,7 @@ type manager struct { clock *mockable.Clock state State - connections map[ids.NodeID]time.Time // nodeID -> time + connections map[ids.NodeID]time.Time // nodeID -> connected at // Whether we have started tracking the uptime of the nodes // This is used to avoid setting the uptime before we have started tracking startedTracking bool From b81b73719cbe9b9fa6a70beb665cf9837447dce8 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 6 Aug 2024 11:20:05 +0300 Subject: [PATCH 013/155] Update vms/platformvm/service_test.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- vms/platformvm/service_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 447e21bcb901..588d15b3e72c 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -604,6 +604,7 @@ func TestGetCurrentValidators(t *testing.T) { args := GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} response := GetCurrentValidatorsReply{} + // Connect to nodes other than the last node in genesis.Validators, which is the node being tested. 
connectedIDs := set.NewSet[ids.NodeID](len(genesis.Validators) - 1) for _, vdr := range genesis.Validators[:len(genesis.Validators)-1] { connectedIDs.Add(vdr.NodeID) From f6bb38368b729cf4ba5ef17ec69b799ea8d27c1c Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 6 Aug 2024 11:20:38 +0300 Subject: [PATCH 014/155] use disconnect in stop tracking --- snow/uptime/manager.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/snow/uptime/manager.go b/snow/uptime/manager.go index 284fcdea7614..413a660ec172 100644 --- a/snow/uptime/manager.go +++ b/snow/uptime/manager.go @@ -90,9 +90,7 @@ func (m *manager) StopTracking(nodeIDs []ids.NodeID) error { // If the node is already connected, then we can just // update the uptime in the state and remove the connection if _, isConnected := m.connections[nodeID]; isConnected { - err := m.updateUptime(nodeID) - delete(m.connections, nodeID) - if err != nil { + if err := m.disconnect(nodeID); err != nil { return err } continue @@ -129,6 +127,10 @@ func (m *manager) IsConnected(nodeID ids.NodeID) bool { } func (m *manager) Disconnect(nodeID ids.NodeID) error { + return m.disconnect(nodeID) +} + +func (m *manager) disconnect(nodeID ids.NodeID) error { if err := m.updateUptime(nodeID); err != nil { return err } From 7a6f7eb8a44372c1f15341433b487e0b480a4f45 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 6 Aug 2024 12:14:41 +0300 Subject: [PATCH 015/155] remove todo comment --- vms/platformvm/service.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 501cb743fc6b..4fb48eddb941 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -832,9 +832,6 @@ func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidato delegationFee := avajson.Float32(100 * float32(shares) / float32(reward.PercentDenominator)) var uptime *avajson.Float32 var connected *bool - // Only calculate uptime for primary network validators - // TODO: decide whether we want to keep connected for subnet validators - // it should be available at this point if args.SubnetID == constants.PrimaryNetworkID { rawUptime, err := s.vm.uptimeManager.CalculateUptimePercentFrom(currentStaker.NodeID, currentStaker.StartTime) if err != nil { From cba7a50cc4c4e77088d5e18eab3f0090dfd94cfd Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sun, 8 Sep 2024 16:33:16 +0300 Subject: [PATCH 016/155] remove unused err --- network/network.go | 1 - 1 file changed, 1 deletion(-) diff --git a/network/network.go b/network/network.go index 63f8c1f035b3..eab4ecca085e 100644 --- a/network/network.go +++ b/network/network.go @@ -52,7 +52,6 @@ var ( _ Network = (*network)(nil) errNotValidator = errors.New("node is not a validator") - errNotTracked = errors.New("subnet is not tracked") errExpectedProxy = errors.New("expected proxy") errExpectedTCPProtocol = errors.New("expected TCP protocol") errTrackingPrimaryNetwork = errors.New("cannot track primary network") From b4955d6f492929a85bdf3e40a45396a62aeecd99 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sun, 8 Sep 2024 16:36:53 +0300 Subject: [PATCH 017/155] remove subnet connector mock --- .../validatorsmock/subnet_connector.go | 55 ------------------- 1 file changed, 55 deletions(-) delete mode 100644 snow/validators/validatorsmock/subnet_connector.go diff --git a/snow/validators/validatorsmock/subnet_connector.go b/snow/validators/validatorsmock/subnet_connector.go deleted file mode 100644 index 118a287ac68a..000000000000 --- 
a/snow/validators/validatorsmock/subnet_connector.go +++ /dev/null @@ -1,55 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: SubnetConnector) -// -// Generated by this command: -// -// mockgen -package=validatorsmock -destination=snow/validators/validatorsmock/subnet_connector.go -mock_names=SubnetConnector=SubnetConnector github.com/ava-labs/avalanchego/snow/validators SubnetConnector -// - -// Package validatorsmock is a generated GoMock package. -package validatorsmock - -import ( - context "context" - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - gomock "go.uber.org/mock/gomock" -) - -// SubnetConnector is a mock of SubnetConnector interface. -type SubnetConnector struct { - ctrl *gomock.Controller - recorder *SubnetConnectorMockRecorder -} - -// SubnetConnectorMockRecorder is the mock recorder for SubnetConnector. -type SubnetConnectorMockRecorder struct { - mock *SubnetConnector -} - -// NewSubnetConnector creates a new mock instance. -func NewSubnetConnector(ctrl *gomock.Controller) *SubnetConnector { - mock := &SubnetConnector{ctrl: ctrl} - mock.recorder = &SubnetConnectorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *SubnetConnector) EXPECT() *SubnetConnectorMockRecorder { - return m.recorder -} - -// ConnectedSubnet mocks base method. -func (m *SubnetConnector) ConnectedSubnet(arg0 context.Context, arg1 ids.NodeID, arg2 ids.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ConnectedSubnet", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ConnectedSubnet indicates an expected call of ConnectedSubnet. -func (mr *SubnetConnectorMockRecorder) ConnectedSubnet(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConnectedSubnet", reflect.TypeOf((*SubnetConnector)(nil).ConnectedSubnet), arg0, arg1, arg2) -} From 1b228421b98b6a2a7a6d21e8b58f721ce88115b2 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 10 Sep 2024 14:43:39 -0400 Subject: [PATCH 018/155] WIP Add Expiry Replay Protection --- vms/platformvm/state/diff.go | 51 ++++++++++++ vms/platformvm/state/expiry.go | 116 ++++++++++++++++++++++++++++ vms/platformvm/state/expiry_test.go | 63 +++++++++++++++ vms/platformvm/state/mock_chain.go | 54 +++++++++++++ vms/platformvm/state/mock_diff.go | 54 +++++++++++++ vms/platformvm/state/mock_state.go | 54 +++++++++++++ vms/platformvm/state/state.go | 35 +++++++++ 7 files changed, 427 insertions(+) create mode 100644 vms/platformvm/state/expiry.go create mode 100644 vms/platformvm/state/expiry_test.go diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 16f7edf4435b..6f9e4fd0048d 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -41,6 +41,8 @@ type diff struct { // Subnet ID --> supply of native asset of the subnet currentSupply map[ids.ID]uint64 + expiryDiff *expiryDiff + currentStakerDiffs diffStakers // map of subnetID -> nodeID -> total accrued delegatee rewards modifiedDelegateeRewards map[ids.ID]map[ids.NodeID]uint64 @@ -77,6 +79,7 @@ func NewDiff( stateVersions: stateVersions, timestamp: parentState.GetTimestamp(), feeState: parentState.GetFeeState(), + expiryDiff: newExpiryDiff(), subnetOwners: make(map[ids.ID]fx.Owner), subnetManagers: make(map[ids.ID]chainIDAndAddr), }, nil @@ -136,6 +139,45 @@ func (d *diff) SetCurrentSupply(subnetID ids.ID, 
currentSupply uint64) { } } +func (d *diff) GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) { + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + + parentIterator, err := parentState.GetExpiryIterator() + if err != nil { + return nil, err + } + + return d.expiryDiff.getExpiryIterator(parentIterator), nil +} + +func (d *diff) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { + entry := ExpiryEntry{ + Timestamp: timestamp, + ValidationID: validationID, + } + if has, modified := d.expiryDiff.hasExpiry(entry); modified { + return has, nil + } + + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return false, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + + return parentState.HasExpiry(timestamp, validationID) +} + +func (d *diff) PutExpiry(timestamp uint64, validationID ids.ID) { + d.expiryDiff.PutExpiry(timestamp, validationID) +} + +func (d *diff) DeleteExpiry(timestamp uint64, validationID ids.ID) { + d.expiryDiff.DeleteExpiry(timestamp, validationID) +} + func (d *diff) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { // If the validator was modified in this diff, return the modified // validator. @@ -440,6 +482,15 @@ func (d *diff) Apply(baseState Chain) error { for subnetID, supply := range d.currentSupply { baseState.SetCurrentSupply(subnetID, supply) } + addedExpiryIterator := iterator.FromTree(d.expiryDiff.added) + for addedExpiryIterator.Next() { + entry := addedExpiryIterator.Value() + baseState.PutExpiry(entry.Timestamp, entry.ValidationID) + } + addedExpiryIterator.Release() + for removed := range d.expiryDiff.removed { + baseState.DeleteExpiry(removed.Timestamp, removed.ValidationID) + } for _, subnetValidatorDiffs := range d.currentStakerDiffs.validatorDiffs { for _, validatorDiff := range subnetValidatorDiffs { switch validatorDiff.validatorStatus { diff --git a/vms/platformvm/state/expiry.go b/vms/platformvm/state/expiry.go new file mode 100644 index 000000000000..4faa59051331 --- /dev/null +++ b/vms/platformvm/state/expiry.go @@ -0,0 +1,116 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "encoding/binary" + "fmt" + + "github.com/google/btree" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/iterator" + "github.com/ava-labs/avalanchego/utils/set" +) + +// expiryKey = [timestamp] + [validationID] +const expiryKeyLength = database.Uint64Size + ids.IDLen + +var ( + errUnexpectedExpiryKeyLength = fmt.Errorf("expected expiry key length %d", expiryKeyLength) + + _ btree.LessFunc[ExpiryEntry] = ExpiryEntry.Less +) + +type Expiry interface { + GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) + HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) + PutExpiry(timestamp uint64, validationID ids.ID) + DeleteExpiry(timestamp uint64, validationID ids.ID) +} + +type ExpiryEntry struct { + Timestamp uint64 + ValidationID ids.ID +} + +func (e *ExpiryEntry) Marshal() []byte { + key := make([]byte, expiryKeyLength) + binary.BigEndian.PutUint64(key, e.Timestamp) + copy(key[database.Uint64Size:], e.ValidationID[:]) + return key +} + +func (e *ExpiryEntry) Unmarshal(data []byte) error { + if len(data) != expiryKeyLength { + return errUnexpectedExpiryKeyLength + } + + e.Timestamp = binary.BigEndian.Uint64(data) + copy(e.ValidationID[:], data[database.Uint64Size:]) + return nil +} + +func (e ExpiryEntry) Less(o ExpiryEntry) bool { + switch { + case e.Timestamp < o.Timestamp: + return true + case e.Timestamp > o.Timestamp: + return false + default: + return e.ValidationID.Compare(o.ValidationID) == -1 + } +} + +type expiryDiff struct { + added *btree.BTreeG[ExpiryEntry] + removed set.Set[ExpiryEntry] +} + +func newExpiryDiff() *expiryDiff { + return &expiryDiff{ + added: btree.NewG(defaultTreeDegree, ExpiryEntry.Less), + } +} + +func (e *expiryDiff) PutExpiry(timestamp uint64, validationID ids.ID) { + entry := ExpiryEntry{ + Timestamp: timestamp, + ValidationID: validationID, + } + e.added.ReplaceOrInsert(entry) + e.removed.Remove(entry) +} + +func (e *expiryDiff) DeleteExpiry(timestamp uint64, validationID ids.ID) { + entry := ExpiryEntry{ + Timestamp: timestamp, + ValidationID: validationID, + } + e.added.Delete(entry) + e.removed.Add(entry) +} + +func (e *expiryDiff) getExpiryIterator(parentIterator iterator.Iterator[ExpiryEntry]) iterator.Iterator[ExpiryEntry] { + return iterator.Filter( + iterator.Merge( + ExpiryEntry.Less, + parentIterator, + iterator.FromTree(e.added), + ), + e.removed.Contains, + ) +} + +func (e *expiryDiff) hasExpiry(entry ExpiryEntry) (bool, bool) { + switch { + case e.removed.Contains(entry): + return false, true + case e.added.Has(entry): + return true, true + default: + return false, false + } +} diff --git a/vms/platformvm/state/expiry_test.go b/vms/platformvm/state/expiry_test.go new file mode 100644 index 000000000000..db35b5c0e0b3 --- /dev/null +++ b/vms/platformvm/state/expiry_test.go @@ -0,0 +1,63 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + "github.com/thepudds/fzgen/fuzzer" +) + +func FuzzMarshalExpiryKey(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + require := require.New(t) + + var entry ExpiryEntry + fz := fuzzer.NewFuzzer(data) + fz.Fill(&entry) + + marshalledData := entry.Marshal() + + var parsedEntry ExpiryEntry + err := parsedEntry.Unmarshal(marshalledData) + require.NoError(err) + require.Equal(entry, parsedEntry) + }) +} + +func FuzzMarshalExpiryKeyIteration(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + var ( + entry0 ExpiryEntry + entry1 ExpiryEntry + ) + fz := fuzzer.NewFuzzer(data) + fz.Fill(&entry0, &entry1) + + key0 := entry0.Marshal() + key1 := entry1.Marshal() + require.Equal( + t, + entry0.Less(entry1), + bytes.Compare(key0, key1) == -1, + ) + }) +} + +func FuzzUnmarshalExpiryKey(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + require := require.New(t) + + var entry ExpiryEntry + if err := entry.Unmarshal(data); err != nil { + require.ErrorIs(err, errUnexpectedExpiryKeyLength) + return + } + + marshalledData := entry.Marshal() + require.Equal(data, marshalledData) + }) +} diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index 727847a7c07f..2d86541eeb10 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -142,6 +142,18 @@ func (mr *MockChainMockRecorder) DeleteCurrentValidator(staker any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockChain)(nil).DeleteCurrentValidator), staker) } +// DeleteExpiry mocks base method. +func (m *MockChain) DeleteExpiry(timestamp uint64, validationID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteExpiry", timestamp, validationID) +} + +// DeleteExpiry indicates an expected call of DeleteExpiry. +func (mr *MockChainMockRecorder) DeleteExpiry(timestamp, validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExpiry", reflect.TypeOf((*MockChain)(nil).DeleteExpiry), timestamp, validationID) +} + // DeletePendingDelegator mocks base method. func (m *MockChain) DeletePendingDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -253,6 +265,21 @@ func (mr *MockChainMockRecorder) GetDelegateeReward(subnetID, nodeID any) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockChain)(nil).GetDelegateeReward), subnetID, nodeID) } +// GetExpiryIterator mocks base method. +func (m *MockChain) GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetExpiryIterator") + ret0, _ := ret[0].(iterator.Iterator[ExpiryEntry]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetExpiryIterator indicates an expected call of GetExpiryIterator. +func (mr *MockChainMockRecorder) GetExpiryIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExpiryIterator", reflect.TypeOf((*MockChain)(nil).GetExpiryIterator)) +} + // GetFeeState mocks base method. func (m *MockChain) GetFeeState() gas.State { m.ctrl.T.Helper() @@ -403,6 +430,21 @@ func (mr *MockChainMockRecorder) GetUTXO(utxoID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockChain)(nil).GetUTXO), utxoID) } +// HasExpiry mocks base method. 
+func (m *MockChain) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasExpiry", timestamp, validationID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasExpiry indicates an expected call of HasExpiry. +func (mr *MockChainMockRecorder) HasExpiry(timestamp, validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockChain)(nil).HasExpiry), timestamp, validationID) +} + // PutCurrentDelegator mocks base method. func (m *MockChain) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -429,6 +471,18 @@ func (mr *MockChainMockRecorder) PutCurrentValidator(staker any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockChain)(nil).PutCurrentValidator), staker) } +// PutExpiry mocks base method. +func (m *MockChain) PutExpiry(timestamp uint64, validationID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutExpiry", timestamp, validationID) +} + +// PutExpiry indicates an expected call of PutExpiry. +func (mr *MockChainMockRecorder) PutExpiry(timestamp, validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutExpiry", reflect.TypeOf((*MockChain)(nil).PutExpiry), timestamp, validationID) +} + // PutPendingDelegator mocks base method. func (m *MockChain) PutPendingDelegator(staker *Staker) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index ccf6619b5b24..52b781649356 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -156,6 +156,18 @@ func (mr *MockDiffMockRecorder) DeleteCurrentValidator(staker any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockDiff)(nil).DeleteCurrentValidator), staker) } +// DeleteExpiry mocks base method. +func (m *MockDiff) DeleteExpiry(timestamp uint64, validationID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteExpiry", timestamp, validationID) +} + +// DeleteExpiry indicates an expected call of DeleteExpiry. +func (mr *MockDiffMockRecorder) DeleteExpiry(timestamp, validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExpiry", reflect.TypeOf((*MockDiff)(nil).DeleteExpiry), timestamp, validationID) +} + // DeletePendingDelegator mocks base method. func (m *MockDiff) DeletePendingDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -267,6 +279,21 @@ func (mr *MockDiffMockRecorder) GetDelegateeReward(subnetID, nodeID any) *gomock return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).GetDelegateeReward), subnetID, nodeID) } +// GetExpiryIterator mocks base method. +func (m *MockDiff) GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetExpiryIterator") + ret0, _ := ret[0].(iterator.Iterator[ExpiryEntry]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetExpiryIterator indicates an expected call of GetExpiryIterator. +func (mr *MockDiffMockRecorder) GetExpiryIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExpiryIterator", reflect.TypeOf((*MockDiff)(nil).GetExpiryIterator)) +} + // GetFeeState mocks base method. 
func (m *MockDiff) GetFeeState() gas.State { m.ctrl.T.Helper() @@ -417,6 +444,21 @@ func (mr *MockDiffMockRecorder) GetUTXO(utxoID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockDiff)(nil).GetUTXO), utxoID) } +// HasExpiry mocks base method. +func (m *MockDiff) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasExpiry", timestamp, validationID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasExpiry indicates an expected call of HasExpiry. +func (mr *MockDiffMockRecorder) HasExpiry(timestamp, validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockDiff)(nil).HasExpiry), timestamp, validationID) +} + // PutCurrentDelegator mocks base method. func (m *MockDiff) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -443,6 +485,18 @@ func (mr *MockDiffMockRecorder) PutCurrentValidator(staker any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockDiff)(nil).PutCurrentValidator), staker) } +// PutExpiry mocks base method. +func (m *MockDiff) PutExpiry(timestamp uint64, validationID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutExpiry", timestamp, validationID) +} + +// PutExpiry indicates an expected call of PutExpiry. +func (mr *MockDiffMockRecorder) PutExpiry(timestamp, validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutExpiry", reflect.TypeOf((*MockDiff)(nil).PutExpiry), timestamp, validationID) +} + // PutPendingDelegator mocks base method. func (m *MockDiff) PutPendingDelegator(staker *Staker) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 527db5cf8a53..72503f3de408 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -257,6 +257,18 @@ func (mr *MockStateMockRecorder) DeleteCurrentValidator(staker any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockState)(nil).DeleteCurrentValidator), staker) } +// DeleteExpiry mocks base method. +func (m *MockState) DeleteExpiry(timestamp uint64, validationID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteExpiry", timestamp, validationID) +} + +// DeleteExpiry indicates an expected call of DeleteExpiry. +func (mr *MockStateMockRecorder) DeleteExpiry(timestamp, validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExpiry", reflect.TypeOf((*MockState)(nil).DeleteExpiry), timestamp, validationID) +} + // DeletePendingDelegator mocks base method. func (m *MockState) DeletePendingDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -398,6 +410,21 @@ func (mr *MockStateMockRecorder) GetDelegateeReward(subnetID, nodeID any) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockState)(nil).GetDelegateeReward), subnetID, nodeID) } +// GetExpiryIterator mocks base method. +func (m *MockState) GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetExpiryIterator") + ret0, _ := ret[0].(iterator.Iterator[ExpiryEntry]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetExpiryIterator indicates an expected call of GetExpiryIterator. 
+func (mr *MockStateMockRecorder) GetExpiryIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExpiryIterator", reflect.TypeOf((*MockState)(nil).GetExpiryIterator)) +} + // GetFeeState mocks base method. func (m *MockState) GetFeeState() gas.State { m.ctrl.T.Helper() @@ -638,6 +665,21 @@ func (mr *MockStateMockRecorder) GetUptime(nodeID, subnetID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), nodeID, subnetID) } +// HasExpiry mocks base method. +func (m *MockState) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasExpiry", timestamp, validationID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasExpiry indicates an expected call of HasExpiry. +func (mr *MockStateMockRecorder) HasExpiry(timestamp, validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockState)(nil).HasExpiry), timestamp, validationID) +} + // PutCurrentDelegator mocks base method. func (m *MockState) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -664,6 +706,18 @@ func (mr *MockStateMockRecorder) PutCurrentValidator(staker any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockState)(nil).PutCurrentValidator), staker) } +// PutExpiry mocks base method. +func (m *MockState) PutExpiry(timestamp uint64, validationID ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutExpiry", timestamp, validationID) +} + +// PutExpiry indicates an expected call of PutExpiry. +func (mr *MockStateMockRecorder) PutExpiry(timestamp, validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutExpiry", reflect.TypeOf((*MockState)(nil).PutExpiry), timestamp, validationID) +} + // PutPendingDelegator mocks base method. func (m *MockState) PutPendingDelegator(staker *Staker) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index b5288df75140..c7311a1ced34 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -82,6 +82,7 @@ var ( TransformedSubnetPrefix = []byte("transformedSubnet") SupplyPrefix = []byte("supply") ChainPrefix = []byte("chain") + ExpiryReplayProtectionPrefix = []byte("expiryReplayProtection") SingletonPrefix = []byte("singleton") TimestampKey = []byte("timestamp") @@ -96,6 +97,7 @@ var ( // Chain collects all methods to manage the state of the chain for block // execution. type Chain interface { + Expiry Stakers avax.UTXOAdder avax.UTXOGetter @@ -274,6 +276,8 @@ type stateBlk struct { * | '-. subnetID * | '-. list * | '-- txID -> nil + * |-. expiryReplayProtection + * | '-- timestamp + validationID -> nil * '-. 
singletons * |-- initializedKey -> nil * |-- blocksReindexedKey -> nil @@ -294,6 +298,9 @@ type state struct { baseDB *versiondb.Database + expiry *btree.BTreeG[ExpiryEntry] + expiryDiff *expiryDiff + currentStakers *baseStakers pendingStakers *baseStakers @@ -604,6 +611,9 @@ func New( blockCache: blockCache, blockDB: prefixdb.New(BlockPrefix, baseDB), + expiry: btree.NewG(defaultTreeDegree, ExpiryEntry.Less), + expiryDiff: newExpiryDiff(), + currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), @@ -678,6 +688,31 @@ func New( return s, nil } +func (s *state) GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) { + return s.expiryDiff.getExpiryIterator( + iterator.FromTree(s.expiry), + ), nil +} + +func (s *state) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { + entry := ExpiryEntry{ + Timestamp: timestamp, + ValidationID: validationID, + } + if has, modified := s.expiryDiff.hasExpiry(entry); modified { + return has, nil + } + return s.expiry.Has(entry), nil +} + +func (s *state) PutExpiry(timestamp uint64, validationID ids.ID) { + s.expiryDiff.PutExpiry(timestamp, validationID) +} + +func (s *state) DeleteExpiry(timestamp uint64, validationID ids.ID) { + s.expiryDiff.DeleteExpiry(timestamp, validationID) +} + func (s *state) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { return s.currentStakers.GetValidator(subnetID, nodeID) } From ad83805bc4daf1de74cd5c7b25e664947878b318 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 10 Sep 2024 14:49:05 -0400 Subject: [PATCH 019/155] rename --- vms/platformvm/state/expiry.go | 18 +++++++++--------- vms/platformvm/state/expiry_test.go | 8 ++++---- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/vms/platformvm/state/expiry.go b/vms/platformvm/state/expiry.go index 4faa59051331..2d27230c876e 100644 --- a/vms/platformvm/state/expiry.go +++ b/vms/platformvm/state/expiry.go @@ -15,11 +15,11 @@ import ( "github.com/ava-labs/avalanchego/utils/set" ) -// expiryKey = [timestamp] + [validationID] -const expiryKeyLength = database.Uint64Size + ids.IDLen +// expiryEntry = [timestamp] + [validationID] +const expiryEntryLength = database.Uint64Size + ids.IDLen var ( - errUnexpectedExpiryKeyLength = fmt.Errorf("expected expiry key length %d", expiryKeyLength) + errUnexpectedExpiryEntryLength = fmt.Errorf("expected expiry entry length %d", expiryEntryLength) _ btree.LessFunc[ExpiryEntry] = ExpiryEntry.Less ) @@ -37,15 +37,15 @@ type ExpiryEntry struct { } func (e *ExpiryEntry) Marshal() []byte { - key := make([]byte, expiryKeyLength) - binary.BigEndian.PutUint64(key, e.Timestamp) - copy(key[database.Uint64Size:], e.ValidationID[:]) - return key + data := make([]byte, expiryEntryLength) + binary.BigEndian.PutUint64(data, e.Timestamp) + copy(data[database.Uint64Size:], e.ValidationID[:]) + return data } func (e *ExpiryEntry) Unmarshal(data []byte) error { - if len(data) != expiryKeyLength { - return errUnexpectedExpiryKeyLength + if len(data) != expiryEntryLength { + return errUnexpectedExpiryEntryLength } e.Timestamp = binary.BigEndian.Uint64(data) diff --git a/vms/platformvm/state/expiry_test.go b/vms/platformvm/state/expiry_test.go index db35b5c0e0b3..49c1bb934ca4 100644 --- a/vms/platformvm/state/expiry_test.go +++ b/vms/platformvm/state/expiry_test.go @@ -11,7 +11,7 @@ import ( "github.com/thepudds/fzgen/fuzzer" ) -func FuzzMarshalExpiryKey(f *testing.F) { +func FuzzExpiryEntryMarshal(f *testing.F) { f.Fuzz(func(t *testing.T, data []byte) { require := require.New(t) @@ -28,7 
+28,7 @@ func FuzzMarshalExpiryKey(f *testing.F) { }) } -func FuzzMarshalExpiryKeyIteration(f *testing.F) { +func FuzzExpiryEntryMarshalOrdering(f *testing.F) { f.Fuzz(func(t *testing.T, data []byte) { var ( entry0 ExpiryEntry @@ -47,13 +47,13 @@ func FuzzMarshalExpiryKeyIteration(f *testing.F) { }) } -func FuzzUnmarshalExpiryKey(f *testing.F) { +func FuzzExpiryEntryUnmarshal(f *testing.F) { f.Fuzz(func(t *testing.T, data []byte) { require := require.New(t) var entry ExpiryEntry if err := entry.Unmarshal(data); err != nil { - require.ErrorIs(err, errUnexpectedExpiryKeyLength) + require.ErrorIs(err, errUnexpectedExpiryEntryLength) return } From 7820c1731b73818302b891afb222782a0b4aa4f4 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 10 Sep 2024 15:02:23 -0400 Subject: [PATCH 020/155] wip --- vms/platformvm/state/diff.go | 20 ++++++++------------ vms/platformvm/state/expiry.go | 19 ++++++------------- vms/platformvm/state/expiry_test.go | 2 +- vms/platformvm/state/mock_chain.go | 24 ++++++++++++------------ vms/platformvm/state/mock_diff.go | 24 ++++++++++++------------ vms/platformvm/state/mock_state.go | 24 ++++++++++++------------ vms/platformvm/state/state.go | 14 +++++--------- 7 files changed, 56 insertions(+), 71 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 6f9e4fd0048d..71993adedc00 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -153,11 +153,7 @@ func (d *diff) GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) { return d.expiryDiff.getExpiryIterator(parentIterator), nil } -func (d *diff) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { - entry := ExpiryEntry{ - Timestamp: timestamp, - ValidationID: validationID, - } +func (d *diff) HasExpiry(entry ExpiryEntry) (bool, error) { if has, modified := d.expiryDiff.hasExpiry(entry); modified { return has, nil } @@ -167,15 +163,15 @@ func (d *diff) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { return false, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) } - return parentState.HasExpiry(timestamp, validationID) + return parentState.HasExpiry(entry) } -func (d *diff) PutExpiry(timestamp uint64, validationID ids.ID) { - d.expiryDiff.PutExpiry(timestamp, validationID) +func (d *diff) PutExpiry(entry ExpiryEntry) { + d.expiryDiff.PutExpiry(entry) } -func (d *diff) DeleteExpiry(timestamp uint64, validationID ids.ID) { - d.expiryDiff.DeleteExpiry(timestamp, validationID) +func (d *diff) DeleteExpiry(entry ExpiryEntry) { + d.expiryDiff.DeleteExpiry(entry) } func (d *diff) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { @@ -485,11 +481,11 @@ func (d *diff) Apply(baseState Chain) error { addedExpiryIterator := iterator.FromTree(d.expiryDiff.added) for addedExpiryIterator.Next() { entry := addedExpiryIterator.Value() - baseState.PutExpiry(entry.Timestamp, entry.ValidationID) + baseState.PutExpiry(entry) } addedExpiryIterator.Release() for removed := range d.expiryDiff.removed { - baseState.DeleteExpiry(removed.Timestamp, removed.ValidationID) + baseState.DeleteExpiry(removed) } for _, subnetValidatorDiffs := range d.currentStakerDiffs.validatorDiffs { for _, validatorDiff := range subnetValidatorDiffs { diff --git a/vms/platformvm/state/expiry.go b/vms/platformvm/state/expiry.go index 2d27230c876e..dde2e9bbc91b 100644 --- a/vms/platformvm/state/expiry.go +++ b/vms/platformvm/state/expiry.go @@ -26,9 +26,9 @@ var ( type Expiry interface { GetExpiryIterator() 
(iterator.Iterator[ExpiryEntry], error) - HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) - PutExpiry(timestamp uint64, validationID ids.ID) - DeleteExpiry(timestamp uint64, validationID ids.ID) + HasExpiry(ExpiryEntry) (bool, error) + PutExpiry(ExpiryEntry) + DeleteExpiry(ExpiryEntry) } type ExpiryEntry struct { @@ -53,6 +53,7 @@ func (e *ExpiryEntry) Unmarshal(data []byte) error { return nil } +// Invariant: Less produces the same ordering as the marshalled bytes. func (e ExpiryEntry) Less(o ExpiryEntry) bool { switch { case e.Timestamp < o.Timestamp: @@ -75,20 +76,12 @@ func newExpiryDiff() *expiryDiff { } } -func (e *expiryDiff) PutExpiry(timestamp uint64, validationID ids.ID) { - entry := ExpiryEntry{ - Timestamp: timestamp, - ValidationID: validationID, - } +func (e *expiryDiff) PutExpiry(entry ExpiryEntry) { e.added.ReplaceOrInsert(entry) e.removed.Remove(entry) } -func (e *expiryDiff) DeleteExpiry(timestamp uint64, validationID ids.ID) { - entry := ExpiryEntry{ - Timestamp: timestamp, - ValidationID: validationID, - } +func (e *expiryDiff) DeleteExpiry(entry ExpiryEntry) { e.added.Delete(entry) e.removed.Add(entry) } diff --git a/vms/platformvm/state/expiry_test.go b/vms/platformvm/state/expiry_test.go index 49c1bb934ca4..7fa82ab06aac 100644 --- a/vms/platformvm/state/expiry_test.go +++ b/vms/platformvm/state/expiry_test.go @@ -28,7 +28,7 @@ func FuzzExpiryEntryMarshal(f *testing.F) { }) } -func FuzzExpiryEntryMarshalOrdering(f *testing.F) { +func FuzzExpiryEntryLessAndMarshalOrdering(f *testing.F) { f.Fuzz(func(t *testing.T, data []byte) { var ( entry0 ExpiryEntry diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index 2d86541eeb10..ac27471f3908 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -143,15 +143,15 @@ func (mr *MockChainMockRecorder) DeleteCurrentValidator(staker any) *gomock.Call } // DeleteExpiry mocks base method. -func (m *MockChain) DeleteExpiry(timestamp uint64, validationID ids.ID) { +func (m *MockChain) DeleteExpiry(arg0 ExpiryEntry) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteExpiry", timestamp, validationID) + m.ctrl.Call(m, "DeleteExpiry", arg0) } // DeleteExpiry indicates an expected call of DeleteExpiry. -func (mr *MockChainMockRecorder) DeleteExpiry(timestamp, validationID any) *gomock.Call { +func (mr *MockChainMockRecorder) DeleteExpiry(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExpiry", reflect.TypeOf((*MockChain)(nil).DeleteExpiry), timestamp, validationID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExpiry", reflect.TypeOf((*MockChain)(nil).DeleteExpiry), arg0) } // DeletePendingDelegator mocks base method. @@ -431,18 +431,18 @@ func (mr *MockChainMockRecorder) GetUTXO(utxoID any) *gomock.Call { } // HasExpiry mocks base method. -func (m *MockChain) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { +func (m *MockChain) HasExpiry(arg0 ExpiryEntry) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasExpiry", timestamp, validationID) + ret := m.ctrl.Call(m, "HasExpiry", arg0) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // HasExpiry indicates an expected call of HasExpiry. 
-func (mr *MockChainMockRecorder) HasExpiry(timestamp, validationID any) *gomock.Call { +func (mr *MockChainMockRecorder) HasExpiry(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockChain)(nil).HasExpiry), timestamp, validationID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockChain)(nil).HasExpiry), arg0) } // PutCurrentDelegator mocks base method. @@ -472,15 +472,15 @@ func (mr *MockChainMockRecorder) PutCurrentValidator(staker any) *gomock.Call { } // PutExpiry mocks base method. -func (m *MockChain) PutExpiry(timestamp uint64, validationID ids.ID) { +func (m *MockChain) PutExpiry(arg0 ExpiryEntry) { m.ctrl.T.Helper() - m.ctrl.Call(m, "PutExpiry", timestamp, validationID) + m.ctrl.Call(m, "PutExpiry", arg0) } // PutExpiry indicates an expected call of PutExpiry. -func (mr *MockChainMockRecorder) PutExpiry(timestamp, validationID any) *gomock.Call { +func (mr *MockChainMockRecorder) PutExpiry(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutExpiry", reflect.TypeOf((*MockChain)(nil).PutExpiry), timestamp, validationID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutExpiry", reflect.TypeOf((*MockChain)(nil).PutExpiry), arg0) } // PutPendingDelegator mocks base method. diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index 52b781649356..aaa21605f508 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -157,15 +157,15 @@ func (mr *MockDiffMockRecorder) DeleteCurrentValidator(staker any) *gomock.Call } // DeleteExpiry mocks base method. -func (m *MockDiff) DeleteExpiry(timestamp uint64, validationID ids.ID) { +func (m *MockDiff) DeleteExpiry(arg0 ExpiryEntry) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteExpiry", timestamp, validationID) + m.ctrl.Call(m, "DeleteExpiry", arg0) } // DeleteExpiry indicates an expected call of DeleteExpiry. -func (mr *MockDiffMockRecorder) DeleteExpiry(timestamp, validationID any) *gomock.Call { +func (mr *MockDiffMockRecorder) DeleteExpiry(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExpiry", reflect.TypeOf((*MockDiff)(nil).DeleteExpiry), timestamp, validationID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExpiry", reflect.TypeOf((*MockDiff)(nil).DeleteExpiry), arg0) } // DeletePendingDelegator mocks base method. @@ -445,18 +445,18 @@ func (mr *MockDiffMockRecorder) GetUTXO(utxoID any) *gomock.Call { } // HasExpiry mocks base method. -func (m *MockDiff) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { +func (m *MockDiff) HasExpiry(arg0 ExpiryEntry) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasExpiry", timestamp, validationID) + ret := m.ctrl.Call(m, "HasExpiry", arg0) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // HasExpiry indicates an expected call of HasExpiry. -func (mr *MockDiffMockRecorder) HasExpiry(timestamp, validationID any) *gomock.Call { +func (mr *MockDiffMockRecorder) HasExpiry(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockDiff)(nil).HasExpiry), timestamp, validationID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockDiff)(nil).HasExpiry), arg0) } // PutCurrentDelegator mocks base method. 
@@ -486,15 +486,15 @@ func (mr *MockDiffMockRecorder) PutCurrentValidator(staker any) *gomock.Call { } // PutExpiry mocks base method. -func (m *MockDiff) PutExpiry(timestamp uint64, validationID ids.ID) { +func (m *MockDiff) PutExpiry(arg0 ExpiryEntry) { m.ctrl.T.Helper() - m.ctrl.Call(m, "PutExpiry", timestamp, validationID) + m.ctrl.Call(m, "PutExpiry", arg0) } // PutExpiry indicates an expected call of PutExpiry. -func (mr *MockDiffMockRecorder) PutExpiry(timestamp, validationID any) *gomock.Call { +func (mr *MockDiffMockRecorder) PutExpiry(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutExpiry", reflect.TypeOf((*MockDiff)(nil).PutExpiry), timestamp, validationID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutExpiry", reflect.TypeOf((*MockDiff)(nil).PutExpiry), arg0) } // PutPendingDelegator mocks base method. diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 72503f3de408..6758c4e064c6 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -258,15 +258,15 @@ func (mr *MockStateMockRecorder) DeleteCurrentValidator(staker any) *gomock.Call } // DeleteExpiry mocks base method. -func (m *MockState) DeleteExpiry(timestamp uint64, validationID ids.ID) { +func (m *MockState) DeleteExpiry(arg0 ExpiryEntry) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteExpiry", timestamp, validationID) + m.ctrl.Call(m, "DeleteExpiry", arg0) } // DeleteExpiry indicates an expected call of DeleteExpiry. -func (mr *MockStateMockRecorder) DeleteExpiry(timestamp, validationID any) *gomock.Call { +func (mr *MockStateMockRecorder) DeleteExpiry(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExpiry", reflect.TypeOf((*MockState)(nil).DeleteExpiry), timestamp, validationID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExpiry", reflect.TypeOf((*MockState)(nil).DeleteExpiry), arg0) } // DeletePendingDelegator mocks base method. @@ -666,18 +666,18 @@ func (mr *MockStateMockRecorder) GetUptime(nodeID, subnetID any) *gomock.Call { } // HasExpiry mocks base method. -func (m *MockState) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { +func (m *MockState) HasExpiry(arg0 ExpiryEntry) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasExpiry", timestamp, validationID) + ret := m.ctrl.Call(m, "HasExpiry", arg0) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // HasExpiry indicates an expected call of HasExpiry. -func (mr *MockStateMockRecorder) HasExpiry(timestamp, validationID any) *gomock.Call { +func (mr *MockStateMockRecorder) HasExpiry(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockState)(nil).HasExpiry), timestamp, validationID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockState)(nil).HasExpiry), arg0) } // PutCurrentDelegator mocks base method. @@ -707,15 +707,15 @@ func (mr *MockStateMockRecorder) PutCurrentValidator(staker any) *gomock.Call { } // PutExpiry mocks base method. -func (m *MockState) PutExpiry(timestamp uint64, validationID ids.ID) { +func (m *MockState) PutExpiry(arg0 ExpiryEntry) { m.ctrl.T.Helper() - m.ctrl.Call(m, "PutExpiry", timestamp, validationID) + m.ctrl.Call(m, "PutExpiry", arg0) } // PutExpiry indicates an expected call of PutExpiry. 
-func (mr *MockStateMockRecorder) PutExpiry(timestamp, validationID any) *gomock.Call { +func (mr *MockStateMockRecorder) PutExpiry(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutExpiry", reflect.TypeOf((*MockState)(nil).PutExpiry), timestamp, validationID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutExpiry", reflect.TypeOf((*MockState)(nil).PutExpiry), arg0) } // PutPendingDelegator mocks base method. diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index c7311a1ced34..523ed9e485d9 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -694,23 +694,19 @@ func (s *state) GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) { ), nil } -func (s *state) HasExpiry(timestamp uint64, validationID ids.ID) (bool, error) { - entry := ExpiryEntry{ - Timestamp: timestamp, - ValidationID: validationID, - } +func (s *state) HasExpiry(entry ExpiryEntry) (bool, error) { if has, modified := s.expiryDiff.hasExpiry(entry); modified { return has, nil } return s.expiry.Has(entry), nil } -func (s *state) PutExpiry(timestamp uint64, validationID ids.ID) { - s.expiryDiff.PutExpiry(timestamp, validationID) +func (s *state) PutExpiry(entry ExpiryEntry) { + s.expiryDiff.PutExpiry(entry) } -func (s *state) DeleteExpiry(timestamp uint64, validationID ids.ID) { - s.expiryDiff.DeleteExpiry(timestamp, validationID) +func (s *state) DeleteExpiry(entry ExpiryEntry) { + s.expiryDiff.DeleteExpiry(entry) } func (s *state) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { From 20ed013f537852b7aa55b5d5e5efb8dbb845bb04 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 10 Sep 2024 15:24:57 -0400 Subject: [PATCH 021/155] Replace iterator equality helper --- utils/iterator/slice.go | 12 +++++ vms/platformvm/state/diff_test.go | 11 +++- vms/platformvm/state/stakers_test.go | 81 ++++++++++++++++------------ vms/platformvm/state/state_test.go | 13 +++-- 4 files changed, 79 insertions(+), 38 deletions(-) diff --git a/utils/iterator/slice.go b/utils/iterator/slice.go index d195a1adc14c..a7b18189aabc 100644 --- a/utils/iterator/slice.go +++ b/utils/iterator/slice.go @@ -5,6 +5,18 @@ package iterator var _ Iterator[any] = (*slice[any])(nil) +// ToSlice returns a slice that contains all of the elements from [it] in order. +// [it] will be released before returning. 
+func ToSlice[T any](it Iterator[T]) []T { + defer it.Release() + + var elements []T + for it.Next() { + elements = append(elements, it.Value()) + } + return elements +} + type slice[T any] struct { index int elements []T diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index a7eec42364b1..c4246b8aaeec 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/iterator/iteratormock" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/gas" @@ -530,14 +531,20 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { actualCurrentStakerIterator, actualErr := actual.GetCurrentStakerIterator() require.Equal(expectedErr, actualErr) if expectedErr == nil { - assertIteratorsEqual(t, expectedCurrentStakerIterator, actualCurrentStakerIterator) + require.Equal( + iterator.ToSlice(expectedCurrentStakerIterator), + iterator.ToSlice(actualCurrentStakerIterator), + ) } expectedPendingStakerIterator, expectedErr := expected.GetPendingStakerIterator() actualPendingStakerIterator, actualErr := actual.GetPendingStakerIterator() require.Equal(expectedErr, actualErr) if expectedErr == nil { - assertIteratorsEqual(t, expectedPendingStakerIterator, actualPendingStakerIterator) + require.Equal( + iterator.ToSlice(expectedPendingStakerIterator), + iterator.ToSlice(actualPendingStakerIterator), + ) } require.Equal(expected.GetTimestamp(), actual.GetTimestamp()) diff --git a/vms/platformvm/state/stakers_test.go b/vms/platformvm/state/stakers_test.go index d536b2a719d8..8141959d80a4 100644 --- a/vms/platformvm/state/stakers_test.go +++ b/vms/platformvm/state/stakers_test.go @@ -86,7 +86,10 @@ func TestBaseStakersValidator(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) stakerIterator := v.GetStakerIterator() - assertIteratorsEqual(t, iterator.FromSlice(delegator), stakerIterator) + require.Equal( + []*Staker{delegator}, + iterator.ToSlice(stakerIterator), + ) v.PutValidator(staker) @@ -97,7 +100,10 @@ func TestBaseStakersValidator(t *testing.T) { v.DeleteDelegator(delegator) stakerIterator = v.GetStakerIterator() - assertIteratorsEqual(t, iterator.FromSlice(staker), stakerIterator) + require.Equal( + []*Staker{staker}, + iterator.ToSlice(stakerIterator), + ) v.DeleteValidator(staker) @@ -105,30 +111,42 @@ func TestBaseStakersValidator(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) stakerIterator = v.GetStakerIterator() - assertIteratorsEqual(t, iterator.Empty[*Staker]{}, stakerIterator) + require.Empty( + iterator.ToSlice(stakerIterator), + ) } func TestBaseStakersDelegator(t *testing.T) { + require := require.New(t) staker := newTestStaker() delegator := newTestStaker() v := newBaseStakers() delegatorIterator := v.GetDelegatorIterator(delegator.SubnetID, delegator.NodeID) - assertIteratorsEqual(t, iterator.Empty[*Staker]{}, delegatorIterator) + require.Empty( + iterator.ToSlice(delegatorIterator), + ) v.PutDelegator(delegator) delegatorIterator = v.GetDelegatorIterator(delegator.SubnetID, ids.GenerateTestNodeID()) - assertIteratorsEqual(t, iterator.Empty[*Staker]{}, delegatorIterator) + require.Empty( + iterator.ToSlice(delegatorIterator), + ) delegatorIterator = v.GetDelegatorIterator(delegator.SubnetID, delegator.NodeID) - 
assertIteratorsEqual(t, iterator.FromSlice(delegator), delegatorIterator) + require.Equal( + []*Staker{delegator}, + iterator.ToSlice(delegatorIterator), + ) v.DeleteDelegator(delegator) delegatorIterator = v.GetDelegatorIterator(delegator.SubnetID, delegator.NodeID) - assertIteratorsEqual(t, iterator.Empty[*Staker]{}, delegatorIterator) + require.Empty( + iterator.ToSlice(delegatorIterator), + ) v.PutValidator(staker) @@ -136,7 +154,9 @@ func TestBaseStakersDelegator(t *testing.T) { v.DeleteDelegator(delegator) delegatorIterator = v.GetDelegatorIterator(staker.SubnetID, staker.NodeID) - assertIteratorsEqual(t, iterator.Empty[*Staker]{}, delegatorIterator) + require.Empty( + iterator.ToSlice(delegatorIterator), + ) } func TestDiffStakersValidator(t *testing.T) { @@ -160,7 +180,10 @@ func TestDiffStakersValidator(t *testing.T) { require.Equal(unmodified, status) stakerIterator := v.GetStakerIterator(iterator.Empty[*Staker]{}) - assertIteratorsEqual(t, iterator.FromSlice(delegator), stakerIterator) + require.Equal( + []*Staker{delegator}, + iterator.ToSlice(stakerIterator), + ) require.NoError(v.PutValidator(staker)) @@ -177,7 +200,10 @@ func TestDiffStakersValidator(t *testing.T) { require.Equal(unmodified, status) stakerIterator = v.GetStakerIterator(iterator.Empty[*Staker]{}) - assertIteratorsEqual(t, iterator.FromSlice(delegator), stakerIterator) + require.Equal( + []*Staker{delegator}, + iterator.ToSlice(stakerIterator), + ) } func TestDiffStakersDeleteValidator(t *testing.T) { @@ -198,25 +224,33 @@ func TestDiffStakersDeleteValidator(t *testing.T) { } func TestDiffStakersDelegator(t *testing.T) { + require := require.New(t) staker := newTestStaker() delegator := newTestStaker() v := diffStakers{} - require.NoError(t, v.PutValidator(staker)) + require.NoError(v.PutValidator(staker)) delegatorIterator := v.GetDelegatorIterator(iterator.Empty[*Staker]{}, ids.GenerateTestID(), delegator.NodeID) - assertIteratorsEqual(t, iterator.Empty[*Staker]{}, delegatorIterator) + require.Empty( + iterator.ToSlice(delegatorIterator), + ) v.PutDelegator(delegator) delegatorIterator = v.GetDelegatorIterator(iterator.Empty[*Staker]{}, delegator.SubnetID, delegator.NodeID) - assertIteratorsEqual(t, iterator.FromSlice(delegator), delegatorIterator) + require.Equal( + []*Staker{delegator}, + iterator.ToSlice(delegatorIterator), + ) v.DeleteDelegator(delegator) delegatorIterator = v.GetDelegatorIterator(iterator.Empty[*Staker]{}, ids.GenerateTestID(), delegator.NodeID) - assertIteratorsEqual(t, iterator.Empty[*Staker]{}, delegatorIterator) + require.Empty( + iterator.ToSlice(delegatorIterator), + ) } func newTestStaker() *Staker { @@ -235,22 +269,3 @@ func newTestStaker() *Staker { Priority: txs.PrimaryNetworkDelegatorCurrentPriority, } } - -func assertIteratorsEqual(t *testing.T, expected, actual iterator.Iterator[*Staker]) { - require := require.New(t) - - t.Helper() - - for expected.Next() { - require.True(actual.Next()) - - expectedStaker := expected.Value() - actualStaker := actual.Value() - - require.Equal(expectedStaker, actualStaker) - } - require.False(actual.Next()) - - expected.Release() - actual.Release() -} diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 3be526333e3e..515794364d71 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -88,18 +88,25 @@ func TestStateSyncGenesis(t *testing.T) { delegatorIterator, err := state.GetCurrentDelegatorIterator(constants.PrimaryNetworkID, defaultValidatorNodeID) 
require.NoError(err) - assertIteratorsEqual(t, iterator.Empty[*Staker]{}, delegatorIterator) + require.Empty( + iterator.ToSlice(delegatorIterator), + ) stakerIterator, err := state.GetCurrentStakerIterator() require.NoError(err) - assertIteratorsEqual(t, iterator.FromSlice(staker), stakerIterator) + require.Equal( + []*Staker{staker}, + iterator.ToSlice(stakerIterator), + ) _, err = state.GetPendingValidator(constants.PrimaryNetworkID, defaultValidatorNodeID) require.ErrorIs(err, database.ErrNotFound) delegatorIterator, err = state.GetPendingDelegatorIterator(constants.PrimaryNetworkID, defaultValidatorNodeID) require.NoError(err) - assertIteratorsEqual(t, iterator.Empty[*Staker]{}, delegatorIterator) + require.Empty( + iterator.ToSlice(delegatorIterator), + ) } // Whenever we store a staker, a whole bunch a data structures are updated From a02b51ccc6070d1944bd9dc6aad22f157b1c5ae5 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 10 Sep 2024 22:23:06 -0400 Subject: [PATCH 022/155] Finish diff implementation --- utils/iterator/deduplicate.go | 40 ++++++++ utils/iterator/deduplicate_test.go | 18 ++++ vms/platformvm/state/diff_test.go | 159 +++++++++++++++++++++++++++++ vms/platformvm/state/expiry.go | 16 +-- 4 files changed, 227 insertions(+), 6 deletions(-) create mode 100644 utils/iterator/deduplicate.go create mode 100644 utils/iterator/deduplicate_test.go diff --git a/utils/iterator/deduplicate.go b/utils/iterator/deduplicate.go new file mode 100644 index 000000000000..b36c1e0a1102 --- /dev/null +++ b/utils/iterator/deduplicate.go @@ -0,0 +1,40 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package iterator + +import "github.com/ava-labs/avalanchego/utils/set" + +var _ Iterator[any] = (*deduplicator[any])(nil) + +type deduplicator[T comparable] struct { + it Iterator[T] + seen set.Set[T] +} + +// Deduplicate returns an iterator that skips the elements that have already +// been returned from [it]. +func Deduplicate[T comparable](it Iterator[T]) Iterator[T] { + return &deduplicator[T]{ + it: it, + } +} + +func (i *deduplicator[_]) Next() bool { + for i.it.Next() { + element := i.it.Value() + if !i.seen.Contains(element) { + i.seen.Add(element) + return true + } + } + return false +} + +func (i *deduplicator[T]) Value() T { + return i.it.Value() +} + +func (i *deduplicator[_]) Release() { + i.it.Release() +} diff --git a/utils/iterator/deduplicate_test.go b/utils/iterator/deduplicate_test.go new file mode 100644 index 000000000000..291d186df574 --- /dev/null +++ b/utils/iterator/deduplicate_test.go @@ -0,0 +1,18 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package iterator + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDeduplicate(t *testing.T) { + require.Equal( + t, + []int{0, 1, 2, 3}, + ToSlice(Deduplicate(FromSlice(0, 1, 2, 1, 2, 0, 3))), + ) +} diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index c4246b8aaeec..b0b2247a1504 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/iterator/iteratormock" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/gas" "github.com/ava-labs/avalanchego/vms/platformvm/fx/fxmock" @@ -112,6 +113,154 @@ func TestDiffCurrentSupply(t *testing.T) { assertChainsEqual(t, state, d) } +func TestDiffExpiry(t *testing.T) { + type op struct { + put bool + entry ExpiryEntry + } + tests := []struct { + name string + initialExpiries []ExpiryEntry + ops []op + expectedExpiries []ExpiryEntry + }{ + { + name: "empty noop", + }, + { + name: "insert", + ops: []op{ + { + put: true, + entry: ExpiryEntry{Timestamp: 1}, + }, + }, + expectedExpiries: []ExpiryEntry{ + {Timestamp: 1}, + }, + }, + { + name: "remove", + initialExpiries: []ExpiryEntry{ + {Timestamp: 1}, + }, + ops: []op{ + { + put: false, + entry: ExpiryEntry{Timestamp: 1}, + }, + }, + }, + { + name: "add and immediately remove", + ops: []op{ + { + put: true, + entry: ExpiryEntry{Timestamp: 1}, + }, + { + put: false, + entry: ExpiryEntry{Timestamp: 1}, + }, + }, + }, + { + name: "add + remove + add", + ops: []op{ + { + put: true, + entry: ExpiryEntry{Timestamp: 1}, + }, + { + put: false, + entry: ExpiryEntry{Timestamp: 1}, + }, + { + put: true, + entry: ExpiryEntry{Timestamp: 1}, + }, + }, + expectedExpiries: []ExpiryEntry{ + {Timestamp: 1}, + }, + }, + { + name: "everything", + initialExpiries: []ExpiryEntry{ + {Timestamp: 1}, + {Timestamp: 2}, + {Timestamp: 3}, + }, + ops: []op{ + { + put: false, + entry: ExpiryEntry{Timestamp: 1}, + }, + { + put: false, + entry: ExpiryEntry{Timestamp: 2}, + }, + { + put: true, + entry: ExpiryEntry{Timestamp: 1}, + }, + }, + expectedExpiries: []ExpiryEntry{ + {Timestamp: 1}, + {Timestamp: 3}, + }, + }, + } + + for _, test := range tests { + require := require.New(t) + + state := newTestState(t, memdb.New()) + for _, expiry := range test.initialExpiries { + state.PutExpiry(expiry) + } + + d, err := NewDiffOn(state) + require.NoError(err) + + otherExpiries := set.Of(test.initialExpiries...) + for _, op := range test.ops { + if op.put { + d.PutExpiry(op.entry) + } else { + d.DeleteExpiry(op.entry) + } + otherExpiries.Add(op.entry) + } + otherExpiries.Remove(test.expectedExpiries...) 
+ + verifyChain := func(chain Chain) { + expiryIterator, err := chain.GetExpiryIterator() + require.NoError(err) + require.Equal( + test.expectedExpiries, + iterator.ToSlice(expiryIterator), + ) + + for _, expiry := range test.expectedExpiries { + has, err := chain.HasExpiry(expiry) + require.NoError(err) + require.True(has) + } + for expiry := range otherExpiries { + has, err := chain.HasExpiry(expiry) + require.NoError(err) + require.False(has) + } + } + + verifyChain(d) + require.NoError(d.Apply(state)) + verifyChain(state) + assertChainsEqual(t, d, state) + } +} + func TestDiffCurrentValidator(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) @@ -527,6 +676,16 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { t.Helper() + expectedExpiryIterator, expectedErr := expected.GetExpiryIterator() + actualExpiryIterator, actualErr := actual.GetExpiryIterator() + require.Equal(expectedErr, actualErr) + if expectedErr == nil { + require.Equal( + iterator.ToSlice(expectedExpiryIterator), + iterator.ToSlice(actualExpiryIterator), + ) + } + expectedCurrentStakerIterator, expectedErr := expected.GetCurrentStakerIterator() actualCurrentStakerIterator, actualErr := actual.GetCurrentStakerIterator() require.Equal(expectedErr, actualErr) diff --git a/vms/platformvm/state/expiry.go b/vms/platformvm/state/expiry.go index dde2e9bbc91b..9bd0139267e5 100644 --- a/vms/platformvm/state/expiry.go +++ b/vms/platformvm/state/expiry.go @@ -87,13 +87,17 @@ func (e *expiryDiff) DeleteExpiry(entry ExpiryEntry) { } func (e *expiryDiff) getExpiryIterator(parentIterator iterator.Iterator[ExpiryEntry]) iterator.Iterator[ExpiryEntry] { - return iterator.Filter( - iterator.Merge( - ExpiryEntry.Less, - parentIterator, - iterator.FromTree(e.added), + // The iterators are deduplicated so that additions that were present in the + // parent iterator are not duplicated. 
+ return iterator.Deduplicate( + iterator.Filter( + iterator.Merge( + ExpiryEntry.Less, + parentIterator, + iterator.FromTree(e.added), + ), + e.removed.Contains, ), - e.removed.Contains, ) } From 7bda9b1e58b6f1ecf273cc4c264379e6b580dd2c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 10 Sep 2024 23:01:10 -0400 Subject: [PATCH 023/155] Finish state implementation --- vms/platformvm/state/state.go | 48 ++++++++++++++++++++++++++++++ vms/platformvm/state/state_test.go | 32 ++++++++++++++++++++ 2 files changed, 80 insertions(+) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 87e63bd9b9b2..877f6c2b1208 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -305,6 +305,7 @@ type state struct { expiry *btree.BTreeG[ExpiryEntry] expiryDiff *expiryDiff + expiryDB database.Database currentStakers *baseStakers pendingStakers *baseStakers @@ -619,6 +620,7 @@ func New( expiry: btree.NewG(defaultTreeDegree, ExpiryEntry.Less), expiryDiff: newExpiryDiff(), + expiryDB: prefixdb.New(ExpiryReplayProtectionPrefix, baseDB), currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), @@ -1367,6 +1369,7 @@ func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) er func (s *state) load() error { return errors.Join( s.loadMetadata(), + s.loadExpiry(), s.loadCurrentValidators(), s.loadPendingValidators(), s.initValidatorSets(), @@ -1438,6 +1441,23 @@ func (s *state) loadMetadata() error { return nil } +func (s *state) loadExpiry() error { + it := s.expiryDB.NewIterator() + defer it.Release() + + for it.Next() { + key := it.Key() + + var entry ExpiryEntry + if err := entry.Unmarshal(key); err != nil { + return err + } + s.expiry.ReplaceOrInsert(entry) + } + + return nil +} + func (s *state) loadCurrentValidators() error { s.currentStakers = newBaseStakers() @@ -1736,6 +1756,7 @@ func (s *state) write(updateValidators bool, height uint64) error { return errors.Join( s.writeBlocks(), + s.writeExpiry(), s.writeCurrentStakers(updateValidators, height, codecVersion), s.writePendingStakers(), s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers @@ -1754,6 +1775,7 @@ func (s *state) write(updateValidators bool, height uint64) error { func (s *state) Close() error { return errors.Join( + s.expiryDB.Close(), s.pendingSubnetValidatorBaseDB.Close(), s.pendingSubnetDelegatorBaseDB.Close(), s.pendingDelegatorBaseDB.Close(), @@ -1960,6 +1982,32 @@ func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { return blkID, nil } +func (s *state) writeExpiry() error { + it := iterator.FromTree(s.expiryDiff.added) + defer it.Release() + + for it.Next() { + entry := it.Value() + s.expiry.ReplaceOrInsert(entry) + + key := entry.Marshal() + if err := s.expiryDB.Put(key, nil); err != nil { + return err + } + } + for removed := range s.expiryDiff.removed { + s.expiry.Delete(removed) + + key := removed.Marshal() + if err := s.expiryDB.Delete(key); err != nil { + return err + } + } + + s.expiryDiff = newExpiryDiff() + return nil +} + func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { delete(s.currentStakers.validatorDiffs, subnetID) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 515794364d71..a0dcd0cb7dcc 100644 --- a/vms/platformvm/state/state_test.go +++ 
b/vms/platformvm/state/state_test.go @@ -1577,3 +1577,35 @@ func TestGetFeeStateErrors(t *testing.T) { }) } } + +// Verify that committing the state writes the expiry changes to the database +// and that loading the state fetches the expiry from the database. +func TestStateExpiryCommitAndLoad(t *testing.T) { + require := require.New(t) + + db := memdb.New() + s := newTestState(t, db) + + // Populate an entry. + expiry := ExpiryEntry{ + Timestamp: 1, + } + s.PutExpiry(expiry) + require.NoError(s.Commit()) + + // Verify that the entry was written and loaded correctly. + s = newTestState(t, db) + has, err := s.HasExpiry(expiry) + require.NoError(err) + require.True(has) + + // Delete an entry. + s.DeleteExpiry(expiry) + require.NoError(s.Commit()) + + // Verify that the entry was deleted correctly. + s = newTestState(t, db) + has, err = s.HasExpiry(expiry) + require.NoError(err) + require.False(has) +} From f24ca9f8d27fbc7ef8d01c701d0ab7302d5618db Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 10 Sep 2024 23:39:09 -0400 Subject: [PATCH 024/155] lint --- vms/platformvm/state/expiry_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vms/platformvm/state/expiry_test.go b/vms/platformvm/state/expiry_test.go index 7fa82ab06aac..38a0d07f9ab0 100644 --- a/vms/platformvm/state/expiry_test.go +++ b/vms/platformvm/state/expiry_test.go @@ -22,8 +22,7 @@ func FuzzExpiryEntryMarshal(f *testing.F) { marshalledData := entry.Marshal() var parsedEntry ExpiryEntry - err := parsedEntry.Unmarshal(marshalledData) - require.NoError(err) + require.NoError(parsedEntry.Unmarshal(marshalledData)) require.Equal(entry, parsedEntry) }) } From 463ce028a92e78f9b448b1dd26ccc3fc9ae68b51 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 10 Sep 2024 23:45:11 -0400 Subject: [PATCH 025/155] Implement iterator deduplicator --- utils/iterator/deduplicate.go | 40 ++++++++++++++++++++++++++++++ utils/iterator/deduplicate_test.go | 18 ++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 utils/iterator/deduplicate.go create mode 100644 utils/iterator/deduplicate_test.go diff --git a/utils/iterator/deduplicate.go b/utils/iterator/deduplicate.go new file mode 100644 index 000000000000..b36c1e0a1102 --- /dev/null +++ b/utils/iterator/deduplicate.go @@ -0,0 +1,40 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package iterator + +import "github.com/ava-labs/avalanchego/utils/set" + +var _ Iterator[any] = (*deduplicator[any])(nil) + +type deduplicator[T comparable] struct { + it Iterator[T] + seen set.Set[T] +} + +// Deduplicate returns an iterator that skips the elements that have already +// been returned from [it]. +func Deduplicate[T comparable](it Iterator[T]) Iterator[T] { + return &deduplicator[T]{ + it: it, + } +} + +func (i *deduplicator[_]) Next() bool { + for i.it.Next() { + element := i.it.Value() + if !i.seen.Contains(element) { + i.seen.Add(element) + return true + } + } + return false +} + +func (i *deduplicator[T]) Value() T { + return i.it.Value() +} + +func (i *deduplicator[_]) Release() { + i.it.Release() +} diff --git a/utils/iterator/deduplicate_test.go b/utils/iterator/deduplicate_test.go new file mode 100644 index 000000000000..291d186df574 --- /dev/null +++ b/utils/iterator/deduplicate_test.go @@ -0,0 +1,18 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package iterator + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDeduplicate(t *testing.T) { + require.Equal( + t, + []int{0, 1, 2, 3}, + ToSlice(Deduplicate(FromSlice(0, 1, 2, 1, 2, 0, 3))), + ) +} From 3cc413a1b63753b38cacdc1411a1e552d87afa52 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 11 Sep 2024 11:34:40 -0400 Subject: [PATCH 026/155] Reuse Filter --- utils/iterator/deduplicate.go | 40 ------------------------------ utils/iterator/deduplicate_test.go | 18 -------------- utils/iterator/filter.go | 15 +++++++++++ utils/iterator/filter_test.go | 15 ++++++++--- 4 files changed, 27 insertions(+), 61 deletions(-) delete mode 100644 utils/iterator/deduplicate.go delete mode 100644 utils/iterator/deduplicate_test.go diff --git a/utils/iterator/deduplicate.go b/utils/iterator/deduplicate.go deleted file mode 100644 index b36c1e0a1102..000000000000 --- a/utils/iterator/deduplicate.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package iterator - -import "github.com/ava-labs/avalanchego/utils/set" - -var _ Iterator[any] = (*deduplicator[any])(nil) - -type deduplicator[T comparable] struct { - it Iterator[T] - seen set.Set[T] -} - -// Deduplicate returns an iterator that skips the elements that have already -// been returned from [it]. -func Deduplicate[T comparable](it Iterator[T]) Iterator[T] { - return &deduplicator[T]{ - it: it, - } -} - -func (i *deduplicator[_]) Next() bool { - for i.it.Next() { - element := i.it.Value() - if !i.seen.Contains(element) { - i.seen.Add(element) - return true - } - } - return false -} - -func (i *deduplicator[T]) Value() T { - return i.it.Value() -} - -func (i *deduplicator[_]) Release() { - i.it.Release() -} diff --git a/utils/iterator/deduplicate_test.go b/utils/iterator/deduplicate_test.go deleted file mode 100644 index 291d186df574..000000000000 --- a/utils/iterator/deduplicate_test.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package iterator - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestDeduplicate(t *testing.T) { - require.Equal( - t, - []int{0, 1, 2, 3}, - ToSlice(Deduplicate(FromSlice(0, 1, 2, 1, 2, 0, 3))), - ) -} diff --git a/utils/iterator/filter.go b/utils/iterator/filter.go index e8a11464457d..f26b082aeab2 100644 --- a/utils/iterator/filter.go +++ b/utils/iterator/filter.go @@ -3,6 +3,8 @@ package iterator +import "github.com/ava-labs/avalanchego/utils/set" + var _ Iterator[any] = (*filtered[any])(nil) type filtered[T any] struct { @@ -19,6 +21,19 @@ func Filter[T any](it Iterator[T], filter func(T) bool) Iterator[T] { } } +// Deduplicate returns an iterator that skips the elements that have already +// been returned from [it]. 
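+//
+// Every distinct element that has been returned is remembered for the
+// lifetime of the iterator, so memory use grows with the number of unique
+// elements seen. For example, Deduplicate(FromSlice(0, 1, 2, 1, 2, 0, 3))
+// yields 0, 1, 2, 3.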
+func Deduplicate[T comparable](it Iterator[T]) Iterator[T] { + var seen set.Set[T] + return Filter(it, func(e T) bool { + if seen.Contains(e) { + return true + } + seen.Add(e) + return false + }) +} + func (i *filtered[_]) Next() bool { for i.it.Next() { element := i.it.Value() diff --git a/utils/iterator/filter_test.go b/utils/iterator/filter_test.go index bf523017fe92..56c47892095e 100644 --- a/utils/iterator/filter_test.go +++ b/utils/iterator/filter_test.go @@ -10,8 +10,9 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/vms/platformvm/state" + + . "github.com/ava-labs/avalanchego/utils/iterator" ) func TestFilter(t *testing.T) { @@ -40,8 +41,8 @@ func TestFilter(t *testing.T) { stakers[3].TxID: stakers[3], } - it := iterator.Filter( - iterator.FromSlice(stakers[:3]...), + it := Filter( + FromSlice(stakers[:3]...), func(staker *state.Staker) bool { _, ok := maskedStakers[staker.TxID] return ok @@ -55,3 +56,11 @@ func TestFilter(t *testing.T) { it.Release() require.False(it.Next()) } + +func TestDeduplicate(t *testing.T) { + require.Equal( + t, + []int{0, 1, 2, 3}, + ToSlice(Deduplicate(FromSlice(0, 1, 2, 1, 2, 0, 3))), + ) +} From 114beeb53adfb00e629f60e6676e28e225aae350 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 11 Sep 2024 12:05:55 -0400 Subject: [PATCH 027/155] nit --- vms/platformvm/state/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 877f6c2b1208..61991e7ac6db 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1450,7 +1450,7 @@ func (s *state) loadExpiry() error { var entry ExpiryEntry if err := entry.Unmarshal(key); err != nil { - return err + return fmt.Errorf("failed to unmarshal ExpiryEntry during load: %w", err) } s.expiry.ReplaceOrInsert(entry) } From 57810a00dda1bd421a5449e1761844f7696b06b2 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 11 Sep 2024 19:09:49 -0400 Subject: [PATCH 028/155] Add comments --- vms/platformvm/state/expiry.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/vms/platformvm/state/expiry.go b/vms/platformvm/state/expiry.go index 9bd0139267e5..5a00297c439e 100644 --- a/vms/platformvm/state/expiry.go +++ b/vms/platformvm/state/expiry.go @@ -25,9 +25,19 @@ var ( ) type Expiry interface { + // GetExpiryIterator returns an iterator of all the expiry entries in order + // of lowest to highest timestamp. GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) + + // HasExpiry returns true if the database has the specified entry. HasExpiry(ExpiryEntry) (bool, error) + + // PutExpiry adds the entry to the database. If the entry already exists, it + // is a noop. PutExpiry(ExpiryEntry) + + // DeleteExpiry removes the entry from the database. If the entry doesn't + // exist, it is a noop. 
DeleteExpiry(ExpiryEntry) } From e89671a15e6f1b8a27397836689d27b6d6149b65 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 12 Sep 2024 11:38:43 -0400 Subject: [PATCH 029/155] otherExpiries -> unexpectedExpiries --- vms/platformvm/state/diff_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index b0b2247a1504..0c763559157d 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -223,16 +223,16 @@ func TestDiffExpiry(t *testing.T) { d, err := NewDiffOn(state) require.NoError(err) - otherExpiries := set.Of(test.initialExpiries...) + unexpectedExpiries := set.Of(test.initialExpiries...) for _, op := range test.ops { if op.put { d.PutExpiry(op.entry) } else { d.DeleteExpiry(op.entry) } - otherExpiries.Add(op.entry) + unexpectedExpiries.Add(op.entry) } - otherExpiries.Remove(test.expectedExpiries...) + unexpectedExpiries.Remove(test.expectedExpiries...) verifyChain := func(chain Chain) { expiryIterator, err := chain.GetExpiryIterator() @@ -247,7 +247,7 @@ func TestDiffExpiry(t *testing.T) { require.NoError(err) require.True(has) } - for expiry := range otherExpiries { + for expiry := range unexpectedExpiries { has, err := chain.HasExpiry(expiry) require.NoError(err) require.False(has) From 72f972c3ece6a6a017b8fb65468ebd371bd3e66e Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 12 Sep 2024 12:00:30 -0400 Subject: [PATCH 030/155] restructure test --- vms/platformvm/state/diff_test.go | 40 ++++++++++++++++--------------- vms/platformvm/state/expiry.go | 14 +++++++---- 2 files changed, 31 insertions(+), 23 deletions(-) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 0c763559157d..b71599928e23 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -119,10 +119,9 @@ func TestDiffExpiry(t *testing.T) { entry ExpiryEntry } tests := []struct { - name string - initialExpiries []ExpiryEntry - ops []op - expectedExpiries []ExpiryEntry + name string + initialExpiries []ExpiryEntry + ops []op }{ { name: "empty noop", @@ -135,9 +134,6 @@ func TestDiffExpiry(t *testing.T) { entry: ExpiryEntry{Timestamp: 1}, }, }, - expectedExpiries: []ExpiryEntry{ - {Timestamp: 1}, - }, }, { name: "remove", @@ -180,9 +176,6 @@ func TestDiffExpiry(t *testing.T) { entry: ExpiryEntry{Timestamp: 1}, }, }, - expectedExpiries: []ExpiryEntry{ - {Timestamp: 1}, - }, }, { name: "everything", @@ -205,10 +198,6 @@ func TestDiffExpiry(t *testing.T) { entry: ExpiryEntry{Timestamp: 1}, }, }, - expectedExpiries: []ExpiryEntry{ - {Timestamp: 1}, - {Timestamp: 3}, - }, }, } @@ -223,26 +212,39 @@ func TestDiffExpiry(t *testing.T) { d, err := NewDiffOn(state) require.NoError(err) - unexpectedExpiries := set.Of(test.initialExpiries...) + var ( + expectedExpiries = set.Of(test.initialExpiries...) + unexpectedExpiries set.Set[ExpiryEntry] + ) for _, op := range test.ops { if op.put { d.PutExpiry(op.entry) + expectedExpiries.Add(op.entry) + unexpectedExpiries.Remove(op.entry) } else { d.DeleteExpiry(op.entry) + expectedExpiries.Remove(op.entry) + unexpectedExpiries.Add(op.entry) } - unexpectedExpiries.Add(op.entry) } - unexpectedExpiries.Remove(test.expectedExpiries...) + + // If expectedExpiries is empty, we want expectedExpiriesSlice to be + // nil. 
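+		// iterator.ToSlice is expected to return a nil slice for an empty
+		// iterator, and require.Equal treats a nil slice and an empty non-nil
+		// slice as different values.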
+ var expectedExpiriesSlice []ExpiryEntry + if expectedExpiries.Len() > 0 { + expectedExpiriesSlice = expectedExpiries.List() + utils.Sort(expectedExpiriesSlice) + } verifyChain := func(chain Chain) { expiryIterator, err := chain.GetExpiryIterator() require.NoError(err) require.Equal( - test.expectedExpiries, + expectedExpiriesSlice, iterator.ToSlice(expiryIterator), ) - for _, expiry := range test.expectedExpiries { + for expiry := range expectedExpiries { has, err := chain.HasExpiry(expiry) require.NoError(err) require.True(has) diff --git a/vms/platformvm/state/expiry.go b/vms/platformvm/state/expiry.go index 5a00297c439e..ccd37f17db4c 100644 --- a/vms/platformvm/state/expiry.go +++ b/vms/platformvm/state/expiry.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/set" ) @@ -22,6 +23,7 @@ var ( errUnexpectedExpiryEntryLength = fmt.Errorf("expected expiry entry length %d", expiryEntryLength) _ btree.LessFunc[ExpiryEntry] = ExpiryEntry.Less + _ utils.Sortable[ExpiryEntry] = ExpiryEntry{} ) type Expiry interface { @@ -63,15 +65,19 @@ func (e *ExpiryEntry) Unmarshal(data []byte) error { return nil } -// Invariant: Less produces the same ordering as the marshalled bytes. func (e ExpiryEntry) Less(o ExpiryEntry) bool { + return e.Compare(o) == -1 +} + +// Invariant: Compare produces the same ordering as the marshalled bytes. +func (e ExpiryEntry) Compare(o ExpiryEntry) int { switch { case e.Timestamp < o.Timestamp: - return true + return -1 case e.Timestamp > o.Timestamp: - return false + return 1 default: - return e.ValidationID.Compare(o.ValidationID) == -1 + return e.ValidationID.Compare(o.ValidationID) } } From e8eea6facc0c944ca87d821b1245f7172943f446 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 12 Sep 2024 16:51:04 -0400 Subject: [PATCH 031/155] reduce diff --- vms/platformvm/state/diff.go | 16 ++++++------- vms/platformvm/state/expiry.go | 41 +++++++++++----------------------- vms/platformvm/state/state.go | 27 +++++++++------------- 3 files changed, 31 insertions(+), 53 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 81511a5b03dc..24bdabfa96da 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -164,7 +164,7 @@ func (d *diff) GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) { } func (d *diff) HasExpiry(entry ExpiryEntry) (bool, error) { - if has, modified := d.expiryDiff.hasExpiry(entry); modified { + if has, modified := d.expiryDiff.modified[entry]; modified { return has, nil } @@ -489,14 +489,12 @@ func (d *diff) Apply(baseState Chain) error { for subnetID, supply := range d.currentSupply { baseState.SetCurrentSupply(subnetID, supply) } - addedExpiryIterator := iterator.FromTree(d.expiryDiff.added) - for addedExpiryIterator.Next() { - entry := addedExpiryIterator.Value() - baseState.PutExpiry(entry) - } - addedExpiryIterator.Release() - for removed := range d.expiryDiff.removed { - baseState.DeleteExpiry(removed) + for entry, isAdded := range d.expiryDiff.modified { + if isAdded { + baseState.PutExpiry(entry) + } else { + baseState.DeleteExpiry(entry) + } } for _, subnetValidatorDiffs := range d.currentStakerDiffs.validatorDiffs { for _, validatorDiff := range subnetValidatorDiffs { diff --git a/vms/platformvm/state/expiry.go b/vms/platformvm/state/expiry.go index ccd37f17db4c..b50439ddf20c 100644 --- 
a/vms/platformvm/state/expiry.go +++ b/vms/platformvm/state/expiry.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/iterator" - "github.com/ava-labs/avalanchego/utils/set" ) // expiryEntry = [timestamp] + [validationID] @@ -82,48 +81,34 @@ func (e ExpiryEntry) Compare(o ExpiryEntry) int { } type expiryDiff struct { - added *btree.BTreeG[ExpiryEntry] - removed set.Set[ExpiryEntry] + modified map[ExpiryEntry]bool // bool represents isAdded + added *btree.BTreeG[ExpiryEntry] } func newExpiryDiff() *expiryDiff { return &expiryDiff{ - added: btree.NewG(defaultTreeDegree, ExpiryEntry.Less), + modified: make(map[ExpiryEntry]bool), + added: btree.NewG(defaultTreeDegree, ExpiryEntry.Less), } } func (e *expiryDiff) PutExpiry(entry ExpiryEntry) { + e.modified[entry] = true e.added.ReplaceOrInsert(entry) - e.removed.Remove(entry) } func (e *expiryDiff) DeleteExpiry(entry ExpiryEntry) { + e.modified[entry] = false e.added.Delete(entry) - e.removed.Add(entry) } func (e *expiryDiff) getExpiryIterator(parentIterator iterator.Iterator[ExpiryEntry]) iterator.Iterator[ExpiryEntry] { - // The iterators are deduplicated so that additions that were present in the - // parent iterator are not duplicated. - return iterator.Deduplicate( - iterator.Filter( - iterator.Merge( - ExpiryEntry.Less, - parentIterator, - iterator.FromTree(e.added), - ), - e.removed.Contains, - ), + return iterator.Merge( + ExpiryEntry.Less, + iterator.Filter(parentIterator, func(entry ExpiryEntry) bool { + _, ok := e.modified[entry] + return ok + }), + iterator.FromTree(e.added), ) } - -func (e *expiryDiff) hasExpiry(entry ExpiryEntry) (bool, bool) { - switch { - case e.removed.Contains(entry): - return false, true - case e.added.Has(entry): - return true, true - default: - return false, false - } -} diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 61991e7ac6db..65623442358b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -703,7 +703,7 @@ func (s *state) GetExpiryIterator() (iterator.Iterator[ExpiryEntry], error) { } func (s *state) HasExpiry(entry ExpiryEntry) (bool, error) { - if has, modified := s.expiryDiff.hasExpiry(entry); modified { + if has, modified := s.expiryDiff.modified[entry]; modified { return has, nil } return s.expiry.Has(entry), nil @@ -1983,23 +1983,18 @@ func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { } func (s *state) writeExpiry() error { - it := iterator.FromTree(s.expiryDiff.added) - defer it.Release() - - for it.Next() { - entry := it.Value() - s.expiry.ReplaceOrInsert(entry) - + for entry, isAdded := range s.expiryDiff.modified { key := entry.Marshal() - if err := s.expiryDB.Put(key, nil); err != nil { - return err - } - } - for removed := range s.expiryDiff.removed { - s.expiry.Delete(removed) - key := removed.Marshal() - if err := s.expiryDB.Delete(key); err != nil { + var err error + if isAdded { + s.expiry.ReplaceOrInsert(entry) + err = s.expiryDB.Put(key, nil) + } else { + s.expiry.Delete(entry) + err = s.expiryDB.Delete(key) + } + if err != nil { return err } } From d67408dac3f3bba6e7ba2b48a3c5d9bcb81d9fac Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 12 Sep 2024 16:53:05 -0400 Subject: [PATCH 032/155] nit --- vms/platformvm/state/state.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 65623442358b..50f00e1fe8f7 100644 --- 
a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1984,9 +1984,10 @@ func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { func (s *state) writeExpiry() error { for entry, isAdded := range s.expiryDiff.modified { - key := entry.Marshal() - - var err error + var ( + key = entry.Marshal() + err error + ) if isAdded { s.expiry.ReplaceOrInsert(entry) err = s.expiryDB.Put(key, nil) From 6e457faec93519f76fb806a9390417c92f5b579e Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 13 Sep 2024 10:10:54 -0400 Subject: [PATCH 033/155] Implement acp-77 state diff --- vms/platformvm/state/diff.go | 68 ++++++ vms/platformvm/state/state.go | 1 + vms/platformvm/state/subnet_only_validator.go | 205 ++++++++++++++++-- .../state/subnet_only_validator_test.go | 179 ++++++++++++--- 4 files changed, 405 insertions(+), 48 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 24bdabfa96da..8d9eb5438fcf 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -43,6 +43,7 @@ type diff struct { currentSupply map[ids.ID]uint64 expiryDiff *expiryDiff + sovDiff *subnetOnlyValidatorsDiff currentStakerDiffs diffStakers // map of subnetID -> nodeID -> total accrued delegatee rewards @@ -82,6 +83,7 @@ func NewDiff( feeState: parentState.GetFeeState(), accruedFees: parentState.GetAccruedFees(), expiryDiff: newExpiryDiff(), + sovDiff: newSubnetOnlyValidatorsDiff(), subnetOwners: make(map[ids.ID]fx.Owner), subnetManagers: make(map[ids.ID]chainIDAndAddr), }, nil @@ -184,6 +186,67 @@ func (d *diff) DeleteExpiry(entry ExpiryEntry) { d.expiryDiff.DeleteExpiry(entry) } +func (d *diff) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[SubnetOnlyValidator], error) { + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + + parentIterator, err := parentState.GetActiveSubnetOnlyValidatorsIterator() + if err != nil { + return nil, err + } + + return d.sovDiff.getActiveSubnetOnlyValidatorsIterator(parentIterator), nil +} + +func (d *diff) NumActiveSubnetOnlyValidators() (int, error) { + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return 0, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + + count, err := parentState.NumActiveSubnetOnlyValidators() + if err != nil { + return 0, err + } + + return count + d.sovDiff.numAddedActive, nil +} + +func (d *diff) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { + if sov, modified := d.sovDiff.modified[validationID]; modified { + if sov.Weight == 0 { + return SubnetOnlyValidator{}, database.ErrNotFound + } + return sov, nil + } + + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return SubnetOnlyValidator{}, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + + return parentState.GetSubnetOnlyValidator(validationID) +} + +func (d *diff) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) { + if has, modified := d.sovDiff.hasSubnetOnlyValidator(subnetID, nodeID); modified { + return has, nil + } + + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return false, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + + return parentState.HasSubnetOnlyValidator(subnetID, nodeID) +} + +func (d *diff) PutSubnetOnlyValidator(sov SubnetOnlyValidator) error { + return d.sovDiff.putSubnetOnlyValidator(d, sov) +} + func (d *diff) GetCurrentValidator(subnetID ids.ID, 
nodeID ids.NodeID) (*Staker, error) { // If the validator was modified in this diff, return the modified // validator. @@ -496,6 +559,11 @@ func (d *diff) Apply(baseState Chain) error { baseState.DeleteExpiry(entry) } } + for _, sov := range d.sovDiff.modified { + if err := baseState.PutSubnetOnlyValidator(sov); err != nil { + return err + } + } for _, subnetValidatorDiffs := range d.currentStakerDiffs.validatorDiffs { for _, validatorDiff := range subnetValidatorDiffs { switch validatorDiff.validatorStatus { diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 50f00e1fe8f7..b6961bf3f271 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -99,6 +99,7 @@ var ( // execution. type Chain interface { Expiry + SubnetOnlyValidators Stakers avax.UTXOAdder avax.UTXOGetter diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 5af028314e6c..cc30ca29f522 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -4,17 +4,57 @@ package state import ( + "bytes" + "errors" "fmt" "github.com/google/btree" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/vms/platformvm/block" ) -var _ btree.LessFunc[*SubnetOnlyValidator] = (*SubnetOnlyValidator).Less +var ( + _ btree.LessFunc[SubnetOnlyValidator] = SubnetOnlyValidator.Less + ErrMutatedSubnetOnlyValidator = errors.New("subnet only validator contains mutated constant fields") + ErrDuplicateSubnetOnlyValidator = errors.New("subnet only validator contains conflicting subnetID + nodeID pair") +) + +type SubnetOnlyValidators interface { + // GetActiveSubnetOnlyValidatorsIterator returns an iterator of all the + // active subnet only validators in increasing order of EndAccumulatedFee. + GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[SubnetOnlyValidator], error) + + // NumActiveSubnetOnlyValidators returns the number of currently active + // subnet only validators. + NumActiveSubnetOnlyValidators() (int, error) + + // GetSubnetOnlyValidator returns the validator with [validationID] if it + // exists. If the validator does not exist, [err] will equal + // [database.ErrNotFound]. + GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) + + // HasSubnetOnlyValidator returns the validator with [validationID] if it + // exists. If the validator does not exist, [err] will equal + // [database.ErrNotFound]. + HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) + + // PutSubnetOnlyValidator inserts [sov] as a validator. + // + // If inserting this validator attempts to modify any of the constant fields + // of the subnet only validator struct, an error will be returned. + // + // If inserting this validator would cause the mapping of subnetID+nodeID to + // validationID to be non-unique, an error will be returned. + PutSubnetOnlyValidator(sov SubnetOnlyValidator) error +} + +// SubnetOnlyValidator defines an ACP-77 validator. For a given ValidationID, it +// is expected for SubnetID, NodeID, PublicKey, RemainingBalanceOwner, and +// StartTime to be constant. type SubnetOnlyValidator struct { // ValidationID is not serialized because it is used as the key in the // database, so it doesn't need to be stored in the value. @@ -27,6 +67,10 @@ type SubnetOnlyValidator struct { // guaranteed to be populated. 
PublicKey []byte `serialize:"true"` + // RemainingBalanceOwner is the owner that will be used when returning the + // balance of the validator after removing accrued fees. + RemainingBalanceOwner []byte `serialize:"true"` + // StartTime is the unix timestamp, in seconds, when this validator was // added to the set. StartTime uint64 `serialize:"true"` @@ -46,44 +90,62 @@ type SubnetOnlyValidator struct { // accrue before this validator must be deactivated. It is equal to the // amount of fees this validator is willing to pay plus the amount of // globally accumulated fees when this validator started validating. + // + // If this value is 0, the validator is inactive. EndAccumulatedFee uint64 `serialize:"true"` } -// Less determines a canonical ordering of *SubnetOnlyValidators based on their -// EndAccumulatedFees and ValidationIDs. -// -// Returns true if: -// -// 1. This validator has a lower EndAccumulatedFee than the other. -// 2. This validator has an equal EndAccumulatedFee to the other and has a -// lexicographically lower ValidationID. -func (v *SubnetOnlyValidator) Less(o *SubnetOnlyValidator) bool { +func (v SubnetOnlyValidator) Less(o SubnetOnlyValidator) bool { + return v.Compare(o) == -1 +} + +// Compare determines a canonical ordering of *SubnetOnlyValidators based on +// their EndAccumulatedFees and ValidationIDs. Lower EndAccumulatedFees result +// in an earlier ordering. +func (v SubnetOnlyValidator) Compare(o SubnetOnlyValidator) int { switch { case v.EndAccumulatedFee < o.EndAccumulatedFee: - return true + return -1 case o.EndAccumulatedFee < v.EndAccumulatedFee: - return false + return 1 default: - return v.ValidationID.Compare(o.ValidationID) == -1 + return v.ValidationID.Compare(o.ValidationID) + } +} + +// validateConstants returns true if the constants of this validator have not +// been modified. 
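+//
+// Validators with different ValidationIDs are unrelated, so comparing them
+// always passes this check; only two records with the same ValidationID are
+// required to agree on SubnetID, NodeID, PublicKey, RemainingBalanceOwner,
+// and StartTime.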
+func (v SubnetOnlyValidator) validateConstants(o SubnetOnlyValidator) bool { + if v.ValidationID != o.ValidationID { + return true } + return v.SubnetID == o.SubnetID && + v.NodeID == o.NodeID && + bytes.Equal(v.PublicKey, o.PublicKey) && + bytes.Equal(v.RemainingBalanceOwner, o.RemainingBalanceOwner) && + v.StartTime == o.StartTime } -func getSubnetOnlyValidator(db database.KeyValueReader, validationID ids.ID) (*SubnetOnlyValidator, error) { +func (v SubnetOnlyValidator) isActive() bool { + return v.Weight != 0 && v.EndAccumulatedFee != 0 +} + +func getSubnetOnlyValidator(db database.KeyValueReader, validationID ids.ID) (SubnetOnlyValidator, error) { bytes, err := db.Get(validationID[:]) if err != nil { - return nil, err + return SubnetOnlyValidator{}, err } - vdr := &SubnetOnlyValidator{ + vdr := SubnetOnlyValidator{ ValidationID: validationID, } - if _, err = block.GenesisCodec.Unmarshal(bytes, vdr); err != nil { - return nil, fmt.Errorf("failed to unmarshal SubnetOnlyValidator: %w", err) + if _, err = block.GenesisCodec.Unmarshal(bytes, &vdr); err != nil { + return SubnetOnlyValidator{}, fmt.Errorf("failed to unmarshal SubnetOnlyValidator: %w", err) } return vdr, err } -func putSubnetOnlyValidator(db database.KeyValueWriter, vdr *SubnetOnlyValidator) error { +func putSubnetOnlyValidator(db database.KeyValueWriter, vdr SubnetOnlyValidator) error { bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, vdr) if err != nil { return fmt.Errorf("failed to marshal SubnetOnlyValidator: %w", err) @@ -94,3 +156,108 @@ func putSubnetOnlyValidator(db database.KeyValueWriter, vdr *SubnetOnlyValidator func deleteSubnetOnlyValidator(db database.KeyValueDeleter, validationID ids.ID) error { return db.Delete(validationID[:]) } + +type subnetIDNodeID struct { + subnetID ids.ID + nodeID ids.NodeID +} + +type subnetOnlyValidatorsDiff struct { + numAddedActive int // May be negative + modified map[ids.ID]SubnetOnlyValidator + modifiedHasNodeIDs map[subnetIDNodeID]bool + active *btree.BTreeG[SubnetOnlyValidator] +} + +func newSubnetOnlyValidatorsDiff() *subnetOnlyValidatorsDiff { + return &subnetOnlyValidatorsDiff{ + modified: make(map[ids.ID]SubnetOnlyValidator), + modifiedHasNodeIDs: make(map[subnetIDNodeID]bool), + active: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), + } +} + +func (d *subnetOnlyValidatorsDiff) getActiveSubnetOnlyValidatorsIterator(parentIterator iterator.Iterator[SubnetOnlyValidator]) iterator.Iterator[SubnetOnlyValidator] { + return iterator.Merge( + SubnetOnlyValidator.Less, + iterator.Filter(parentIterator, func(sov SubnetOnlyValidator) bool { + _, ok := d.modified[sov.ValidationID] + return ok + }), + iterator.FromTree(d.active), + ) +} + +func (d *subnetOnlyValidatorsDiff) hasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, bool) { + subnetIDNodeID := subnetIDNodeID{ + subnetID: subnetID, + nodeID: nodeID, + } + has, modified := d.modifiedHasNodeIDs[subnetIDNodeID] + return has, modified +} + +func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state SubnetOnlyValidators, sov SubnetOnlyValidator) error { + diff, err := numActiveSubnetOnlyValidatorChange(state, sov) + if err != nil { + return err + } + d.numAddedActive += diff + + if prevSOV, ok := d.modified[sov.ValidationID]; ok { + prevSubnetIDNodeID := subnetIDNodeID{ + subnetID: prevSOV.SubnetID, + nodeID: prevSOV.NodeID, + } + d.modifiedHasNodeIDs[prevSubnetIDNodeID] = false + d.active.Delete(prevSOV) + } + d.modified[sov.ValidationID] = sov + + subnetIDNodeID := subnetIDNodeID{ + 
subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + isDeleted := sov.Weight == 0 + d.modifiedHasNodeIDs[subnetIDNodeID] = !isDeleted + if isDeleted || sov.EndAccumulatedFee == 0 { + // Validator is being deleted or is inactive + return nil + } + d.active.ReplaceOrInsert(sov) + return nil +} + +// numActiveSubnetOnlyValidatorChange returns the change in the number of active +// subnet only validators if [sov] were to be inserted into [state]. If it is +// invalid for [sov] to be inserted, an error is returned. +func numActiveSubnetOnlyValidatorChange(state SubnetOnlyValidators, sov SubnetOnlyValidator) (int, error) { + switch priorSOV, err := state.GetSubnetOnlyValidator(sov.ValidationID); err { + case nil: + if !priorSOV.validateConstants(sov) { + return 0, ErrMutatedSubnetOnlyValidator + } + switch { + case !priorSOV.isActive() && sov.isActive(): + return 1, nil // Increasing the number of active validators + case priorSOV.isActive() && !sov.isActive(): + return -1, nil // Decreasing the number of active validators + default: + return 0, nil + } + case database.ErrNotFound: + has, err := state.HasSubnetOnlyValidator(sov.SubnetID, sov.NodeID) + if err != nil { + return 0, err + } + if has { + return 0, ErrDuplicateSubnetOnlyValidator + } + if sov.isActive() { + return 1, nil // Increasing the number of active validators + } + return 0, nil // Adding an inactive validator + default: + return 0, err + } +} diff --git a/vms/platformvm/state/subnet_only_validator_test.go b/vms/platformvm/state/subnet_only_validator_test.go index bcbb21e0027c..e76c4b73a242 100644 --- a/vms/platformvm/state/subnet_only_validator_test.go +++ b/vms/platformvm/state/subnet_only_validator_test.go @@ -12,62 +12,171 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -func TestSubnetOnlyValidator_Less(t *testing.T) { +func TestSubnetOnlyValidator_Compare(t *testing.T) { tests := []struct { - name string - v *SubnetOnlyValidator - o *SubnetOnlyValidator - equal bool + name string + v SubnetOnlyValidator + o SubnetOnlyValidator + expected int }{ { name: "v.EndAccumulatedFee < o.EndAccumulatedFee", - v: &SubnetOnlyValidator{ + v: SubnetOnlyValidator{ ValidationID: ids.GenerateTestID(), EndAccumulatedFee: 1, }, - o: &SubnetOnlyValidator{ + o: SubnetOnlyValidator{ ValidationID: ids.GenerateTestID(), EndAccumulatedFee: 2, }, - equal: false, + expected: -1, }, { name: "v.EndAccumulatedFee = o.EndAccumulatedFee, v.ValidationID < o.ValidationID", - v: &SubnetOnlyValidator{ + v: SubnetOnlyValidator{ ValidationID: ids.ID{0}, EndAccumulatedFee: 1, }, - o: &SubnetOnlyValidator{ + o: SubnetOnlyValidator{ ValidationID: ids.ID{1}, EndAccumulatedFee: 1, }, - equal: false, + expected: -1, }, { name: "v.EndAccumulatedFee = o.EndAccumulatedFee, v.ValidationID = o.ValidationID", - v: &SubnetOnlyValidator{ + v: SubnetOnlyValidator{ ValidationID: ids.ID{0}, EndAccumulatedFee: 1, }, - o: &SubnetOnlyValidator{ + o: SubnetOnlyValidator{ ValidationID: ids.ID{0}, EndAccumulatedFee: 1, }, - equal: true, + expected: 0, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { require := require.New(t) - less := test.v.Less(test.o) - require.Equal(!test.equal, less) + 
require.Equal(test.expected, test.v.Compare(test.o)) + require.Equal(-test.expected, test.o.Compare(test.v)) + require.Equal(test.expected == -1, test.v.Less(test.o)) + require.False(test.o.Less(test.v)) + }) + } +} + +func TestSubnetOnlyValidator_validateConstants(t *testing.T) { + sov := SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + PublicKey: utils.RandomBytes(bls.PublicKeyLen), + RemainingBalanceOwner: utils.RandomBytes(32), + StartTime: rand.Uint64(), // #nosec G404 + } + + tests := []struct { + name string + v SubnetOnlyValidator + expected bool + }{ + { + name: "equal", + v: sov, + expected: true, + }, + { + name: "everything is different", + v: SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + PublicKey: utils.RandomBytes(bls.PublicKeyLen), + RemainingBalanceOwner: utils.RandomBytes(32), + StartTime: rand.Uint64(), // #nosec G404 + }, + expected: true, + }, + { + name: "different subnetID", + v: SubnetOnlyValidator{ + ValidationID: sov.ValidationID, + SubnetID: ids.GenerateTestID(), + NodeID: sov.NodeID, + PublicKey: sov.PublicKey, + RemainingBalanceOwner: sov.RemainingBalanceOwner, + StartTime: sov.StartTime, + }, + }, + { + name: "different nodeID", + v: SubnetOnlyValidator{ + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: ids.GenerateTestNodeID(), + PublicKey: sov.PublicKey, + RemainingBalanceOwner: sov.RemainingBalanceOwner, + StartTime: sov.StartTime, + }, + }, + { + name: "different publicKey", + v: SubnetOnlyValidator{ + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: utils.RandomBytes(bls.PublicKeyLen), + RemainingBalanceOwner: sov.RemainingBalanceOwner, + StartTime: sov.StartTime, + }, + }, + { + name: "different remainingBalanceOwner", + v: SubnetOnlyValidator{ + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: sov.PublicKey, + RemainingBalanceOwner: utils.RandomBytes(32), + StartTime: sov.StartTime, + }, + }, + { + name: "different startTime", + v: SubnetOnlyValidator{ + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: sov.PublicKey, + RemainingBalanceOwner: sov.RemainingBalanceOwner, + StartTime: rand.Uint64(), // #nosec G404 + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sov := sov + v := test.v + + randomize := func(v *SubnetOnlyValidator) { + v.Weight = rand.Uint64() // #nosec G404 + v.MinNonce = rand.Uint64() // #nosec G404 + v.EndAccumulatedFee = rand.Uint64() // #nosec G404 + } + randomize(&sov) + randomize(&v) - greater := test.o.Less(test.v) - require.False(greater) + require.Equal(t, test.expected, sov.validateConstants(v)) }) } } @@ -78,22 +187,34 @@ func TestSubnetOnlyValidator_DatabaseHelpers(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) + pk := bls.PublicFromSecretKey(sk) + pkBytes := bls.PublicKeyToUncompressedBytes(pk) + + var owner fx.Owner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + } + ownerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &owner) + require.NoError(err) - vdr := &SubnetOnlyValidator{ - ValidationID: ids.GenerateTestID(), - SubnetID: ids.GenerateTestID(), - NodeID: ids.GenerateTestNodeID(), - PublicKey: bls.PublicKeyToUncompressedBytes(bls.PublicFromSecretKey(sk)), - StartTime: rand.Uint64(), 
// #nosec G404 - Weight: rand.Uint64(), // #nosec G404 - MinNonce: rand.Uint64(), // #nosec G404 - EndAccumulatedFee: rand.Uint64(), // #nosec G404 + vdr := SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + PublicKey: pkBytes, + RemainingBalanceOwner: ownerBytes, + StartTime: rand.Uint64(), // #nosec G404 + Weight: rand.Uint64(), // #nosec G404 + MinNonce: rand.Uint64(), // #nosec G404 + EndAccumulatedFee: rand.Uint64(), // #nosec G404 } // Validator hasn't been put on disk yet gotVdr, err := getSubnetOnlyValidator(db, vdr.ValidationID) require.ErrorIs(err, database.ErrNotFound) - require.Nil(gotVdr) + require.Zero(gotVdr) // Place the validator on disk require.NoError(putSubnetOnlyValidator(db, vdr)) @@ -109,5 +230,5 @@ func TestSubnetOnlyValidator_DatabaseHelpers(t *testing.T) { // Verify that the validator has been removed from disk gotVdr, err = getSubnetOnlyValidator(db, vdr.ValidationID) require.ErrorIs(err, database.ErrNotFound) - require.Nil(gotVdr) + require.Zero(gotVdr) } From 86754d0e366b98a6551f45450c9b0e4fd2ae1fed Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 13 Sep 2024 10:43:21 -0400 Subject: [PATCH 034/155] Fix apply reordering --- vms/platformvm/state/diff.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 8d9eb5438fcf..e63358e1a515 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -559,7 +559,22 @@ func (d *diff) Apply(baseState Chain) error { baseState.DeleteExpiry(entry) } } + // Ensure that all sov deletions happen before any sov additions. This + // ensures that a subnetID+nodeID pair that was deleted and then re-added in + // a single diff can't get reordered into the addition happening first; + // which would return an error. 
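+	//
+	// For example, if a validation for a given subnetID+nodeID pair was removed
+	// and a new validation for the same pair was added within this diff,
+	// applying the addition before the deletion would make the pair look
+	// duplicated and PutSubnetOnlyValidator would return
+	// ErrDuplicateSubnetOnlyValidator.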
for _, sov := range d.sovDiff.modified { + if sov.Weight != 0 { + continue + } + if err := baseState.PutSubnetOnlyValidator(sov); err != nil { + return err + } + } + for _, sov := range d.sovDiff.modified { + if sov.Weight == 0 { + continue + } if err := baseState.PutSubnetOnlyValidator(sov); err != nil { return err } From c68afecbe471206e61bac2fc92bb03cdf1aa228c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 13 Sep 2024 23:11:12 -0400 Subject: [PATCH 035/155] Implement acp-77 state diff without historical diffs --- .../block/executor/verifier_test.go | 5 + vms/platformvm/state/diff.go | 40 +- vms/platformvm/state/diff_test.go | 476 ++++++++++++++++-- vms/platformvm/state/mock_chain.go | 73 +++ vms/platformvm/state/mock_diff.go | 73 +++ vms/platformvm/state/mock_state.go | 73 +++ vms/platformvm/state/state.go | 172 +++++++ vms/platformvm/state/subnet_only_validator.go | 6 +- 8 files changed, 845 insertions(+), 73 deletions(-) diff --git a/vms/platformvm/block/executor/verifier_test.go b/vms/platformvm/block/executor/verifier_test.go index 5b786b0e33d8..bf44ff70d7a6 100644 --- a/vms/platformvm/block/executor/verifier_test.go +++ b/vms/platformvm/block/executor/verifier_test.go @@ -104,6 +104,7 @@ func TestVerifierVisitProposalBlock(t *testing.T) { parentOnAcceptState.EXPECT().GetTimestamp().Return(timestamp).Times(2) parentOnAcceptState.EXPECT().GetFeeState().Return(gas.State{}).Times(2) parentOnAcceptState.EXPECT().GetAccruedFees().Return(uint64(0)).Times(2) + parentOnAcceptState.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(2) backend := &backend{ lastAccepted: parentID, @@ -336,6 +337,7 @@ func TestVerifierVisitStandardBlock(t *testing.T) { parentState.EXPECT().GetTimestamp().Return(timestamp).Times(1) parentState.EXPECT().GetFeeState().Return(gas.State{}).Times(1) parentState.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + parentState.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) parentStatelessBlk.EXPECT().Height().Return(uint64(1)).Times(1) mempool.EXPECT().Remove(apricotBlk.Txs()).Times(1) @@ -598,6 +600,7 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { s.EXPECT().GetTimestamp().Return(parentTime).Times(3) s.EXPECT().GetFeeState().Return(gas.State{}).Times(3) s.EXPECT().GetAccruedFees().Return(uint64(0)).Times(3) + s.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(3) onDecisionState, err := state.NewDiff(parentID, backend) require.NoError(err) @@ -696,6 +699,7 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { s.EXPECT().GetTimestamp().Return(parentTime).Times(3) s.EXPECT().GetFeeState().Return(gas.State{}).Times(3) s.EXPECT().GetAccruedFees().Return(uint64(0)).Times(3) + s.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(3) onDecisionState, err := state.NewDiff(parentID, backend) require.NoError(err) @@ -812,6 +816,7 @@ func TestVerifierVisitStandardBlockWithDuplicateInputs(t *testing.T) { parentState.EXPECT().GetTimestamp().Return(timestamp).Times(1) parentState.EXPECT().GetFeeState().Return(gas.State{}).Times(1) parentState.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + parentState.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) parentStatelessBlk.EXPECT().Parent().Return(grandParentID).Times(1) err = verifier.ApricotStandardBlock(blk) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index e63358e1a515..deb09026282d 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -35,9 +35,10 @@ type diff struct { parentID ids.ID stateVersions 
Versions - timestamp time.Time - feeState gas.State - accruedFees uint64 + timestamp time.Time + feeState gas.State + accruedFees uint64 + parentActiveSOVs int // Subnet ID --> supply of native asset of the subnet currentSupply map[ids.ID]uint64 @@ -77,15 +78,16 @@ func NewDiff( return nil, fmt.Errorf("%w: %s", ErrMissingParentState, parentID) } return &diff{ - parentID: parentID, - stateVersions: stateVersions, - timestamp: parentState.GetTimestamp(), - feeState: parentState.GetFeeState(), - accruedFees: parentState.GetAccruedFees(), - expiryDiff: newExpiryDiff(), - sovDiff: newSubnetOnlyValidatorsDiff(), - subnetOwners: make(map[ids.ID]fx.Owner), - subnetManagers: make(map[ids.ID]chainIDAndAddr), + parentID: parentID, + stateVersions: stateVersions, + timestamp: parentState.GetTimestamp(), + feeState: parentState.GetFeeState(), + accruedFees: parentState.GetAccruedFees(), + parentActiveSOVs: parentState.NumActiveSubnetOnlyValidators(), + expiryDiff: newExpiryDiff(), + sovDiff: newSubnetOnlyValidatorsDiff(), + subnetOwners: make(map[ids.ID]fx.Owner), + subnetManagers: make(map[ids.ID]chainIDAndAddr), }, nil } @@ -200,18 +202,8 @@ func (d *diff) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[Subnet return d.sovDiff.getActiveSubnetOnlyValidatorsIterator(parentIterator), nil } -func (d *diff) NumActiveSubnetOnlyValidators() (int, error) { - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return 0, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - - count, err := parentState.NumActiveSubnetOnlyValidators() - if err != nil { - return 0, err - } - - return count + d.sovDiff.numAddedActive, nil +func (d *diff) NumActiveSubnetOnlyValidators() int { + return d.parentActiveSOVs + d.sovDiff.numAddedActive } func (d *diff) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index b71599928e23..a34a5b40c8f2 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -4,6 +4,7 @@ package state import ( + "math/rand" "testing" "time" @@ -202,64 +203,429 @@ func TestDiffExpiry(t *testing.T) { } for _, test := range tests { - require := require.New(t) + t.Run(test.name, func(t *testing.T) { + require := require.New(t) - state := newTestState(t, memdb.New()) - for _, expiry := range test.initialExpiries { - state.PutExpiry(expiry) - } + state := newTestState(t, memdb.New()) + for _, expiry := range test.initialExpiries { + state.PutExpiry(expiry) + } - d, err := NewDiffOn(state) - require.NoError(err) + d, err := NewDiffOn(state) + require.NoError(err) - var ( - expectedExpiries = set.Of(test.initialExpiries...) - unexpectedExpiries set.Set[ExpiryEntry] - ) - for _, op := range test.ops { - if op.put { - d.PutExpiry(op.entry) - expectedExpiries.Add(op.entry) - unexpectedExpiries.Remove(op.entry) - } else { - d.DeleteExpiry(op.entry) - expectedExpiries.Remove(op.entry) - unexpectedExpiries.Add(op.entry) + var ( + expectedExpiries = set.Of(test.initialExpiries...) + unexpectedExpiries set.Set[ExpiryEntry] + ) + for _, op := range test.ops { + if op.put { + d.PutExpiry(op.entry) + expectedExpiries.Add(op.entry) + unexpectedExpiries.Remove(op.entry) + } else { + d.DeleteExpiry(op.entry) + expectedExpiries.Remove(op.entry) + unexpectedExpiries.Add(op.entry) + } } - } - // If expectedExpiries is empty, we want expectedExpiriesSlice to be - // nil. 
- var expectedExpiriesSlice []ExpiryEntry - if expectedExpiries.Len() > 0 { - expectedExpiriesSlice = expectedExpiries.List() - utils.Sort(expectedExpiriesSlice) - } + // If expectedExpiries is empty, we want expectedExpiriesSlice to be + // nil. + var expectedExpiriesSlice []ExpiryEntry + if expectedExpiries.Len() > 0 { + expectedExpiriesSlice = expectedExpiries.List() + utils.Sort(expectedExpiriesSlice) + } - verifyChain := func(chain Chain) { - expiryIterator, err := chain.GetExpiryIterator() + verifyChain := func(chain Chain) { + expiryIterator, err := chain.GetExpiryIterator() + require.NoError(err) + require.Equal( + expectedExpiriesSlice, + iterator.ToSlice(expiryIterator), + ) + + for expiry := range expectedExpiries { + has, err := chain.HasExpiry(expiry) + require.NoError(err) + require.True(has) + } + for expiry := range unexpectedExpiries { + has, err := chain.HasExpiry(expiry) + require.NoError(err) + require.False(has) + } + } + + verifyChain(d) + require.NoError(d.Apply(state)) + verifyChain(state) + assertChainsEqual(t, d, state) + }) + } +} + +func TestDiffSubnetOnlyValidators(t *testing.T) { + sov := SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + } + + tests := []struct { + name string + initial []SubnetOnlyValidator + sovs []SubnetOnlyValidator + }{ + { + name: "empty noop", + }, + { + name: "initially active not modified", + initial: []SubnetOnlyValidator{ + { + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + }, + { + name: "initially inactive not modified", + initial: []SubnetOnlyValidator{ + { + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + }, + }, + { + name: "initially active removed", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 0, // Removed + }, + }, + }, + { + name: "initially inactive removed", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 0, // Removed + }, + }, + }, + { + name: "increase active weight", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 2, // Increased + EndAccumulatedFee: 1, // Active + }, + }, + }, + { + name: "deactivate", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + }, 
+ }, + { + name: "reactivate", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + }, + { + name: "update multiple times", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 2, // Not removed + EndAccumulatedFee: 1, // Inactive + }, + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 3, // Not removed + EndAccumulatedFee: 1, // Inactive + }, + }, + }, + { + name: "change validationID", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 0, // Removed + }, + { + ValidationID: ids.GenerateTestID(), + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Inactive + }, + }, + }, + { + name: "added and removed", + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + Weight: 0, // Removed + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + state := newTestState(t, memdb.New()) + expectedSOVs := make(map[ids.ID]SubnetOnlyValidator) + for _, sov := range test.initial { + require.NoError(state.PutSubnetOnlyValidator(sov)) + expectedSOVs[sov.ValidationID] = sov + } + + d, err := NewDiffOn(state) require.NoError(err) - require.Equal( - expectedExpiriesSlice, - iterator.ToSlice(expiryIterator), - ) - for expiry := range expectedExpiries { - has, err := chain.HasExpiry(expiry) - require.NoError(err) - require.True(has) + for _, sov := range test.sovs { + require.NoError(d.PutSubnetOnlyValidator(sov)) + expectedSOVs[sov.ValidationID] = sov } - for expiry := range unexpectedExpiries { - has, err := chain.HasExpiry(expiry) + + verifyChain := func(chain Chain) { + for _, expectedSOV := range expectedSOVs { + if expectedSOV.Weight != 0 { + continue + } + + sov, err := chain.GetSubnetOnlyValidator(expectedSOV.ValidationID) + require.ErrorIs(err, database.ErrNotFound) + require.Zero(sov) + } + + var expectedActive []SubnetOnlyValidator + for _, expectedSOV := range expectedSOVs { + if expectedSOV.Weight == 0 { + continue + } + + sov, err := chain.GetSubnetOnlyValidator(expectedSOV.ValidationID) + require.NoError(err) + require.Equal(expectedSOV, sov) + + has, err := chain.HasSubnetOnlyValidator(expectedSOV.SubnetID, expectedSOV.NodeID) + require.NoError(err) + require.True(has) + + if expectedSOV.isActive() { + expectedActive = append(expectedActive, expectedSOV) + } + } + utils.Sort(expectedActive) + + activeIterator, err := chain.GetActiveSubnetOnlyValidatorsIterator() 
require.NoError(err) - require.False(has) + require.Equal( + expectedActive, + iterator.ToSlice(activeIterator), + ) + + require.Equal(len(expectedActive), chain.NumActiveSubnetOnlyValidators()) } - } - verifyChain(d) - require.NoError(d.Apply(state)) - verifyChain(state) - assertChainsEqual(t, d, state) + verifyChain(d) + require.NoError(d.Apply(state)) + verifyChain(state) + assertChainsEqual(t, state, d) + }) + } +} + +func TestDiffSubnetOnlyValidatorsErrors(t *testing.T) { + sov := SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + Weight: 1, // Not removed + } + + tests := []struct { + name string + initialEndAccumulatedFee uint64 + sov SubnetOnlyValidator + expectedErr error + }{ + { + name: "mutate active constants", + initialEndAccumulatedFee: 1, + sov: SubnetOnlyValidator{ + ValidationID: sov.ValidationID, + NodeID: ids.GenerateTestNodeID(), + }, + expectedErr: ErrMutatedSubnetOnlyValidator, + }, + { + name: "mutate inactive constants", + initialEndAccumulatedFee: 0, + sov: SubnetOnlyValidator{ + ValidationID: sov.ValidationID, + NodeID: ids.GenerateTestNodeID(), + }, + expectedErr: ErrMutatedSubnetOnlyValidator, + }, + { + name: "duplicate active subnetID and nodeID pair", + initialEndAccumulatedFee: 1, + sov: SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + NodeID: sov.NodeID, + }, + expectedErr: ErrDuplicateSubnetOnlyValidator, + }, + { + name: "duplicate inactive subnetID and nodeID pair", + initialEndAccumulatedFee: 0, + sov: SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + NodeID: sov.NodeID, + }, + expectedErr: ErrDuplicateSubnetOnlyValidator, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + state := newTestState(t, memdb.New()) + + sov.EndAccumulatedFee = test.initialEndAccumulatedFee + require.NoError(state.PutSubnetOnlyValidator(sov)) + + d, err := NewDiffOn(state) + require.NoError(err) + + // Initialize subnetID, weight, and endAccumulatedFee as they are + // constant among all tests. + test.sov.SubnetID = sov.SubnetID + test.sov.Weight = 1 // Not removed + test.sov.EndAccumulatedFee = rand.Uint64() //#nosec G404 + err = d.PutSubnetOnlyValidator(test.sov) + require.ErrorIs(err, test.expectedErr) + + // The invalid addition should not have modified the diff. 
+ assertChainsEqual(t, state, d) + }) } } @@ -272,6 +638,7 @@ func TestDiffCurrentValidator(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) require.NoError(err) @@ -307,6 +674,7 @@ func TestDiffPendingValidator(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) require.NoError(err) @@ -348,6 +716,7 @@ func TestDiffCurrentDelegator(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) require.NoError(err) @@ -395,6 +764,7 @@ func TestDiffPendingDelegator(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) require.NoError(err) @@ -536,6 +906,7 @@ func TestDiffTx(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) require.NoError(err) @@ -634,6 +1005,7 @@ func TestDiffUTXO(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) require.NoError(err) @@ -688,6 +1060,18 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { ) } + expectedActiveSOVsIterator, expectedErr := expected.GetActiveSubnetOnlyValidatorsIterator() + actualActiveSOVsIterator, actualErr := actual.GetActiveSubnetOnlyValidatorsIterator() + require.Equal(expectedErr, actualErr) + if expectedErr == nil { + require.Equal( + iterator.ToSlice(expectedActiveSOVsIterator), + iterator.ToSlice(actualActiveSOVsIterator), + ) + } + + require.Equal(expected.NumActiveSubnetOnlyValidators(), actual.NumActiveSubnetOnlyValidators()) + expectedCurrentStakerIterator, expectedErr := expected.GetCurrentStakerIterator() actualCurrentStakerIterator, actualErr := actual.GetCurrentStakerIterator() require.Equal(expectedErr, actualErr) diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index 3b380a87a8b8..2ce5a181a684 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -204,6 +204,21 @@ func (mr *MockChainMockRecorder) GetAccruedFees() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccruedFees", reflect.TypeOf((*MockChain)(nil).GetAccruedFees)) } +// GetActiveSubnetOnlyValidatorsIterator mocks base method. 
+func (m *MockChain) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[SubnetOnlyValidator], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveSubnetOnlyValidatorsIterator") + ret0, _ := ret[0].(iterator.Iterator[SubnetOnlyValidator]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActiveSubnetOnlyValidatorsIterator indicates an expected call of GetActiveSubnetOnlyValidatorsIterator. +func (mr *MockChainMockRecorder) GetActiveSubnetOnlyValidatorsIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveSubnetOnlyValidatorsIterator", reflect.TypeOf((*MockChain)(nil).GetActiveSubnetOnlyValidatorsIterator)) +} + // GetCurrentDelegatorIterator mocks base method. func (m *MockChain) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (iterator.Iterator[*Staker], error) { m.ctrl.T.Helper() @@ -369,6 +384,21 @@ func (mr *MockChainMockRecorder) GetSubnetManager(subnetID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetManager", reflect.TypeOf((*MockChain)(nil).GetSubnetManager), subnetID) } +// GetSubnetOnlyValidator mocks base method. +func (m *MockChain) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetOnlyValidator", validationID) + ret0, _ := ret[0].(SubnetOnlyValidator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetOnlyValidator indicates an expected call of GetSubnetOnlyValidator. +func (mr *MockChainMockRecorder) GetSubnetOnlyValidator(validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOnlyValidator", reflect.TypeOf((*MockChain)(nil).GetSubnetOnlyValidator), validationID) +} + // GetSubnetOwner mocks base method. func (m *MockChain) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() @@ -459,6 +489,35 @@ func (mr *MockChainMockRecorder) HasExpiry(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockChain)(nil).HasExpiry), arg0) } +// HasSubnetOnlyValidator mocks base method. +func (m *MockChain) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasSubnetOnlyValidator", subnetID, nodeID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasSubnetOnlyValidator indicates an expected call of HasSubnetOnlyValidator. +func (mr *MockChainMockRecorder) HasSubnetOnlyValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasSubnetOnlyValidator", reflect.TypeOf((*MockChain)(nil).HasSubnetOnlyValidator), subnetID, nodeID) +} + +// NumActiveSubnetOnlyValidators mocks base method. +func (m *MockChain) NumActiveSubnetOnlyValidators() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NumActiveSubnetOnlyValidators") + ret0, _ := ret[0].(int) + return ret0 +} + +// NumActiveSubnetOnlyValidators indicates an expected call of NumActiveSubnetOnlyValidators. +func (mr *MockChainMockRecorder) NumActiveSubnetOnlyValidators() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveSubnetOnlyValidators", reflect.TypeOf((*MockChain)(nil).NumActiveSubnetOnlyValidators)) +} + // PutCurrentDelegator mocks base method. 
func (m *MockChain) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -523,6 +582,20 @@ func (mr *MockChainMockRecorder) PutPendingValidator(staker any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockChain)(nil).PutPendingValidator), staker) } +// PutSubnetOnlyValidator mocks base method. +func (m *MockChain) PutSubnetOnlyValidator(sov SubnetOnlyValidator) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutSubnetOnlyValidator", sov) + ret0, _ := ret[0].(error) + return ret0 +} + +// PutSubnetOnlyValidator indicates an expected call of PutSubnetOnlyValidator. +func (mr *MockChainMockRecorder) PutSubnetOnlyValidator(sov any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutSubnetOnlyValidator", reflect.TypeOf((*MockChain)(nil).PutSubnetOnlyValidator), sov) +} + // SetAccruedFees mocks base method. func (m *MockChain) SetAccruedFees(f uint64) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index 77edfde92aaf..c2138705dc66 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -218,6 +218,21 @@ func (mr *MockDiffMockRecorder) GetAccruedFees() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccruedFees", reflect.TypeOf((*MockDiff)(nil).GetAccruedFees)) } +// GetActiveSubnetOnlyValidatorsIterator mocks base method. +func (m *MockDiff) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[SubnetOnlyValidator], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveSubnetOnlyValidatorsIterator") + ret0, _ := ret[0].(iterator.Iterator[SubnetOnlyValidator]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActiveSubnetOnlyValidatorsIterator indicates an expected call of GetActiveSubnetOnlyValidatorsIterator. +func (mr *MockDiffMockRecorder) GetActiveSubnetOnlyValidatorsIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveSubnetOnlyValidatorsIterator", reflect.TypeOf((*MockDiff)(nil).GetActiveSubnetOnlyValidatorsIterator)) +} + // GetCurrentDelegatorIterator mocks base method. func (m *MockDiff) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (iterator.Iterator[*Staker], error) { m.ctrl.T.Helper() @@ -383,6 +398,21 @@ func (mr *MockDiffMockRecorder) GetSubnetManager(subnetID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetManager", reflect.TypeOf((*MockDiff)(nil).GetSubnetManager), subnetID) } +// GetSubnetOnlyValidator mocks base method. +func (m *MockDiff) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetOnlyValidator", validationID) + ret0, _ := ret[0].(SubnetOnlyValidator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetOnlyValidator indicates an expected call of GetSubnetOnlyValidator. +func (mr *MockDiffMockRecorder) GetSubnetOnlyValidator(validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOnlyValidator", reflect.TypeOf((*MockDiff)(nil).GetSubnetOnlyValidator), validationID) +} + // GetSubnetOwner mocks base method. 
func (m *MockDiff) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() @@ -473,6 +503,35 @@ func (mr *MockDiffMockRecorder) HasExpiry(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockDiff)(nil).HasExpiry), arg0) } +// HasSubnetOnlyValidator mocks base method. +func (m *MockDiff) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasSubnetOnlyValidator", subnetID, nodeID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasSubnetOnlyValidator indicates an expected call of HasSubnetOnlyValidator. +func (mr *MockDiffMockRecorder) HasSubnetOnlyValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasSubnetOnlyValidator", reflect.TypeOf((*MockDiff)(nil).HasSubnetOnlyValidator), subnetID, nodeID) +} + +// NumActiveSubnetOnlyValidators mocks base method. +func (m *MockDiff) NumActiveSubnetOnlyValidators() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NumActiveSubnetOnlyValidators") + ret0, _ := ret[0].(int) + return ret0 +} + +// NumActiveSubnetOnlyValidators indicates an expected call of NumActiveSubnetOnlyValidators. +func (mr *MockDiffMockRecorder) NumActiveSubnetOnlyValidators() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveSubnetOnlyValidators", reflect.TypeOf((*MockDiff)(nil).NumActiveSubnetOnlyValidators)) +} + // PutCurrentDelegator mocks base method. func (m *MockDiff) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -537,6 +596,20 @@ func (mr *MockDiffMockRecorder) PutPendingValidator(staker any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockDiff)(nil).PutPendingValidator), staker) } +// PutSubnetOnlyValidator mocks base method. +func (m *MockDiff) PutSubnetOnlyValidator(sov SubnetOnlyValidator) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutSubnetOnlyValidator", sov) + ret0, _ := ret[0].(error) + return ret0 +} + +// PutSubnetOnlyValidator indicates an expected call of PutSubnetOnlyValidator. +func (mr *MockDiffMockRecorder) PutSubnetOnlyValidator(sov any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutSubnetOnlyValidator", reflect.TypeOf((*MockDiff)(nil).PutSubnetOnlyValidator), sov) +} + // SetAccruedFees mocks base method. func (m *MockDiff) SetAccruedFees(f uint64) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 2f8ddaa4bc85..8db5f9bec48e 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -319,6 +319,21 @@ func (mr *MockStateMockRecorder) GetAccruedFees() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccruedFees", reflect.TypeOf((*MockState)(nil).GetAccruedFees)) } +// GetActiveSubnetOnlyValidatorsIterator mocks base method. +func (m *MockState) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[SubnetOnlyValidator], error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveSubnetOnlyValidatorsIterator") + ret0, _ := ret[0].(iterator.Iterator[SubnetOnlyValidator]) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActiveSubnetOnlyValidatorsIterator indicates an expected call of GetActiveSubnetOnlyValidatorsIterator. 
+func (mr *MockStateMockRecorder) GetActiveSubnetOnlyValidatorsIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveSubnetOnlyValidatorsIterator", reflect.TypeOf((*MockState)(nil).GetActiveSubnetOnlyValidatorsIterator)) +} + // GetBlockIDAtHeight mocks base method. func (m *MockState) GetBlockIDAtHeight(height uint64) (ids.ID, error) { m.ctrl.T.Helper() @@ -588,6 +603,21 @@ func (mr *MockStateMockRecorder) GetSubnetManager(subnetID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetManager", reflect.TypeOf((*MockState)(nil).GetSubnetManager), subnetID) } +// GetSubnetOnlyValidator mocks base method. +func (m *MockState) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetOnlyValidator", validationID) + ret0, _ := ret[0].(SubnetOnlyValidator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetOnlyValidator indicates an expected call of GetSubnetOnlyValidator. +func (mr *MockStateMockRecorder) GetSubnetOnlyValidator(validationID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOnlyValidator", reflect.TypeOf((*MockState)(nil).GetSubnetOnlyValidator), validationID) +} + // GetSubnetOwner mocks base method. func (m *MockState) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() @@ -694,6 +724,35 @@ func (mr *MockStateMockRecorder) HasExpiry(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasExpiry", reflect.TypeOf((*MockState)(nil).HasExpiry), arg0) } +// HasSubnetOnlyValidator mocks base method. +func (m *MockState) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasSubnetOnlyValidator", subnetID, nodeID) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasSubnetOnlyValidator indicates an expected call of HasSubnetOnlyValidator. +func (mr *MockStateMockRecorder) HasSubnetOnlyValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasSubnetOnlyValidator", reflect.TypeOf((*MockState)(nil).HasSubnetOnlyValidator), subnetID, nodeID) +} + +// NumActiveSubnetOnlyValidators mocks base method. +func (m *MockState) NumActiveSubnetOnlyValidators() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NumActiveSubnetOnlyValidators") + ret0, _ := ret[0].(int) + return ret0 +} + +// NumActiveSubnetOnlyValidators indicates an expected call of NumActiveSubnetOnlyValidators. +func (mr *MockStateMockRecorder) NumActiveSubnetOnlyValidators() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveSubnetOnlyValidators", reflect.TypeOf((*MockState)(nil).NumActiveSubnetOnlyValidators)) +} + // PutCurrentDelegator mocks base method. func (m *MockState) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -758,6 +817,20 @@ func (mr *MockStateMockRecorder) PutPendingValidator(staker any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockState)(nil).PutPendingValidator), staker) } +// PutSubnetOnlyValidator mocks base method. 
+func (m *MockState) PutSubnetOnlyValidator(sov SubnetOnlyValidator) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutSubnetOnlyValidator", sov) + ret0, _ := ret[0].(error) + return ret0 +} + +// PutSubnetOnlyValidator indicates an expected call of PutSubnetOnlyValidator. +func (mr *MockStateMockRecorder) PutSubnetOnlyValidator(sov any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutSubnetOnlyValidator", reflect.TypeOf((*MockState)(nil).PutSubnetOnlyValidator), sov) +} + // ReindexBlocks mocks base method. func (m *MockState) ReindexBlocks(lock sync.Locker, log logging.Logger) error { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index b6961bf3f271..dfe77e87f97b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -83,6 +83,10 @@ var ( SupplyPrefix = []byte("supply") ChainPrefix = []byte("chain") ExpiryReplayProtectionPrefix = []byte("expiryReplayProtection") + SubnetOnlyValidatorsPrefix = []byte("subnetOnlyValidators") + SubnetIDNodeIDPrefix = []byte("subnetIDNodeID") + ActivePrefix = []byte("active") + InactivePrefix = []byte("inactive") SingletonPrefix = []byte("singleton") TimestampKey = []byte("timestamp") @@ -308,6 +312,14 @@ type state struct { expiryDiff *expiryDiff expiryDB database.Database + activeSOVLookup map[ids.ID]SubnetOnlyValidator + activeSOVs *btree.BTreeG[SubnetOnlyValidator] + sovDiff *subnetOnlyValidatorsDiff + subnetOnlyValidatorsDB database.Database + subnetIDNodeIDDB database.Database + activeDB database.Database + inactiveDB database.Database + currentStakers *baseStakers pendingStakers *baseStakers @@ -497,6 +509,8 @@ func New( baseDB := versiondb.New(db) + subnetOnlyValidatorsDB := prefixdb.New(SubnetOnlyValidatorsPrefix, baseDB) + validatorsDB := prefixdb.New(ValidatorsPrefix, baseDB) currentValidatorsDB := prefixdb.New(CurrentPrefix, validatorsDB) @@ -623,6 +637,14 @@ func New( expiryDiff: newExpiryDiff(), expiryDB: prefixdb.New(ExpiryReplayProtectionPrefix, baseDB), + activeSOVLookup: make(map[ids.ID]SubnetOnlyValidator), + activeSOVs: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), + sovDiff: newSubnetOnlyValidatorsDiff(), + subnetOnlyValidatorsDB: subnetOnlyValidatorsDB, + subnetIDNodeIDDB: prefixdb.New(SubnetIDNodeIDPrefix, subnetOnlyValidatorsDB), + activeDB: prefixdb.New(ActivePrefix, subnetOnlyValidatorsDB), + inactiveDB: prefixdb.New(InactivePrefix, subnetOnlyValidatorsDB), + currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), @@ -718,6 +740,57 @@ func (s *state) DeleteExpiry(entry ExpiryEntry) { s.expiryDiff.DeleteExpiry(entry) } +func (s *state) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[SubnetOnlyValidator], error) { + return s.sovDiff.getActiveSubnetOnlyValidatorsIterator( + iterator.FromTree(s.activeSOVs), + ), nil +} + +func (s *state) NumActiveSubnetOnlyValidators() int { + return len(s.activeSOVLookup) + s.sovDiff.numAddedActive +} + +func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { + if sov, modified := s.sovDiff.modified[validationID]; modified { + if sov.Weight == 0 { + return SubnetOnlyValidator{}, database.ErrNotFound + } + return sov, nil + } + + if sov, ok := s.activeSOVLookup[validationID]; ok { + return sov, nil + } + + // TODO: Add caching + sovBytes, err := s.inactiveDB.Get(validationID[:]) + if err != nil { + return SubnetOnlyValidator{}, err + } + + var sov SubnetOnlyValidator + if _, err := 
block.GenesisCodec.Unmarshal(sovBytes, &sov); err != nil { + return SubnetOnlyValidator{}, err + } + return sov, nil +} + +func (s *state) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) { + if has, modified := s.sovDiff.hasSubnetOnlyValidator(subnetID, nodeID); modified { + return has, nil + } + + // TODO: Add caching + key := make([]byte, len(subnetID)+len(nodeID)) + copy(key, subnetID[:]) + copy(key[len(subnetID):], nodeID[:]) + return s.subnetIDNodeIDDB.Has(key) +} + +func (s *state) PutSubnetOnlyValidator(sov SubnetOnlyValidator) error { + return s.sovDiff.putSubnetOnlyValidator(s, sov) +} + func (s *state) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { return s.currentStakers.GetValidator(subnetID, nodeID) } @@ -1371,6 +1444,7 @@ func (s *state) load() error { return errors.Join( s.loadMetadata(), s.loadExpiry(), + s.loadActiveSubnetOnlyValidators(), s.loadCurrentValidators(), s.loadPendingValidators(), s.initValidatorSets(), @@ -1459,6 +1533,34 @@ func (s *state) loadExpiry() error { return nil } +func (s *state) loadActiveSubnetOnlyValidators() error { + it := s.activeDB.NewIterator() + defer it.Release() + + for it.Next() { + key := it.Key() + validationID, err := ids.ToID(key) + if err != nil { + return fmt.Errorf("failed to unmarshal ValidationID during load: %w", err) + } + + var ( + value = it.Value() + sov = SubnetOnlyValidator{ + ValidationID: validationID, + } + ) + if _, err := block.GenesisCodec.Unmarshal(value, &sov); err != nil { + return fmt.Errorf("failed to unmarshal SubnetOnlyValidator: %w", err) + } + + s.activeSOVLookup[validationID] = sov + s.activeSOVs.ReplaceOrInsert(sov) + } + + return nil +} + func (s *state) loadCurrentValidators() error { s.currentStakers = newBaseStakers() @@ -1758,6 +1860,7 @@ func (s *state) write(updateValidators bool, height uint64) error { return errors.Join( s.writeBlocks(), s.writeExpiry(), + s.writeSubnetOnlyValidators(), s.writeCurrentStakers(updateValidators, height, codecVersion), s.writePendingStakers(), s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers @@ -2005,6 +2108,75 @@ func (s *state) writeExpiry() error { return nil } +// TODO: Write weight and public key diffs +// TODO: Add caching +func (s *state) writeSubnetOnlyValidators() error { + // Perform deletions: + for validationID, sov := range s.sovDiff.modified { + if sov.Weight != 0 { + continue + } + + subnetIDNodeIDKey := make([]byte, len(sov.SubnetID)+len(sov.NodeID)) + copy(subnetIDNodeIDKey, sov.SubnetID[:]) + copy(subnetIDNodeIDKey[len(sov.SubnetID):], sov.NodeID[:]) + if err := s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey); err != nil { + return err + } + + var err error + if priorSOV, ok := s.activeSOVLookup[validationID]; ok { + delete(s.activeSOVLookup, validationID) + s.activeSOVs.Delete(priorSOV) + err = s.activeDB.Delete(validationID[:]) + } else { + err = s.inactiveDB.Delete(validationID[:]) + } + if err != nil { + return err + } + } + // Perform additions/modifications: + for validationID, sov := range s.sovDiff.modified { + if sov.Weight == 0 { + continue + } + + subnetIDNodeIDKey := make([]byte, len(sov.SubnetID)+len(sov.NodeID)) + copy(subnetIDNodeIDKey, sov.SubnetID[:]) + copy(subnetIDNodeIDKey[len(sov.SubnetID):], sov.NodeID[:]) + if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, nil); err != nil { + return err + } + + var err error + if priorSOV, ok := s.activeSOVLookup[validationID]; ok { + 
delete(s.activeSOVLookup, validationID) + s.activeSOVs.Delete(priorSOV) + err = s.activeDB.Delete(validationID[:]) + } else { + err = s.inactiveDB.Delete(validationID[:]) + } + if err != nil { + return err + } + + if sov.isActive() { + s.activeSOVLookup[validationID] = sov + s.activeSOVs.ReplaceOrInsert(sov) + err = putSubnetOnlyValidator(s.activeDB, sov) + } else { + err = putSubnetOnlyValidator(s.inactiveDB, sov) + } + if err != nil { + return err + } + } + + s.sovDiff = newSubnetOnlyValidatorsDiff() + return nil +} + func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { delete(s.currentStakers.validatorDiffs, subnetID) diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index cc30ca29f522..87f9ddacb013 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -30,7 +30,7 @@ type SubnetOnlyValidators interface { // NumActiveSubnetOnlyValidators returns the number of currently active // subnet only validators. - NumActiveSubnetOnlyValidators() (int, error) + NumActiveSubnetOnlyValidators() int // GetSubnetOnlyValidator returns the validator with [validationID] if it // exists. If the validator does not exist, [err] will equal @@ -139,10 +139,10 @@ func getSubnetOnlyValidator(db database.KeyValueReader, validationID ids.ID) (Su vdr := SubnetOnlyValidator{ ValidationID: validationID, } - if _, err = block.GenesisCodec.Unmarshal(bytes, &vdr); err != nil { + if _, err := block.GenesisCodec.Unmarshal(bytes, &vdr); err != nil { return SubnetOnlyValidator{}, fmt.Errorf("failed to unmarshal SubnetOnlyValidator: %w", err) } - return vdr, err + return vdr, nil } func putSubnetOnlyValidator(db database.KeyValueWriter, vdr SubnetOnlyValidator) error { From 09faa6bb8bbf0c6e0d0c59f554553dfbc5570e51 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 13 Sep 2024 23:20:24 -0400 Subject: [PATCH 036/155] nit --- vms/platformvm/state/state.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index dfe77e87f97b..95d0a04a70ff 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -763,16 +763,7 @@ func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator } // TODO: Add caching - sovBytes, err := s.inactiveDB.Get(validationID[:]) - if err != nil { - return SubnetOnlyValidator{}, err - } - - var sov SubnetOnlyValidator - if _, err := block.GenesisCodec.Unmarshal(sovBytes, &sov); err != nil { - return SubnetOnlyValidator{}, err - } - return sov, nil + return getSubnetOnlyValidator(s.inactiveDB, validationID) } func (s *state) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) { From 51937f7d1e1f54f918e4357fc603fe1d2949c5f3 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 13 Sep 2024 23:42:36 -0400 Subject: [PATCH 037/155] nit --- vms/platformvm/state/state.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 95d0a04a70ff..2a073b01b852 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1871,6 +1871,10 @@ func (s *state) write(updateValidators bool, height uint64) error { func (s *state) Close() error { return errors.Join( s.expiryDB.Close(), + s.subnetIDNodeIDDB.Close(), + 
s.activeDB.Close(), + s.inactiveDB.Close(), + s.subnetOnlyValidatorsDB.Close(), s.pendingSubnetValidatorBaseDB.Close(), s.pendingSubnetDelegatorBaseDB.Close(), s.pendingDelegatorBaseDB.Close(), @@ -2119,9 +2123,9 @@ func (s *state) writeSubnetOnlyValidators() error { if priorSOV, ok := s.activeSOVLookup[validationID]; ok { delete(s.activeSOVLookup, validationID) s.activeSOVs.Delete(priorSOV) - err = s.activeDB.Delete(validationID[:]) + err = deleteSubnetOnlyValidator(s.activeDB, validationID) } else { - err = s.inactiveDB.Delete(validationID[:]) + err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) } if err != nil { return err @@ -2144,9 +2148,9 @@ func (s *state) writeSubnetOnlyValidators() error { if priorSOV, ok := s.activeSOVLookup[validationID]; ok { delete(s.activeSOVLookup, validationID) s.activeSOVs.Delete(priorSOV) - err = s.activeDB.Delete(validationID[:]) + err = deleteSubnetOnlyValidator(s.activeDB, validationID) } else { - err = s.inactiveDB.Delete(validationID[:]) + err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) } if err != nil { return err From 88a310e027640b76e95a42755124c0c9f826c04e Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 14 Sep 2024 22:25:55 -0400 Subject: [PATCH 038/155] Add num SoV validators on a subnet --- vms/platformvm/state/diff.go | 13 ++ vms/platformvm/state/diff_test.go | 13 +- vms/platformvm/state/mock_chain.go | 15 +++ vms/platformvm/state/mock_diff.go | 15 +++ vms/platformvm/state/mock_state.go | 15 +++ vms/platformvm/state/state.go | 32 +++++ vms/platformvm/state/subnet_only_validator.go | 111 ++++++++++-------- 7 files changed, 167 insertions(+), 47 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index deb09026282d..1e51717c8d14 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -206,6 +206,19 @@ func (d *diff) NumActiveSubnetOnlyValidators() int { return d.parentActiveSOVs + d.sovDiff.numAddedActive } +func (d *diff) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { + if numSOVs, modified := d.sovDiff.modifiedNumValidators[subnetID]; modified { + return numSOVs, nil + } + + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return 0, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + + return parentState.NumSubnetOnlyValidators(subnetID) +} + func (d *diff) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { if sov, modified := d.sovDiff.modified[validationID]; modified { if sov.Weight == 0 { diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index a34a5b40c8f2..e33dc33e0e20 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -513,7 +513,10 @@ func TestDiffSubnetOnlyValidators(t *testing.T) { require.Zero(sov) } - var expectedActive []SubnetOnlyValidator + var ( + numSOVs = make(map[ids.ID]int) + expectedActive []SubnetOnlyValidator + ) for _, expectedSOV := range expectedSOVs { if expectedSOV.Weight == 0 { continue @@ -527,6 +530,7 @@ func TestDiffSubnetOnlyValidators(t *testing.T) { require.NoError(err) require.True(has) + numSOVs[sov.SubnetID]++ if expectedSOV.isActive() { expectedActive = append(expectedActive, expectedSOV) } @@ -541,10 +545,17 @@ func TestDiffSubnetOnlyValidators(t *testing.T) { ) require.Equal(len(expectedActive), chain.NumActiveSubnetOnlyValidators()) + + for subnetID, expectedNumSOVs := range numSOVs { + numSOVs, err := chain.NumSubnetOnlyValidators(subnetID) + require.NoError(err) + 
require.Equal(expectedNumSOVs, numSOVs) + } } verifyChain(d) require.NoError(d.Apply(state)) + verifyChain(d) verifyChain(state) assertChainsEqual(t, state, d) }) diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index 2ce5a181a684..835389f12d79 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -518,6 +518,21 @@ func (mr *MockChainMockRecorder) NumActiveSubnetOnlyValidators() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveSubnetOnlyValidators", reflect.TypeOf((*MockChain)(nil).NumActiveSubnetOnlyValidators)) } +// NumSubnetOnlyValidators mocks base method. +func (m *MockChain) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NumSubnetOnlyValidators", subnetID) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NumSubnetOnlyValidators indicates an expected call of NumSubnetOnlyValidators. +func (mr *MockChainMockRecorder) NumSubnetOnlyValidators(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumSubnetOnlyValidators", reflect.TypeOf((*MockChain)(nil).NumSubnetOnlyValidators), subnetID) +} + // PutCurrentDelegator mocks base method. func (m *MockChain) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index c2138705dc66..8c9e48d7845d 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -532,6 +532,21 @@ func (mr *MockDiffMockRecorder) NumActiveSubnetOnlyValidators() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveSubnetOnlyValidators", reflect.TypeOf((*MockDiff)(nil).NumActiveSubnetOnlyValidators)) } +// NumSubnetOnlyValidators mocks base method. +func (m *MockDiff) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NumSubnetOnlyValidators", subnetID) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NumSubnetOnlyValidators indicates an expected call of NumSubnetOnlyValidators. +func (mr *MockDiffMockRecorder) NumSubnetOnlyValidators(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumSubnetOnlyValidators", reflect.TypeOf((*MockDiff)(nil).NumSubnetOnlyValidators), subnetID) +} + // PutCurrentDelegator mocks base method. func (m *MockDiff) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 8db5f9bec48e..2adfe08eaf3c 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -753,6 +753,21 @@ func (mr *MockStateMockRecorder) NumActiveSubnetOnlyValidators() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveSubnetOnlyValidators", reflect.TypeOf((*MockState)(nil).NumActiveSubnetOnlyValidators)) } +// NumSubnetOnlyValidators mocks base method. +func (m *MockState) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NumSubnetOnlyValidators", subnetID) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NumSubnetOnlyValidators indicates an expected call of NumSubnetOnlyValidators. 
+func (mr *MockStateMockRecorder) NumSubnetOnlyValidators(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumSubnetOnlyValidators", reflect.TypeOf((*MockState)(nil).NumSubnetOnlyValidators), subnetID) +} + // PutCurrentDelegator mocks base method. func (m *MockState) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 2a073b01b852..7d3754286261 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -14,6 +14,7 @@ import ( "github.com/google/btree" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "golang.org/x/exp/maps" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" @@ -84,6 +85,7 @@ var ( ChainPrefix = []byte("chain") ExpiryReplayProtectionPrefix = []byte("expiryReplayProtection") SubnetOnlyValidatorsPrefix = []byte("subnetOnlyValidators") + NumValidatorsPrefix = []byte("numValidators") SubnetIDNodeIDPrefix = []byte("subnetIDNodeID") ActivePrefix = []byte("active") InactivePrefix = []byte("inactive") @@ -316,6 +318,7 @@ type state struct { activeSOVs *btree.BTreeG[SubnetOnlyValidator] sovDiff *subnetOnlyValidatorsDiff subnetOnlyValidatorsDB database.Database + numValidatorsDB database.Database subnetIDNodeIDDB database.Database activeDB database.Database inactiveDB database.Database @@ -641,6 +644,7 @@ func New( activeSOVs: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), sovDiff: newSubnetOnlyValidatorsDiff(), subnetOnlyValidatorsDB: subnetOnlyValidatorsDB, + numValidatorsDB: prefixdb.New(NumValidatorsPrefix, subnetOnlyValidatorsDB), subnetIDNodeIDDB: prefixdb.New(SubnetIDNodeIDPrefix, subnetOnlyValidatorsDB), activeDB: prefixdb.New(ActivePrefix, subnetOnlyValidatorsDB), inactiveDB: prefixdb.New(InactivePrefix, subnetOnlyValidatorsDB), @@ -750,6 +754,25 @@ func (s *state) NumActiveSubnetOnlyValidators() int { return len(s.activeSOVLookup) + s.sovDiff.numAddedActive } +func (s *state) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { + if numSOVs, modified := s.sovDiff.modifiedNumValidators[subnetID]; modified { + return numSOVs, nil + } + + // TODO: Add caching + numSOVs, err := database.GetUInt64(s.numValidatorsDB, subnetID[:]) + if err == database.ErrNotFound { + return 0, nil + } + if err != nil { + return 0, err + } + if numSOVs > math.MaxInt { + return 0, safemath.ErrOverflow + } + return int(numSOVs), nil +} + func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { if sov, modified := s.sovDiff.modified[validationID]; modified { if sov.Weight == 0 { @@ -1871,6 +1894,7 @@ func (s *state) write(updateValidators bool, height uint64) error { func (s *state) Close() error { return errors.Join( s.expiryDB.Close(), + s.numValidatorsDB.Close(), s.subnetIDNodeIDDB.Close(), s.activeDB.Close(), s.inactiveDB.Close(), @@ -2106,6 +2130,14 @@ func (s *state) writeExpiry() error { // TODO: Write weight and public key diffs // TODO: Add caching func (s *state) writeSubnetOnlyValidators() error { + // Write counts: + for subnetID, numValidators := range s.sovDiff.modifiedNumValidators { + if err := database.PutUInt64(s.numValidatorsDB, subnetID[:], uint64(numValidators)); err != nil { + return err + } + } + maps.Clear(s.sovDiff.modifiedNumValidators) + // Perform deletions: for validationID, sov := range s.sovDiff.modified { if sov.Weight != 0 { diff --git a/vms/platformvm/state/subnet_only_validator.go 
b/vms/platformvm/state/subnet_only_validator.go index 87f9ddacb013..7463e9b72fc6 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -32,14 +32,17 @@ type SubnetOnlyValidators interface { // subnet only validators. NumActiveSubnetOnlyValidators() int + // NumSubnetOnlyValidators returns the total number of subnet only + // validators on [subnetID]. + NumSubnetOnlyValidators(subnetID ids.ID) (int, error) + // GetSubnetOnlyValidator returns the validator with [validationID] if it // exists. If the validator does not exist, [err] will equal // [database.ErrNotFound]. GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) // HasSubnetOnlyValidator returns the validator with [validationID] if it - // exists. If the validator does not exist, [err] will equal - // [database.ErrNotFound]. + // exists. HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) // PutSubnetOnlyValidator inserts [sov] as a validator. @@ -163,17 +166,19 @@ type subnetIDNodeID struct { } type subnetOnlyValidatorsDiff struct { - numAddedActive int // May be negative - modified map[ids.ID]SubnetOnlyValidator - modifiedHasNodeIDs map[subnetIDNodeID]bool - active *btree.BTreeG[SubnetOnlyValidator] + numAddedActive int // May be negative + modifiedNumValidators map[ids.ID]int // subnetID -> numValidators + modified map[ids.ID]SubnetOnlyValidator + modifiedHasNodeIDs map[subnetIDNodeID]bool + active *btree.BTreeG[SubnetOnlyValidator] } func newSubnetOnlyValidatorsDiff() *subnetOnlyValidatorsDiff { return &subnetOnlyValidatorsDiff{ - modified: make(map[ids.ID]SubnetOnlyValidator), - modifiedHasNodeIDs: make(map[subnetIDNodeID]bool), - active: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), + modifiedNumValidators: make(map[ids.ID]int), + modified: make(map[ids.ID]SubnetOnlyValidator), + modifiedHasNodeIDs: make(map[subnetIDNodeID]bool), + active: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), } } @@ -198,11 +203,59 @@ func (d *subnetOnlyValidatorsDiff) hasSubnetOnlyValidator(subnetID ids.ID, nodeI } func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state SubnetOnlyValidators, sov SubnetOnlyValidator) error { - diff, err := numActiveSubnetOnlyValidatorChange(state, sov) - if err != nil { + var ( + prevExists bool + prevActive bool + newExists = sov.Weight != 0 + newActive = newExists && sov.EndAccumulatedFee != 0 + ) + switch priorSOV, err := state.GetSubnetOnlyValidator(sov.ValidationID); err { + case nil: + if !priorSOV.validateConstants(sov) { + return ErrMutatedSubnetOnlyValidator + } + + prevExists = true + prevActive = priorSOV.EndAccumulatedFee != 0 + case database.ErrNotFound: + if !newExists { + return nil // Removing a validator that didn't exist is a noop + } + + has, err := state.HasSubnetOnlyValidator(sov.SubnetID, sov.NodeID) + if err != nil { + return err + } + if has { + return ErrDuplicateSubnetOnlyValidator + } + default: return err } - d.numAddedActive += diff + + switch { + case prevExists && !newExists: + numSOVs, err := state.NumSubnetOnlyValidators(sov.SubnetID) + if err != nil { + return err + } + + d.modifiedNumValidators[sov.SubnetID] = numSOVs - 1 + case !prevExists && newExists: + numSOVs, err := state.NumSubnetOnlyValidators(sov.SubnetID) + if err != nil { + return err + } + + d.modifiedNumValidators[sov.SubnetID] = numSOVs + 1 + } + + switch { + case prevActive && !newActive: + d.numAddedActive-- + case !prevActive && newActive: + d.numAddedActive++ + } if prevSOV, ok := 
d.modified[sov.ValidationID]; ok { prevSubnetIDNodeID := subnetIDNodeID{ @@ -227,37 +280,3 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state SubnetOnlyValida d.active.ReplaceOrInsert(sov) return nil } - -// numActiveSubnetOnlyValidatorChange returns the change in the number of active -// subnet only validators if [sov] were to be inserted into [state]. If it is -// invalid for [sov] to be inserted, an error is returned. -func numActiveSubnetOnlyValidatorChange(state SubnetOnlyValidators, sov SubnetOnlyValidator) (int, error) { - switch priorSOV, err := state.GetSubnetOnlyValidator(sov.ValidationID); err { - case nil: - if !priorSOV.validateConstants(sov) { - return 0, ErrMutatedSubnetOnlyValidator - } - switch { - case !priorSOV.isActive() && sov.isActive(): - return 1, nil // Increasing the number of active validators - case priorSOV.isActive() && !sov.isActive(): - return -1, nil // Decreasing the number of active validators - default: - return 0, nil - } - case database.ErrNotFound: - has, err := state.HasSubnetOnlyValidator(sov.SubnetID, sov.NodeID) - if err != nil { - return 0, err - } - if has { - return 0, ErrDuplicateSubnetOnlyValidator - } - if sov.isActive() { - return 1, nil // Increasing the number of active validators - } - return 0, nil // Adding an inactive validator - default: - return 0, err - } -} From b11ff7e40d21d68d3dd74d45b5158c2bc91e8df5 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 15 Sep 2024 10:42:34 -0400 Subject: [PATCH 039/155] fix tests --- vms/platformvm/block/executor/proposal_block_test.go | 2 ++ vms/platformvm/block/executor/standard_block_test.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 66a6c7604e23..0708704ebabf 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -91,6 +91,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() + onParentAccept.EXPECT().NumActiveSubnetOnlyValidators().Return(0).AnyTimes() currentStakersIt := iteratormock.NewIterator[*state.Staker](ctrl) currentStakersIt.EXPECT().Next().Return(true) @@ -163,6 +164,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetTimestamp().Return(parentTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() + onParentAccept.EXPECT().NumActiveSubnetOnlyValidators().Return(0).AnyTimes() onParentAccept.EXPECT().GetCurrentSupply(constants.PrimaryNetworkID).Return(uint64(1000), nil).AnyTimes() env.blkManager.(*manager).blkIDToState[parentID] = &blockState{ diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index fa64eee74697..1bdd822144f3 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -60,6 +60,7 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() + 
onParentAccept.EXPECT().NumActiveSubnetOnlyValidators().Return(0).AnyTimes() // wrong height apricotChildBlk, err := block.NewApricotStandardBlock( @@ -138,6 +139,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() + onParentAccept.EXPECT().NumActiveSubnetOnlyValidators().Return(0).AnyTimes() txID := ids.GenerateTestID() utxo := &avax.UTXO{ From b028adab408e4e8f8e158d0a4e4f9590aef71a0a Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 15 Sep 2024 14:07:45 -0400 Subject: [PATCH 040/155] write historical diffs --- vms/platformvm/state/state.go | 124 ++++++++++++++++++++++++++++++++-- 1 file changed, 119 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 7d3754286261..4ad67970881b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -4,6 +4,7 @@ package state import ( + "bytes" "context" "errors" "fmt" @@ -781,6 +782,10 @@ func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator return sov, nil } + return s.getPersistedSubnetOnlyValidator(validationID) +} + +func (s *state) getPersistedSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { if sov, ok := s.activeSOVLookup[validationID]; ok { return sov, nil } @@ -1874,7 +1879,7 @@ func (s *state) write(updateValidators bool, height uint64) error { return errors.Join( s.writeBlocks(), s.writeExpiry(), - s.writeSubnetOnlyValidators(), + s.writeSubnetOnlyValidators(height), s.writeCurrentStakers(updateValidators, height, codecVersion), s.writePendingStakers(), s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers @@ -2127,9 +2132,9 @@ func (s *state) writeExpiry() error { return nil } -// TODO: Write weight and public key diffs +// TODO: Update validator sets // TODO: Add caching -func (s *state) writeSubnetOnlyValidators() error { +func (s *state) writeSubnetOnlyValidators(height uint64) error { // Write counts: for subnetID, numValidators := range s.sovDiff.modifiedNumValidators { if err := database.PutUInt64(s.numValidatorsDB, subnetID[:], uint64(numValidators)); err != nil { @@ -2138,6 +2143,32 @@ func (s *state) writeSubnetOnlyValidators() error { } maps.Clear(s.sovDiff.modifiedNumValidators) + historicalDiffs, err := s.makeSubnetOnlyValidatorHistoricalDiffs() + if err != nil { + return err + } + for subnetIDNodeID, diff := range historicalDiffs { + diffKey := marshalDiffKey(subnetIDNodeID.subnetID, height, subnetIDNodeID.nodeID) + if diff.weightDiff.Amount != 0 { + err := s.validatorWeightDiffsDB.Put( + diffKey, + marshalWeightDiff(&diff.weightDiff), + ) + if err != nil { + return err + } + } + if !bytes.Equal(diff.prevPublicKey, diff.newPublicKey) { + err := s.validatorPublicKeyDiffsDB.Put( + diffKey, + diff.prevPublicKey, + ) + if err != nil { + return err + } + } + } + // Perform deletions: for validationID, sov := range s.sovDiff.modified { if sov.Weight != 0 { @@ -2151,8 +2182,12 @@ func (s *state) writeSubnetOnlyValidators() error { return err } - var err error - if priorSOV, ok := s.activeSOVLookup[validationID]; ok { + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) + if err != nil { + return err + } + + if priorSOV.isActive() { delete(s.activeSOVLookup, validationID) 
s.activeSOVs.Delete(priorSOV) err = deleteSubnetOnlyValidator(s.activeDB, validationID) @@ -2204,6 +2239,85 @@ func (s *state) writeSubnetOnlyValidators() error { return nil } +type validatorChanges struct { + weightDiff ValidatorWeightDiff + prevPublicKey []byte + newPublicKey []byte +} + +func getOrDefault[K comparable, V any](m map[K]*V, k K) *V { + if v, ok := m[k]; ok { + return v + } + + v := new(V) + m[k] = v + return v +} + +func (s *state) makeSubnetOnlyValidatorHistoricalDiffs() (map[subnetIDNodeID]*validatorChanges, error) { + changes := make(map[subnetIDNodeID]*validatorChanges, len(s.sovDiff.modified)) + + // Perform deletions: + for validationID := range s.sovDiff.modified { + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) + if err == database.ErrNotFound { + continue + } + if err != nil { + return nil, err + } + + var ( + diff *validatorChanges + subnetIDNodeID = subnetIDNodeID{ + subnetID: priorSOV.SubnetID, + } + ) + if priorSOV.isActive() { + subnetIDNodeID.nodeID = priorSOV.NodeID + diff = getOrDefault(changes, subnetIDNodeID) + diff.prevPublicKey = priorSOV.PublicKey + } else { + subnetIDNodeID.nodeID = ids.EmptyNodeID + diff = getOrDefault(changes, subnetIDNodeID) + } + + if err := diff.weightDiff.Add(true, priorSOV.Weight); err != nil { + return nil, err + } + } + + // Perform additions: + for _, sov := range s.sovDiff.modified { + // If the validator is being removed, we shouldn't work to re-add it. + if sov.Weight == 0 { + continue + } + + var ( + diff *validatorChanges + subnetIDNodeID = subnetIDNodeID{ + subnetID: sov.SubnetID, + } + ) + if sov.isActive() { + subnetIDNodeID.nodeID = sov.NodeID + diff = getOrDefault(changes, subnetIDNodeID) + diff.newPublicKey = sov.PublicKey + } else { + subnetIDNodeID.nodeID = ids.EmptyNodeID + diff = getOrDefault(changes, subnetIDNodeID) + } + + if err := diff.weightDiff.Add(false, sov.Weight); err != nil { + return nil, err + } + } + + return changes, nil +} + func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { delete(s.currentStakers.validatorDiffs, subnetID) From 42d61efdacabfeded915d7bbdc5afa9472918c0c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 16 Sep 2024 15:54:54 -0400 Subject: [PATCH 041/155] Implement and test state diffs --- vms/platformvm/state/diff_test.go | 296 ------------- vms/platformvm/state/mock_state.go | 8 +- vms/platformvm/state/state.go | 21 +- vms/platformvm/state/state_test.go | 407 ++++++++++++++++++ vms/platformvm/state/subnet_only_validator.go | 4 - vms/platformvm/validators/manager.go | 6 +- 6 files changed, 430 insertions(+), 312 deletions(-) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index e33dc33e0e20..187667c34e03 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -266,302 +266,6 @@ func TestDiffExpiry(t *testing.T) { } } -func TestDiffSubnetOnlyValidators(t *testing.T) { - sov := SubnetOnlyValidator{ - ValidationID: ids.GenerateTestID(), - SubnetID: ids.GenerateTestID(), - NodeID: ids.GenerateTestNodeID(), - } - - tests := []struct { - name string - initial []SubnetOnlyValidator - sovs []SubnetOnlyValidator - }{ - { - name: "empty noop", - }, - { - name: "initially active not modified", - initial: []SubnetOnlyValidator{ - { - ValidationID: ids.GenerateTestID(), - SubnetID: ids.GenerateTestID(), - NodeID: ids.GenerateTestNodeID(), - Weight: 1, // Not removed - 
EndAccumulatedFee: 1, // Active - }, - }, - }, - { - name: "initially inactive not modified", - initial: []SubnetOnlyValidator{ - { - ValidationID: ids.GenerateTestID(), - SubnetID: ids.GenerateTestID(), - NodeID: ids.GenerateTestNodeID(), - Weight: 1, // Not removed - EndAccumulatedFee: 0, // Inactive - }, - }, - }, - { - name: "initially active removed", - initial: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active - }, - }, - sovs: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 0, // Removed - }, - }, - }, - { - name: "initially inactive removed", - initial: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 0, // Inactive - }, - }, - sovs: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 0, // Removed - }, - }, - }, - { - name: "increase active weight", - initial: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active - }, - }, - sovs: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 2, // Increased - EndAccumulatedFee: 1, // Active - }, - }, - }, - { - name: "deactivate", - initial: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active - }, - }, - sovs: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 0, // Inactive - }, - }, - }, - { - name: "reactivate", - initial: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 0, // Inactive - }, - }, - sovs: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active - }, - }, - }, - { - name: "update multiple times", - initial: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active - }, - }, - sovs: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 2, // Not removed - EndAccumulatedFee: 1, // Inactive - }, - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 3, // Not removed - EndAccumulatedFee: 1, // Inactive - }, - }, - }, - { - name: "change validationID", - initial: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active - }, - }, - sovs: []SubnetOnlyValidator{ - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 0, // Removed - }, - { - ValidationID: ids.GenerateTestID(), - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Inactive - }, - }, - }, - { - name: "added and removed", - sovs: []SubnetOnlyValidator{ - { - ValidationID: 
sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active - }, - { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - Weight: 0, // Removed - }, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - state := newTestState(t, memdb.New()) - expectedSOVs := make(map[ids.ID]SubnetOnlyValidator) - for _, sov := range test.initial { - require.NoError(state.PutSubnetOnlyValidator(sov)) - expectedSOVs[sov.ValidationID] = sov - } - - d, err := NewDiffOn(state) - require.NoError(err) - - for _, sov := range test.sovs { - require.NoError(d.PutSubnetOnlyValidator(sov)) - expectedSOVs[sov.ValidationID] = sov - } - - verifyChain := func(chain Chain) { - for _, expectedSOV := range expectedSOVs { - if expectedSOV.Weight != 0 { - continue - } - - sov, err := chain.GetSubnetOnlyValidator(expectedSOV.ValidationID) - require.ErrorIs(err, database.ErrNotFound) - require.Zero(sov) - } - - var ( - numSOVs = make(map[ids.ID]int) - expectedActive []SubnetOnlyValidator - ) - for _, expectedSOV := range expectedSOVs { - if expectedSOV.Weight == 0 { - continue - } - - sov, err := chain.GetSubnetOnlyValidator(expectedSOV.ValidationID) - require.NoError(err) - require.Equal(expectedSOV, sov) - - has, err := chain.HasSubnetOnlyValidator(expectedSOV.SubnetID, expectedSOV.NodeID) - require.NoError(err) - require.True(has) - - numSOVs[sov.SubnetID]++ - if expectedSOV.isActive() { - expectedActive = append(expectedActive, expectedSOV) - } - } - utils.Sort(expectedActive) - - activeIterator, err := chain.GetActiveSubnetOnlyValidatorsIterator() - require.NoError(err) - require.Equal( - expectedActive, - iterator.ToSlice(activeIterator), - ) - - require.Equal(len(expectedActive), chain.NumActiveSubnetOnlyValidators()) - - for subnetID, expectedNumSOVs := range numSOVs { - numSOVs, err := chain.NumSubnetOnlyValidators(subnetID) - require.NoError(err) - require.Equal(expectedNumSOVs, numSOVs) - } - } - - verifyChain(d) - require.NoError(d.Apply(state)) - verifyChain(d) - verifyChain(state) - assertChainsEqual(t, state, d) - }) - } -} - func TestDiffSubnetOnlyValidatorsErrors(t *testing.T) { sov := SubnetOnlyValidator{ ValidationID: ids.GenerateTestID(), diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 2adfe08eaf3c..0f67d6607ba3 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -149,17 +149,17 @@ func (mr *MockStateMockRecorder) AddUTXO(utxo any) *gomock.Call { } // ApplyValidatorPublicKeyDiffs mocks base method. -func (m *MockState) ApplyValidatorPublicKeyDiffs(ctx context.Context, validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight, endHeight uint64) error { +func (m *MockState) ApplyValidatorPublicKeyDiffs(ctx context.Context, validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight, endHeight uint64, subnetID ids.ID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyValidatorPublicKeyDiffs", ctx, validators, startHeight, endHeight) + ret := m.ctrl.Call(m, "ApplyValidatorPublicKeyDiffs", ctx, validators, startHeight, endHeight, subnetID) ret0, _ := ret[0].(error) return ret0 } // ApplyValidatorPublicKeyDiffs indicates an expected call of ApplyValidatorPublicKeyDiffs. 
-func (mr *MockStateMockRecorder) ApplyValidatorPublicKeyDiffs(ctx, validators, startHeight, endHeight any) *gomock.Call { +func (mr *MockStateMockRecorder) ApplyValidatorPublicKeyDiffs(ctx, validators, startHeight, endHeight, subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorPublicKeyDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorPublicKeyDiffs), ctx, validators, startHeight, endHeight) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorPublicKeyDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorPublicKeyDiffs), ctx, validators, startHeight, endHeight, subnetID) } // ApplyValidatorWeightDiffs mocks base method. diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 4ad67970881b..ba4fcb46eb05 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -197,6 +197,7 @@ type State interface { validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight uint64, endHeight uint64, + subnetID ids.ID, ) error SetHeight(height uint64) @@ -1336,10 +1337,11 @@ func (s *state) ApplyValidatorPublicKeyDiffs( validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight uint64, endHeight uint64, + subnetID ids.ID, ) error { diffIter := s.validatorPublicKeyDiffsDB.NewIteratorWithStartAndPrefix( - marshalStartDiffKey(constants.PrimaryNetworkID, startHeight), - constants.PrimaryNetworkID[:], + marshalStartDiffKey(subnetID, startHeight), + subnetID[:], ) defer diffIter.Release() @@ -2175,6 +2177,16 @@ func (s *state) writeSubnetOnlyValidators(height uint64) error { continue } + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) + if err == database.ErrNotFound { + // Deleting a non-existent validator is a noop. This can happen if + // the validator was added and then immediately removed. 
+ continue + } + if err != nil { + return err + } + subnetIDNodeIDKey := make([]byte, len(sov.SubnetID)+len(sov.NodeID)) copy(subnetIDNodeIDKey, sov.SubnetID[:]) copy(subnetIDNodeIDKey[len(sov.SubnetID):], sov.NodeID[:]) @@ -2182,11 +2194,6 @@ func (s *state) writeSubnetOnlyValidators(height uint64) error { return err } - priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) - if err != nil { - return err - } - if priorSOV.isActive() { delete(s.activeSOVLookup, validationID) s.activeSOVs.Delete(priorSOV) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index a0dcd0cb7dcc..0beff373a0bf 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -6,6 +6,7 @@ package state import ( "context" "fmt" + "maps" "math" "math/rand" "sync" @@ -24,10 +25,12 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/upgrade/upgradetest" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -1156,6 +1159,7 @@ func TestStateAddRemoveValidator(t *testing.T) { primaryValidatorSet, currentHeight, prevHeight+1, + constants.PrimaryNetworkID, )) requireEqualPublicKeysValidatorSet(require, prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) @@ -1609,3 +1613,406 @@ func TestStateExpiryCommitAndLoad(t *testing.T) { require.NoError(err) require.False(has) } + +func TestSubnetOnlyValidators(t *testing.T) { + sov := SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + } + + sk, err := bls.NewSecretKey() + require.NoError(t, err) + pk := bls.PublicFromSecretKey(sk) + pkBytes := bls.PublicKeyToUncompressedBytes(pk) + + otherSK, err := bls.NewSecretKey() + require.NoError(t, err) + otherPK := bls.PublicFromSecretKey(otherSK) + otherPKBytes := bls.PublicKeyToUncompressedBytes(otherPK) + + tests := []struct { + name string + initial []SubnetOnlyValidator + sovs []SubnetOnlyValidator + }{ + { + name: "empty noop", + }, + { + name: "initially active not modified", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + }, + { + name: "initially inactive not modified", + initial: []SubnetOnlyValidator{ + { + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + }, + }, + { + name: "initially active removed", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 0, 
// Removed + }, + }, + }, + { + name: "initially inactive removed", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 0, // Removed + }, + }, + }, + { + name: "increase active weight", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 2, // Increased + EndAccumulatedFee: 1, // Active + }, + }, + }, + { + name: "deactivate", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + }, + }, + { + name: "reactivate", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + }, + { + name: "update multiple times", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 2, // Not removed + EndAccumulatedFee: 1, // Inactive + }, + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 3, // Not removed + EndAccumulatedFee: 1, // Inactive + }, + }, + }, + { + name: "change validationID", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 0, // Removed + }, + { + ValidationID: ids.GenerateTestID(), + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: otherPKBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + 
EndAccumulatedFee: 1, // Inactive + }, + }, + }, + { + name: "added and removed", + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active + }, + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + RemainingBalanceOwner: []byte{}, + Weight: 0, // Removed + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + state := newTestState(t, memdb.New()) + + var ( + initialSOVs = make(map[ids.ID]SubnetOnlyValidator) + subnetIDs set.Set[ids.ID] + ) + for _, sov := range test.initial { + require.NoError(state.PutSubnetOnlyValidator(sov)) + initialSOVs[sov.ValidationID] = sov + subnetIDs.Add(sov.SubnetID) + } + + state.SetHeight(0) + require.NoError(state.Commit()) + + d, err := NewDiffOn(state) + require.NoError(err) + + expectedSOVs := maps.Clone(initialSOVs) + for _, sov := range test.sovs { + require.NoError(d.PutSubnetOnlyValidator(sov)) + expectedSOVs[sov.ValidationID] = sov + subnetIDs.Add(sov.SubnetID) + } + + verifyChain := func(chain Chain) { + for _, expectedSOV := range expectedSOVs { + if expectedSOV.Weight != 0 { + continue + } + + sov, err := chain.GetSubnetOnlyValidator(expectedSOV.ValidationID) + require.ErrorIs(err, database.ErrNotFound) + require.Zero(sov) + } + + var ( + numSOVs = make(map[ids.ID]int) + expectedActive []SubnetOnlyValidator + ) + for _, expectedSOV := range expectedSOVs { + if expectedSOV.Weight == 0 { + continue + } + + sov, err := chain.GetSubnetOnlyValidator(expectedSOV.ValidationID) + require.NoError(err) + require.Equal(expectedSOV, sov) + + has, err := chain.HasSubnetOnlyValidator(expectedSOV.SubnetID, expectedSOV.NodeID) + require.NoError(err) + require.True(has) + + numSOVs[sov.SubnetID]++ + if expectedSOV.isActive() { + expectedActive = append(expectedActive, expectedSOV) + } + } + utils.Sort(expectedActive) + + activeIterator, err := chain.GetActiveSubnetOnlyValidatorsIterator() + require.NoError(err) + require.Equal( + expectedActive, + iterator.ToSlice(activeIterator), + ) + + require.Equal(len(expectedActive), chain.NumActiveSubnetOnlyValidators()) + + for subnetID, expectedNumSOVs := range numSOVs { + numSOVs, err := chain.NumSubnetOnlyValidators(subnetID) + require.NoError(err) + require.Equal(expectedNumSOVs, numSOVs) + } + } + + verifyChain(d) + require.NoError(d.Apply(state)) + verifyChain(d) + verifyChain(state) + assertChainsEqual(t, state, d) + + state.SetHeight(1) + require.NoError(state.Commit()) + verifyChain(d) + verifyChain(state) + assertChainsEqual(t, state, d) + + sovsToValidatorSet := func( + sovs map[ids.ID]SubnetOnlyValidator, + subnetID ids.ID, + ) map[ids.NodeID]*validators.GetValidatorOutput { + validatorSet := make(map[ids.NodeID]*validators.GetValidatorOutput) + for _, sov := range sovs { + if sov.SubnetID != subnetID || sov.Weight == 0 { + continue + } + + nodeID := sov.NodeID + publicKey := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) + // Inactive validators are combined into a single validator + // with the empty ID. 
+ if sov.EndAccumulatedFee == 0 { + nodeID = ids.EmptyNodeID + publicKey = nil + } + + vdr, ok := validatorSet[nodeID] + if !ok { + vdr = &validators.GetValidatorOutput{ + NodeID: nodeID, + PublicKey: publicKey, + } + validatorSet[nodeID] = vdr + } + vdr.Weight += sov.Weight + } + return validatorSet + } + + for subnetID := range subnetIDs { + expectedValidatorSet := sovsToValidatorSet(initialSOVs, subnetID) + endValidatorSet := sovsToValidatorSet(expectedSOVs, subnetID) + + require.NoError(state.ApplyValidatorWeightDiffs(context.Background(), endValidatorSet, 1, 1, subnetID)) + require.NoError(state.ApplyValidatorPublicKeyDiffs(context.Background(), endValidatorSet, 1, 1, subnetID)) + require.Equal(expectedValidatorSet, endValidatorSet) + } + }) + } +} diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 7463e9b72fc6..3884abcdcbaf 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -218,10 +218,6 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state SubnetOnlyValida prevExists = true prevActive = priorSOV.EndAccumulatedFee != 0 case database.ErrNotFound: - if !newExists { - return nil // Removing a validator that didn't exist is a noop - } - has, err := state.HasSubnetOnlyValidator(sov.SubnetID, sov.NodeID) if err != nil { return err diff --git a/vms/platformvm/validators/manager.go b/vms/platformvm/validators/manager.go index 781d119e226b..02ff6e475ab2 100644 --- a/vms/platformvm/validators/manager.go +++ b/vms/platformvm/validators/manager.go @@ -85,6 +85,7 @@ type State interface { validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight uint64, endHeight uint64, + subnetID ids.ID, ) error } @@ -271,7 +272,7 @@ func (m *manager) makePrimaryNetworkValidatorSet( validatorSet, currentHeight, lastDiffHeight, - constants.PlatformChainID, + constants.PrimaryNetworkID, ) if err != nil { return nil, 0, err @@ -282,6 +283,7 @@ func (m *manager) makePrimaryNetworkValidatorSet( validatorSet, currentHeight, lastDiffHeight, + constants.PrimaryNetworkID, ) return validatorSet, currentHeight, err } @@ -343,11 +345,13 @@ func (m *manager) makeSubnetValidatorSet( } } + // Prior to ACP-77, public keys were inherited from the primary network. 
err = m.state.ApplyValidatorPublicKeyDiffs( ctx, subnetValidatorSet, currentHeight, lastDiffHeight, + constants.PrimaryNetworkID, ) return subnetValidatorSet, currentHeight, err } From 8ef4fcff970f7cf4a89e507f9ef9e3b389e30716 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 16 Sep 2024 17:47:58 -0400 Subject: [PATCH 042/155] Store weights rather than counts --- vms/platformvm/state/diff.go | 8 +-- vms/platformvm/state/mock_chain.go | 30 +++++----- vms/platformvm/state/mock_diff.go | 30 +++++----- vms/platformvm/state/mock_state.go | 30 +++++----- vms/platformvm/state/state.go | 32 +++++------ vms/platformvm/state/state_test.go | 10 ++-- vms/platformvm/state/subnet_only_validator.go | 55 +++++++++++-------- 7 files changed, 100 insertions(+), 95 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 1e51717c8d14..ae0568650580 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -206,9 +206,9 @@ func (d *diff) NumActiveSubnetOnlyValidators() int { return d.parentActiveSOVs + d.sovDiff.numAddedActive } -func (d *diff) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { - if numSOVs, modified := d.sovDiff.modifiedNumValidators[subnetID]; modified { - return numSOVs, nil +func (d *diff) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { + if weight, modified := d.sovDiff.modifiedTotalWeight[subnetID]; modified { + return weight, nil } parentState, ok := d.stateVersions.GetState(d.parentID) @@ -216,7 +216,7 @@ func (d *diff) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { return 0, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) } - return parentState.NumSubnetOnlyValidators(subnetID) + return parentState.WeightOfSubnetOnlyValidators(subnetID) } func (d *diff) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index 835389f12d79..5077a16ff69e 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -518,21 +518,6 @@ func (mr *MockChainMockRecorder) NumActiveSubnetOnlyValidators() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveSubnetOnlyValidators", reflect.TypeOf((*MockChain)(nil).NumActiveSubnetOnlyValidators)) } -// NumSubnetOnlyValidators mocks base method. -func (m *MockChain) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NumSubnetOnlyValidators", subnetID) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NumSubnetOnlyValidators indicates an expected call of NumSubnetOnlyValidators. -func (mr *MockChainMockRecorder) NumSubnetOnlyValidators(subnetID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumSubnetOnlyValidators", reflect.TypeOf((*MockChain)(nil).NumSubnetOnlyValidators), subnetID) -} - // PutCurrentDelegator mocks base method. func (m *MockChain) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -696,3 +681,18 @@ func (mr *MockChainMockRecorder) SetTimestamp(tm any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockChain)(nil).SetTimestamp), tm) } + +// WeightOfSubnetOnlyValidators mocks base method. 
+func (m *MockChain) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WeightOfSubnetOnlyValidators", subnetID) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WeightOfSubnetOnlyValidators indicates an expected call of WeightOfSubnetOnlyValidators. +func (mr *MockChainMockRecorder) WeightOfSubnetOnlyValidators(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WeightOfSubnetOnlyValidators", reflect.TypeOf((*MockChain)(nil).WeightOfSubnetOnlyValidators), subnetID) +} diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index 8c9e48d7845d..1112451386f4 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -532,21 +532,6 @@ func (mr *MockDiffMockRecorder) NumActiveSubnetOnlyValidators() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveSubnetOnlyValidators", reflect.TypeOf((*MockDiff)(nil).NumActiveSubnetOnlyValidators)) } -// NumSubnetOnlyValidators mocks base method. -func (m *MockDiff) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NumSubnetOnlyValidators", subnetID) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NumSubnetOnlyValidators indicates an expected call of NumSubnetOnlyValidators. -func (mr *MockDiffMockRecorder) NumSubnetOnlyValidators(subnetID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumSubnetOnlyValidators", reflect.TypeOf((*MockDiff)(nil).NumSubnetOnlyValidators), subnetID) -} - // PutCurrentDelegator mocks base method. func (m *MockDiff) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -710,3 +695,18 @@ func (mr *MockDiffMockRecorder) SetTimestamp(tm any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockDiff)(nil).SetTimestamp), tm) } + +// WeightOfSubnetOnlyValidators mocks base method. +func (m *MockDiff) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WeightOfSubnetOnlyValidators", subnetID) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WeightOfSubnetOnlyValidators indicates an expected call of WeightOfSubnetOnlyValidators. +func (mr *MockDiffMockRecorder) WeightOfSubnetOnlyValidators(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WeightOfSubnetOnlyValidators", reflect.TypeOf((*MockDiff)(nil).WeightOfSubnetOnlyValidators), subnetID) +} diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 0f67d6607ba3..1abe784ef272 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -753,21 +753,6 @@ func (mr *MockStateMockRecorder) NumActiveSubnetOnlyValidators() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveSubnetOnlyValidators", reflect.TypeOf((*MockState)(nil).NumActiveSubnetOnlyValidators)) } -// NumSubnetOnlyValidators mocks base method. 
-func (m *MockState) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NumSubnetOnlyValidators", subnetID) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NumSubnetOnlyValidators indicates an expected call of NumSubnetOnlyValidators. -func (mr *MockStateMockRecorder) NumSubnetOnlyValidators(subnetID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumSubnetOnlyValidators", reflect.TypeOf((*MockState)(nil).NumSubnetOnlyValidators), subnetID) -} - // PutCurrentDelegator mocks base method. func (m *MockState) PutCurrentDelegator(staker *Staker) { m.ctrl.T.Helper() @@ -998,3 +983,18 @@ func (mr *MockStateMockRecorder) UTXOIDs(addr, previous, limit any) *gomock.Call mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UTXOIDs", reflect.TypeOf((*MockState)(nil).UTXOIDs), addr, previous, limit) } + +// WeightOfSubnetOnlyValidators mocks base method. +func (m *MockState) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WeightOfSubnetOnlyValidators", subnetID) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WeightOfSubnetOnlyValidators indicates an expected call of WeightOfSubnetOnlyValidators. +func (mr *MockStateMockRecorder) WeightOfSubnetOnlyValidators(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WeightOfSubnetOnlyValidators", reflect.TypeOf((*MockState)(nil).WeightOfSubnetOnlyValidators), subnetID) +} diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index ba4fcb46eb05..ac572364e75a 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -86,7 +86,7 @@ var ( ChainPrefix = []byte("chain") ExpiryReplayProtectionPrefix = []byte("expiryReplayProtection") SubnetOnlyValidatorsPrefix = []byte("subnetOnlyValidators") - NumValidatorsPrefix = []byte("numValidators") + WeightsPrefix = []byte("weights") SubnetIDNodeIDPrefix = []byte("subnetIDNodeID") ActivePrefix = []byte("active") InactivePrefix = []byte("inactive") @@ -320,7 +320,7 @@ type state struct { activeSOVs *btree.BTreeG[SubnetOnlyValidator] sovDiff *subnetOnlyValidatorsDiff subnetOnlyValidatorsDB database.Database - numValidatorsDB database.Database + weightsDB database.Database subnetIDNodeIDDB database.Database activeDB database.Database inactiveDB database.Database @@ -646,7 +646,7 @@ func New( activeSOVs: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), sovDiff: newSubnetOnlyValidatorsDiff(), subnetOnlyValidatorsDB: subnetOnlyValidatorsDB, - numValidatorsDB: prefixdb.New(NumValidatorsPrefix, subnetOnlyValidatorsDB), + weightsDB: prefixdb.New(WeightsPrefix, subnetOnlyValidatorsDB), subnetIDNodeIDDB: prefixdb.New(SubnetIDNodeIDPrefix, subnetOnlyValidatorsDB), activeDB: prefixdb.New(ActivePrefix, subnetOnlyValidatorsDB), inactiveDB: prefixdb.New(InactivePrefix, subnetOnlyValidatorsDB), @@ -756,23 +756,17 @@ func (s *state) NumActiveSubnetOnlyValidators() int { return len(s.activeSOVLookup) + s.sovDiff.numAddedActive } -func (s *state) NumSubnetOnlyValidators(subnetID ids.ID) (int, error) { - if numSOVs, modified := s.sovDiff.modifiedNumValidators[subnetID]; modified { - return numSOVs, nil +func (s *state) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { + if weight, modified := s.sovDiff.modifiedTotalWeight[subnetID]; modified { + return weight, 
nil } // TODO: Add caching - numSOVs, err := database.GetUInt64(s.numValidatorsDB, subnetID[:]) + weight, err := database.GetUInt64(s.weightsDB, subnetID[:]) if err == database.ErrNotFound { return 0, nil } - if err != nil { - return 0, err - } - if numSOVs > math.MaxInt { - return 0, safemath.ErrOverflow - } - return int(numSOVs), nil + return weight, err } func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { @@ -1901,7 +1895,7 @@ func (s *state) write(updateValidators bool, height uint64) error { func (s *state) Close() error { return errors.Join( s.expiryDB.Close(), - s.numValidatorsDB.Close(), + s.weightsDB.Close(), s.subnetIDNodeIDDB.Close(), s.activeDB.Close(), s.inactiveDB.Close(), @@ -2137,13 +2131,13 @@ func (s *state) writeExpiry() error { // TODO: Update validator sets // TODO: Add caching func (s *state) writeSubnetOnlyValidators(height uint64) error { - // Write counts: - for subnetID, numValidators := range s.sovDiff.modifiedNumValidators { - if err := database.PutUInt64(s.numValidatorsDB, subnetID[:], uint64(numValidators)); err != nil { + // Write modified weights: + for subnetID, weight := range s.sovDiff.modifiedTotalWeight { + if err := database.PutUInt64(s.weightsDB, subnetID[:], weight); err != nil { return err } } - maps.Clear(s.sovDiff.modifiedNumValidators) + maps.Clear(s.sovDiff.modifiedTotalWeight) historicalDiffs, err := s.makeSubnetOnlyValidatorHistoricalDiffs() if err != nil { diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 0beff373a0bf..62d155283bdc 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1922,7 +1922,7 @@ func TestSubnetOnlyValidators(t *testing.T) { } var ( - numSOVs = make(map[ids.ID]int) + weights = make(map[ids.ID]uint64) expectedActive []SubnetOnlyValidator ) for _, expectedSOV := range expectedSOVs { @@ -1938,7 +1938,7 @@ func TestSubnetOnlyValidators(t *testing.T) { require.NoError(err) require.True(has) - numSOVs[sov.SubnetID]++ + weights[sov.SubnetID] += sov.Weight if expectedSOV.isActive() { expectedActive = append(expectedActive, expectedSOV) } @@ -1954,10 +1954,10 @@ func TestSubnetOnlyValidators(t *testing.T) { require.Equal(len(expectedActive), chain.NumActiveSubnetOnlyValidators()) - for subnetID, expectedNumSOVs := range numSOVs { - numSOVs, err := chain.NumSubnetOnlyValidators(subnetID) + for subnetID, expectedWeight := range weights { + weight, err := chain.WeightOfSubnetOnlyValidators(subnetID) require.NoError(err) - require.Equal(expectedNumSOVs, numSOVs) + require.Equal(expectedWeight, weight) } } diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 3884abcdcbaf..9b505cb22842 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -14,6 +14,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/vms/platformvm/block" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) var ( @@ -32,9 +34,9 @@ type SubnetOnlyValidators interface { // subnet only validators. NumActiveSubnetOnlyValidators() int - // NumSubnetOnlyValidators returns the total number of subnet only - // validators on [subnetID]. - NumSubnetOnlyValidators(subnetID ids.ID) (int, error) + // WeightOfSubnetOnlyValidators returns the total active and inactive weight + // of subnet only validators on [subnetID]. 
+ WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) // GetSubnetOnlyValidator returns the validator with [validationID] if it // exists. If the validator does not exist, [err] will equal @@ -166,19 +168,19 @@ type subnetIDNodeID struct { } type subnetOnlyValidatorsDiff struct { - numAddedActive int // May be negative - modifiedNumValidators map[ids.ID]int // subnetID -> numValidators - modified map[ids.ID]SubnetOnlyValidator - modifiedHasNodeIDs map[subnetIDNodeID]bool - active *btree.BTreeG[SubnetOnlyValidator] + numAddedActive int // May be negative + modifiedTotalWeight map[ids.ID]uint64 // subnetID -> totalWeight + modified map[ids.ID]SubnetOnlyValidator + modifiedHasNodeIDs map[subnetIDNodeID]bool + active *btree.BTreeG[SubnetOnlyValidator] } func newSubnetOnlyValidatorsDiff() *subnetOnlyValidatorsDiff { return &subnetOnlyValidatorsDiff{ - modifiedNumValidators: make(map[ids.ID]int), - modified: make(map[ids.ID]SubnetOnlyValidator), - modifiedHasNodeIDs: make(map[subnetIDNodeID]bool), - active: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), + modifiedTotalWeight: make(map[ids.ID]uint64), + modified: make(map[ids.ID]SubnetOnlyValidator), + modifiedHasNodeIDs: make(map[subnetIDNodeID]bool), + active: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), } } @@ -204,10 +206,9 @@ func (d *subnetOnlyValidatorsDiff) hasSubnetOnlyValidator(subnetID ids.ID, nodeI func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state SubnetOnlyValidators, sov SubnetOnlyValidator) error { var ( - prevExists bool + prevWeight uint64 prevActive bool - newExists = sov.Weight != 0 - newActive = newExists && sov.EndAccumulatedFee != 0 + newActive = sov.Weight != 0 && sov.EndAccumulatedFee != 0 ) switch priorSOV, err := state.GetSubnetOnlyValidator(sov.ValidationID); err { case nil: @@ -215,7 +216,7 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state SubnetOnlyValida return ErrMutatedSubnetOnlyValidator } - prevExists = true + prevWeight = priorSOV.Weight prevActive = priorSOV.EndAccumulatedFee != 0 case database.ErrNotFound: has, err := state.HasSubnetOnlyValidator(sov.SubnetID, sov.NodeID) @@ -230,20 +231,30 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state SubnetOnlyValida } switch { - case prevExists && !newExists: - numSOVs, err := state.NumSubnetOnlyValidators(sov.SubnetID) + case prevWeight < sov.Weight: + weight, err := state.WeightOfSubnetOnlyValidators(sov.SubnetID) + if err != nil { + return err + } + + weight, err = safemath.Add(weight, sov.Weight-prevWeight) + if err != nil { + return err + } + + d.modifiedTotalWeight[sov.SubnetID] = weight + case prevWeight > sov.Weight: + weight, err := state.WeightOfSubnetOnlyValidators(sov.SubnetID) if err != nil { return err } - d.modifiedNumValidators[sov.SubnetID] = numSOVs - 1 - case !prevExists && newExists: - numSOVs, err := state.NumSubnetOnlyValidators(sov.SubnetID) + weight, err = safemath.Sub(weight, prevWeight-sov.Weight) if err != nil { return err } - d.modifiedNumValidators[sov.SubnetID] = numSOVs + 1 + d.modifiedTotalWeight[sov.SubnetID] = weight } switch { From e2045e23b136a9895d76fb40e7e7d7bba952ae46 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 16 Sep 2024 17:58:10 -0400 Subject: [PATCH 043/155] load sov validators on startup --- vms/platformvm/state/state.go | 48 ++++++++++++++++++++++++++++-- vms/platformvm/state/state_test.go | 13 +++++--- 2 files changed, 55 insertions(+), 6 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 
ac572364e75a..b34c9ea16f2e 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1830,9 +1830,53 @@ func (s *state) loadPendingValidators() error { ) } -// Invariant: initValidatorSets requires loadCurrentValidators to have already -// been called. +// Invariant: initValidatorSets requires loadActiveSubnetOnlyValidators and +// loadCurrentValidators to have already been called. func (s *state) initValidatorSets() error { + // Load ACP77 validators + for validationID, sov := range s.activeSOVLookup { + pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) + if err := s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight); err != nil { + return err + } + } + + // Load inactive weights + it := s.weightsDB.NewIterator() + defer it.Release() + + for it.Next() { + subnetID, err := ids.ToID(it.Key()) + if err != nil { + return err + } + + totalWeight, err := database.ParseUInt64(it.Value()) + if err != nil { + return err + } + + activeWeight, err := s.validators.TotalWeight(subnetID) + if err != nil { + return err + } + + inactiveWeight, err := safemath.Sub(totalWeight, activeWeight) + if err != nil { + // This should never happen, as the total weight should always be at + // least the sum of the active weights. + return err + } + if inactiveWeight == 0 { + continue + } + + if err := s.validators.AddStaker(subnetID, ids.EmptyNodeID, nil, ids.Empty, inactiveWeight); err != nil { + return err + } + } + + // Load primary network and non-ACP77 validators for subnetID, validators := range s.currentStakers.validators { if s.validators.Count(subnetID) != 0 { // Enforce the invariant that the validator set is empty here. diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 62d155283bdc..318e52b196b5 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1885,7 +1885,8 @@ func TestSubnetOnlyValidators(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - state := newTestState(t, memdb.New()) + db := memdb.New() + state := newTestState(t, db) var ( initialSOVs = make(map[ids.ID]SubnetOnlyValidator) @@ -2005,13 +2006,17 @@ func TestSubnetOnlyValidators(t *testing.T) { return validatorSet } + reloadedState := newTestState(t, db) for subnetID := range subnetIDs { - expectedValidatorSet := sovsToValidatorSet(initialSOVs, subnetID) - endValidatorSet := sovsToValidatorSet(expectedSOVs, subnetID) + expectedEndValidatorSet := sovsToValidatorSet(expectedSOVs, subnetID) + endValidatorSet := reloadedState.validators.GetMap(subnetID) + require.Equal(expectedEndValidatorSet, endValidatorSet) require.NoError(state.ApplyValidatorWeightDiffs(context.Background(), endValidatorSet, 1, 1, subnetID)) require.NoError(state.ApplyValidatorPublicKeyDiffs(context.Background(), endValidatorSet, 1, 1, subnetID)) - require.Equal(expectedValidatorSet, endValidatorSet) + + initialValidatorSet := sovsToValidatorSet(initialSOVs, subnetID) + require.Equal(initialValidatorSet, endValidatorSet) } }) } From e0beab1f26b19d5af7bea74cb08915a1cc032f0b Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 16 Sep 2024 22:59:40 -0400 Subject: [PATCH 044/155] update in-memory validator sets --- vms/platformvm/state/state.go | 132 ++++++++++++++++++++++++++--- vms/platformvm/state/state_test.go | 5 +- 2 files changed, 123 insertions(+), 14 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index b34c9ea16f2e..5ac7419102bf 100644 --- 
a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1919,7 +1919,7 @@ func (s *state) write(updateValidators bool, height uint64) error { return errors.Join( s.writeBlocks(), s.writeExpiry(), - s.writeSubnetOnlyValidators(height), + s.writeSubnetOnlyValidators(updateValidators, height), s.writeCurrentStakers(updateValidators, height, codecVersion), s.writePendingStakers(), s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers @@ -2174,7 +2174,7 @@ func (s *state) writeExpiry() error { // TODO: Update validator sets // TODO: Add caching -func (s *state) writeSubnetOnlyValidators(height uint64) error { +func (s *state) writeSubnetOnlyValidators(updateValidators bool, height uint64) error { // Write modified weights: for subnetID, weight := range s.sovDiff.modifiedTotalWeight { if err := database.PutUInt64(s.weightsDB, subnetID[:], weight); err != nil { @@ -2209,12 +2209,17 @@ func (s *state) writeSubnetOnlyValidators(height uint64) error { } } + sovChanges := s.sovDiff.modified // Perform deletions: - for validationID, sov := range s.sovDiff.modified { + for validationID, sov := range sovChanges { if sov.Weight != 0 { + // Additions and modifications are handled in the next loops. continue } + // The next loops shouldn't consider this change. + delete(sovChanges, validationID) + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) if err == database.ErrNotFound { // Deleting a non-existent validator is a noop. This can happen if @@ -2242,22 +2247,33 @@ func (s *state) writeSubnetOnlyValidators(height uint64) error { if err != nil { return err } - } - // Perform additions/modifications: - for validationID, sov := range s.sovDiff.modified { - if sov.Weight == 0 { + + // TODO: Move the validator set management out of the state package + if !updateValidators { continue } - subnetIDNodeIDKey := make([]byte, len(sov.SubnetID)+len(sov.NodeID)) - copy(subnetIDNodeIDKey, sov.SubnetID[:]) - copy(subnetIDNodeIDKey[len(sov.SubnetID):], sov.NodeID[:]) - if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, nil); err != nil { + nodeID := ids.EmptyNodeID + if priorSOV.isActive() { + nodeID = priorSOV.NodeID + } + if err := s.validators.RemoveWeight(priorSOV.SubnetID, nodeID, priorSOV.Weight); err != nil { + return fmt.Errorf("failed to delete SoV validator: %w", err) + } + } + + // Perform modifications: + for validationID, sov := range sovChanges { + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) + if err == database.ErrNotFound { + // New additions are handled in the next loop. + continue + } + if err != nil { return err } - var err error - if priorSOV, ok := s.activeSOVLookup[validationID]; ok { + if priorSOV.isActive() { delete(s.activeSOVLookup, validationID) s.activeSOVs.Delete(priorSOV) err = deleteSubnetOnlyValidator(s.activeDB, validationID) @@ -2278,6 +2294,96 @@ func (s *state) writeSubnetOnlyValidators(height uint64) error { if err != nil { return err } + + // The next loop shouldn't consider this change. + delete(sovChanges, validationID) + + // TODO: Move the validator set management out of the state package + if !updateValidators { + continue + } + + switch { + case !priorSOV.isActive() && sov.isActive(): + // This validator is being activated. 
+ pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) + err = errors.Join( + s.validators.RemoveWeight(sov.SubnetID, ids.EmptyNodeID, priorSOV.Weight), + s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight), + ) + case priorSOV.isActive() && !sov.isActive(): + // This validator is being deactivated. + inactiveWeight := s.validators.GetWeight(sov.SubnetID, ids.EmptyNodeID) + if inactiveWeight == 0 { + err = s.validators.AddStaker(sov.SubnetID, ids.EmptyNodeID, nil, ids.Empty, sov.Weight) + } else { + err = s.validators.AddWeight(sov.SubnetID, ids.EmptyNodeID, sov.Weight) + } + err = errors.Join( + err, + s.validators.RemoveWeight(sov.SubnetID, sov.NodeID, priorSOV.Weight), + ) + default: + // This validator's active status isn't changing. + nodeID := ids.EmptyNodeID + if sov.isActive() { + nodeID = sov.NodeID + } + if priorSOV.Weight < sov.Weight { + err = s.validators.AddWeight(sov.SubnetID, nodeID, sov.Weight-priorSOV.Weight) + } else if priorSOV.Weight > sov.Weight { + err = s.validators.RemoveWeight(sov.SubnetID, nodeID, priorSOV.Weight-sov.Weight) + } + } + if err != nil { + return err + } + } + + // Perform additions: + for validationID, sov := range sovChanges { + subnetIDNodeIDKey := make([]byte, len(sov.SubnetID)+len(sov.NodeID)) + copy(subnetIDNodeIDKey, sov.SubnetID[:]) + copy(subnetIDNodeIDKey[len(sov.SubnetID):], sov.NodeID[:]) + if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, nil); err != nil { + return err + } + + isActive := sov.isActive() + if isActive { + s.activeSOVLookup[validationID] = sov + s.activeSOVs.ReplaceOrInsert(sov) + err = putSubnetOnlyValidator(s.activeDB, sov) + } else { + err = putSubnetOnlyValidator(s.inactiveDB, sov) + } + if err != nil { + return err + } + + // TODO: Move the validator set management out of the state package + if !updateValidators { + continue + } + + if isActive { + pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) + if err := s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight); err != nil { + return fmt.Errorf("failed to add SoV validator: %w", err) + } + continue + } + + // This validator is inactive + inactiveWeight := s.validators.GetWeight(sov.SubnetID, ids.EmptyNodeID) + if inactiveWeight == 0 { + err = s.validators.AddStaker(sov.SubnetID, ids.EmptyNodeID, nil, ids.Empty, sov.Weight) + } else { + err = s.validators.AddWeight(sov.SubnetID, ids.EmptyNodeID, sov.Weight) + } + if err != nil { + return err + } } s.sovDiff = newSubnetOnlyValidatorsDiff() diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 318e52b196b5..32b515e5c713 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -2009,9 +2009,12 @@ func TestSubnetOnlyValidators(t *testing.T) { reloadedState := newTestState(t, db) for subnetID := range subnetIDs { expectedEndValidatorSet := sovsToValidatorSet(expectedSOVs, subnetID) - endValidatorSet := reloadedState.validators.GetMap(subnetID) + endValidatorSet := state.validators.GetMap(subnetID) require.Equal(expectedEndValidatorSet, endValidatorSet) + reloadedEndValidatorSet := reloadedState.validators.GetMap(subnetID) + require.Equal(expectedEndValidatorSet, reloadedEndValidatorSet) + require.NoError(state.ApplyValidatorWeightDiffs(context.Background(), endValidatorSet, 1, 1, subnetID)) require.NoError(state.ApplyValidatorPublicKeyDiffs(context.Background(), endValidatorSet, 1, 1, subnetID)) From 410bcda8035b55431028d3eae42db65d42af3303 Mon Sep 17 00:00:00 2001 From: Stephen 
Buttolph Date: Tue, 17 Sep 2024 09:57:34 -0400 Subject: [PATCH 045/155] remove comment --- vms/platformvm/state/state.go | 1 - 1 file changed, 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 5ac7419102bf..bac3f27414ab 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2172,7 +2172,6 @@ func (s *state) writeExpiry() error { return nil } -// TODO: Update validator sets // TODO: Add caching func (s *state) writeSubnetOnlyValidators(updateValidators bool, height uint64) error { // Write modified weights: From 149e21e47169618702371e6943fec5df296693a2 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 17 Sep 2024 13:45:30 -0400 Subject: [PATCH 046/155] Add SoV excess --- database/helpers.go | 8 ++++ .../block/executor/proposal_block_test.go | 2 + .../block/executor/standard_block_test.go | 2 + .../block/executor/verifier_test.go | 5 +++ vms/platformvm/state/diff.go | 11 ++++++ vms/platformvm/state/diff_test.go | 24 ++++++++++++ vms/platformvm/state/mock_chain.go | 26 +++++++++++++ vms/platformvm/state/mock_diff.go | 26 +++++++++++++ vms/platformvm/state/mock_state.go | 26 +++++++++++++ vms/platformvm/state/state.go | 37 ++++++++++++++----- 10 files changed, 158 insertions(+), 9 deletions(-) diff --git a/database/helpers.go b/database/helpers.go index d17e6669e4fa..43fff80796ad 100644 --- a/database/helpers.go +++ b/database/helpers.go @@ -54,6 +54,14 @@ func GetUInt64(db KeyValueReader, key []byte) (uint64, error) { return ParseUInt64(b) } +func GetOrDefaultUInt64(db KeyValueReader, key []byte, def uint64) (uint64, error) { + v, err := GetUInt64(db, key) + if err == ErrNotFound { + return def, nil + } + return v, err +} + func PackUInt64(val uint64) []byte { bytes := make([]byte, Uint64Size) binary.BigEndian.PutUint64(bytes, val) diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 0708704ebabf..2e0aaa72bc03 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -91,6 +91,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() + onParentAccept.EXPECT().GetSoVExcess().Return(gas.Gas(0)).AnyTimes() onParentAccept.EXPECT().NumActiveSubnetOnlyValidators().Return(0).AnyTimes() currentStakersIt := iteratormock.NewIterator[*state.Staker](ctrl) @@ -164,6 +165,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetTimestamp().Return(parentTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() + onParentAccept.EXPECT().GetSoVExcess().Return(gas.Gas(0)).AnyTimes() onParentAccept.EXPECT().NumActiveSubnetOnlyValidators().Return(0).AnyTimes() onParentAccept.EXPECT().GetCurrentSupply(constants.PrimaryNetworkID).Return(uint64(1000), nil).AnyTimes() diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index 1bdd822144f3..3162bb8998ae 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -60,6 +60,7 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { 
onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() + onParentAccept.EXPECT().GetSoVExcess().Return(gas.Gas(0)).AnyTimes() onParentAccept.EXPECT().NumActiveSubnetOnlyValidators().Return(0).AnyTimes() // wrong height @@ -139,6 +140,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() + onParentAccept.EXPECT().GetSoVExcess().Return(gas.Gas(0)).AnyTimes() onParentAccept.EXPECT().NumActiveSubnetOnlyValidators().Return(0).AnyTimes() txID := ids.GenerateTestID() diff --git a/vms/platformvm/block/executor/verifier_test.go b/vms/platformvm/block/executor/verifier_test.go index bf44ff70d7a6..9b678e95cde9 100644 --- a/vms/platformvm/block/executor/verifier_test.go +++ b/vms/platformvm/block/executor/verifier_test.go @@ -104,6 +104,7 @@ func TestVerifierVisitProposalBlock(t *testing.T) { parentOnAcceptState.EXPECT().GetTimestamp().Return(timestamp).Times(2) parentOnAcceptState.EXPECT().GetFeeState().Return(gas.State{}).Times(2) parentOnAcceptState.EXPECT().GetAccruedFees().Return(uint64(0)).Times(2) + parentOnAcceptState.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(2) parentOnAcceptState.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(2) backend := &backend{ @@ -337,6 +338,7 @@ func TestVerifierVisitStandardBlock(t *testing.T) { parentState.EXPECT().GetTimestamp().Return(timestamp).Times(1) parentState.EXPECT().GetFeeState().Return(gas.State{}).Times(1) parentState.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + parentState.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) parentState.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) parentStatelessBlk.EXPECT().Height().Return(uint64(1)).Times(1) mempool.EXPECT().Remove(apricotBlk.Txs()).Times(1) @@ -600,6 +602,7 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { s.EXPECT().GetTimestamp().Return(parentTime).Times(3) s.EXPECT().GetFeeState().Return(gas.State{}).Times(3) s.EXPECT().GetAccruedFees().Return(uint64(0)).Times(3) + s.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(3) s.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(3) onDecisionState, err := state.NewDiff(parentID, backend) @@ -699,6 +702,7 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { s.EXPECT().GetTimestamp().Return(parentTime).Times(3) s.EXPECT().GetFeeState().Return(gas.State{}).Times(3) s.EXPECT().GetAccruedFees().Return(uint64(0)).Times(3) + s.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(3) s.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(3) onDecisionState, err := state.NewDiff(parentID, backend) @@ -816,6 +820,7 @@ func TestVerifierVisitStandardBlockWithDuplicateInputs(t *testing.T) { parentState.EXPECT().GetTimestamp().Return(timestamp).Times(1) parentState.EXPECT().GetFeeState().Return(gas.State{}).Times(1) parentState.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + parentState.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) parentState.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) parentStatelessBlk.EXPECT().Parent().Return(grandParentID).Times(1) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index ae0568650580..5110aa1ea6e0 100644 --- a/vms/platformvm/state/diff.go +++ 
b/vms/platformvm/state/diff.go @@ -37,6 +37,7 @@ type diff struct { timestamp time.Time feeState gas.State + sovExcess gas.Gas accruedFees uint64 parentActiveSOVs int @@ -82,6 +83,7 @@ func NewDiff( stateVersions: stateVersions, timestamp: parentState.GetTimestamp(), feeState: parentState.GetFeeState(), + sovExcess: parentState.GetSoVExcess(), accruedFees: parentState.GetAccruedFees(), parentActiveSOVs: parentState.NumActiveSubnetOnlyValidators(), expiryDiff: newExpiryDiff(), @@ -121,6 +123,14 @@ func (d *diff) SetFeeState(feeState gas.State) { d.feeState = feeState } +func (d *diff) GetSoVExcess() gas.Gas { + return d.sovExcess +} + +func (d *diff) SetSoVExcess(excess gas.Gas) { + d.sovExcess = excess +} + func (d *diff) GetAccruedFees() uint64 { return d.accruedFees } @@ -553,6 +563,7 @@ func (d *diff) DeleteUTXO(utxoID ids.ID) { func (d *diff) Apply(baseState Chain) error { baseState.SetTimestamp(d.timestamp) baseState.SetFeeState(d.feeState) + baseState.SetSoVExcess(d.sovExcess) baseState.SetAccruedFees(d.accruedFees) for subnetID, supply := range d.currentSupply { baseState.SetCurrentSupply(subnetID, supply) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 187667c34e03..86767348047e 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -70,6 +70,24 @@ func TestDiffFeeState(t *testing.T) { assertChainsEqual(t, state, d) } +func TestDiffSoVExcess(t *testing.T) { + require := require.New(t) + + state := newTestState(t, memdb.New()) + + d, err := NewDiffOn(state) + require.NoError(err) + + initialExcess := state.GetSoVExcess() + newExcess := initialExcess + 1 + d.SetSoVExcess(newExcess) + require.Equal(newExcess, d.GetSoVExcess()) + require.Equal(initialExcess, state.GetSoVExcess()) + + require.NoError(d.Apply(state)) + assertChainsEqual(t, state, d) +} + func TestDiffAccruedFees(t *testing.T) { require := require.New(t) @@ -353,6 +371,7 @@ func TestDiffCurrentValidator(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) @@ -389,6 +408,7 @@ func TestDiffPendingValidator(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) @@ -431,6 +451,7 @@ func TestDiffCurrentDelegator(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) @@ -479,6 +500,7 @@ func TestDiffPendingDelegator(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) @@ -621,6 +643,7 @@ func TestDiffTx(t 
*testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) @@ -720,6 +743,7 @@ func TestDiffUTXO(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().NumActiveSubnetOnlyValidators().Return(0).Times(1) d, err := NewDiffOn(state) diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index 5077a16ff69e..4d34407ee3c2 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -368,6 +368,20 @@ func (mr *MockChainMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockChain)(nil).GetPendingValidator), subnetID, nodeID) } +// GetSoVExcess mocks base method. +func (m *MockChain) GetSoVExcess() gas.Gas { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSoVExcess") + ret0, _ := ret[0].(gas.Gas) + return ret0 +} + +// GetSoVExcess indicates an expected call of GetSoVExcess. +func (mr *MockChainMockRecorder) GetSoVExcess() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSoVExcess", reflect.TypeOf((*MockChain)(nil).GetSoVExcess)) +} + // GetSubnetManager mocks base method. func (m *MockChain) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { m.ctrl.T.Helper() @@ -646,6 +660,18 @@ func (mr *MockChainMockRecorder) SetFeeState(f any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeState", reflect.TypeOf((*MockChain)(nil).SetFeeState), f) } +// SetSoVExcess mocks base method. +func (m *MockChain) SetSoVExcess(e gas.Gas) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSoVExcess", e) +} + +// SetSoVExcess indicates an expected call of SetSoVExcess. +func (mr *MockChainMockRecorder) SetSoVExcess(e any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSoVExcess", reflect.TypeOf((*MockChain)(nil).SetSoVExcess), e) +} + // SetSubnetManager mocks base method. func (m *MockChain) SetSubnetManager(subnetID, chainID ids.ID, addr []byte) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index 1112451386f4..95be0ff1fb5e 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -382,6 +382,20 @@ func (mr *MockDiffMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockDiff)(nil).GetPendingValidator), subnetID, nodeID) } +// GetSoVExcess mocks base method. +func (m *MockDiff) GetSoVExcess() gas.Gas { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSoVExcess") + ret0, _ := ret[0].(gas.Gas) + return ret0 +} + +// GetSoVExcess indicates an expected call of GetSoVExcess. +func (mr *MockDiffMockRecorder) GetSoVExcess() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSoVExcess", reflect.TypeOf((*MockDiff)(nil).GetSoVExcess)) +} + // GetSubnetManager mocks base method. 
func (m *MockDiff) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { m.ctrl.T.Helper() @@ -660,6 +674,18 @@ func (mr *MockDiffMockRecorder) SetFeeState(f any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeState", reflect.TypeOf((*MockDiff)(nil).SetFeeState), f) } +// SetSoVExcess mocks base method. +func (m *MockDiff) SetSoVExcess(e gas.Gas) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSoVExcess", e) +} + +// SetSoVExcess indicates an expected call of SetSoVExcess. +func (mr *MockDiffMockRecorder) SetSoVExcess(e any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSoVExcess", reflect.TypeOf((*MockDiff)(nil).SetSoVExcess), e) +} + // SetSubnetManager mocks base method. func (m *MockDiff) SetSubnetManager(subnetID, chainID ids.ID, addr []byte) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 1abe784ef272..aa72ae2cbca8 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -542,6 +542,20 @@ func (mr *MockStateMockRecorder) GetRewardUTXOs(txID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockState)(nil).GetRewardUTXOs), txID) } +// GetSoVExcess mocks base method. +func (m *MockState) GetSoVExcess() gas.Gas { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSoVExcess") + ret0, _ := ret[0].(gas.Gas) + return ret0 +} + +// GetSoVExcess indicates an expected call of GetSoVExcess. +func (mr *MockStateMockRecorder) GetSoVExcess() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSoVExcess", reflect.TypeOf((*MockState)(nil).GetSoVExcess)) +} + // GetStartTime mocks base method. func (m *MockState) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { m.ctrl.T.Helper() @@ -919,6 +933,18 @@ func (mr *MockStateMockRecorder) SetLastAccepted(blkID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), blkID) } +// SetSoVExcess mocks base method. +func (m *MockState) SetSoVExcess(e gas.Gas) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSoVExcess", e) +} + +// SetSoVExcess indicates an expected call of SetSoVExcess. +func (mr *MockStateMockRecorder) SetSoVExcess(e any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSoVExcess", reflect.TypeOf((*MockState)(nil).SetSoVExcess), e) +} + // SetSubnetManager mocks base method. 
func (m *MockState) SetSubnetManager(subnetID, chainID ids.ID, addr []byte) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index bac3f27414ab..1c50cdd0b9a2 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -94,6 +94,7 @@ var ( TimestampKey = []byte("timestamp") FeeStateKey = []byte("fee state") + SoVExcessKey = []byte("sov excess") AccruedFeesKey = []byte("accrued fees") CurrentSupplyKey = []byte("current supply") LastAcceptedKey = []byte("last accepted") @@ -118,6 +119,9 @@ type Chain interface { GetFeeState() gas.State SetFeeState(f gas.State) + GetSoVExcess() gas.Gas + SetSoVExcess(e gas.Gas) + GetAccruedFees() uint64 SetAccruedFees(f uint64) @@ -296,6 +300,7 @@ type stateBlk struct { * |-- blocksReindexedKey -> nil * |-- timestampKey -> timestamp * |-- feeStateKey -> feeState + * |-- sovExcessKey -> sovExcess * |-- accruedFeesKey -> accruedFees * |-- currentSupplyKey -> currentSupply * |-- lastAcceptedKey -> lastAccepted @@ -402,6 +407,7 @@ type state struct { // The persisted fields represent the current database value timestamp, persistedTimestamp time.Time feeState, persistedFeeState gas.State + sovExcess, persistedSOVExcess gas.Gas accruedFees, persistedAccruedFees uint64 currentSupply, persistedCurrentSupply uint64 // [lastAccepted] is the most recently accepted block. @@ -1179,6 +1185,14 @@ func (s *state) SetFeeState(feeState gas.State) { s.feeState = feeState } +func (s *state) GetSoVExcess() gas.Gas { + return s.sovExcess +} + +func (s *state) SetSoVExcess(e gas.Gas) { + s.sovExcess = e +} + func (s *state) GetAccruedFees() uint64 { return s.accruedFees } @@ -1481,7 +1495,14 @@ func (s *state) loadMetadata() error { s.persistedFeeState = feeState s.SetFeeState(feeState) - accruedFees, err := getAccruedFees(s.singletonDB) + sovExcess, err := database.GetOrDefaultUInt64(s.singletonDB, SoVExcessKey, 0) + if err != nil { + return err + } + s.persistedSOVExcess = gas.Gas(sovExcess) + s.SetSoVExcess(gas.Gas(sovExcess)) + + accruedFees, err := database.GetOrDefaultUInt64(s.singletonDB, AccruedFeesKey, 0) if err != nil { return err } @@ -2904,6 +2925,12 @@ func (s *state) writeMetadata() error { } s.persistedFeeState = s.feeState } + if s.sovExcess != s.persistedSOVExcess { + if err := database.PutUInt64(s.singletonDB, SoVExcessKey, uint64(s.sovExcess)); err != nil { + return fmt.Errorf("failed to write sov excess: %w", err) + } + s.persistedSOVExcess = s.sovExcess + } if s.accruedFees != s.persistedAccruedFees { if err := database.PutUInt64(s.singletonDB, AccruedFeesKey, s.accruedFees); err != nil { return fmt.Errorf("failed to write accrued fees: %w", err) @@ -3121,11 +3148,3 @@ func getFeeState(db database.KeyValueReader) (gas.State, error) { } return feeState, nil } - -func getAccruedFees(db database.KeyValueReader) (uint64, error) { - accruedFees, err := database.GetUInt64(db, AccruedFeesKey) - if err == database.ErrNotFound { - return 0, nil - } - return accruedFees, err -} From 449994aa2f3f257e8abb596b6c9f798fa5a62788 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 22 Sep 2024 23:29:23 -0400 Subject: [PATCH 047/155] write subnet public key diffs --- vms/platformvm/state/state.go | 334 ++++---- vms/platformvm/state/state_test.go | 1108 ++++++++++---------------- vms/platformvm/validators/manager.go | 92 +-- 3 files changed, 649 insertions(+), 885 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 20a5d42bed03..12405f4ab60e 100644 --- 
a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1898,19 +1898,28 @@ func (s *state) initValidatorSets() error { } // Load primary network and non-ACP77 validators - for subnetID, validators := range s.currentStakers.validators { + primaryNetworkValidators := s.currentStakers.validators[constants.PrimaryNetworkID] + for subnetID, subnetValidators := range s.currentStakers.validators { if s.validators.Count(subnetID) != 0 { // Enforce the invariant that the validator set is empty here. return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) } - for nodeID, validator := range validators { - validatorStaker := validator.validator - if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { + for nodeID, subnetValidator := range subnetValidators { + primaryValidator, ok := primaryNetworkValidators[nodeID] + if !ok { + return errors.New("subnet validator without corresponding primary network validator") + } + + var ( + primaryStaker = primaryValidator.validator + subnetStaker = subnetValidator.validator + ) + if err := s.validators.AddStaker(subnetID, nodeID, primaryStaker.PublicKey, subnetStaker.TxID, subnetStaker.Weight); err != nil { return err } - delegatorIterator := iterator.FromTree(validator.delegators) + delegatorIterator := iterator.FromTree(subnetValidator.delegators) for delegatorIterator.Next() { delegatorStaker := delegatorIterator.Value() if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { @@ -2491,164 +2500,221 @@ func (s *state) makeSubnetOnlyValidatorHistoricalDiffs() (map[subnetIDNodeID]*va func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { + // Write the primary network diff last + if subnetID == constants.PrimaryNetworkID { + continue + } + delete(s.currentStakers.validatorDiffs, subnetID) - // Select db to write to - validatorDB := s.currentSubnetValidatorList - delegatorDB := s.currentSubnetDelegatorList - if subnetID == constants.PrimaryNetworkID { - validatorDB = s.currentValidatorList - delegatorDB = s.currentDelegatorList + err := s.writeCurrentStakersSubnetDiff( + subnetID, + validatorDiffs, + updateValidators, + height, + codecVersion, + ) + if err != nil { + return err } + } - // Record the change in weight and/or public key for each validator. - for nodeID, validatorDiff := range validatorDiffs { - // Copy [nodeID] so it doesn't get overwritten next iteration. - nodeID := nodeID + if validatorDiffs, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID]; ok { + delete(s.currentStakers.validatorDiffs, constants.PrimaryNetworkID) - weightDiff := &ValidatorWeightDiff{ - Decrease: validatorDiff.validatorStatus == deleted, - } - switch validatorDiff.validatorStatus { - case added: - staker := validatorDiff.validator - weightDiff.Amount = staker.Weight - - // Invariant: Only the Primary Network contains non-nil public - // keys. - if staker.PublicKey != nil { - // Record that the public key for the validator is being - // added. This means the prior value for the public key was - // nil. 
- err := s.validatorPublicKeyDiffsDB.Put( - marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), - nil, - ) - if err != nil { - return err - } - } + err := s.writeCurrentStakersSubnetDiff( + constants.PrimaryNetworkID, + validatorDiffs, + updateValidators, + height, + codecVersion, + ) + if err != nil { + return err + } + } - // The validator is being added. - // - // Invariant: It's impossible for a delegator to have been - // rewarded in the same block that the validator was added. - startTime := uint64(staker.StartTime.Unix()) - metadata := &validatorMetadata{ - txID: staker.TxID, - lastUpdated: staker.StartTime, - - UpDuration: 0, - LastUpdated: startTime, - StakerStartTime: startTime, - PotentialReward: staker.PotentialReward, - PotentialDelegateeReward: 0, - } + // TODO: Move validator set management out of the state package + // + // Attempt to update the stake metrics + if !updateValidators { + return nil + } - metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) - if err != nil { - return fmt.Errorf("failed to serialize current validator: %w", err) - } + totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + return fmt.Errorf("failed to get total weight of primary network: %w", err) + } - if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil { - return fmt.Errorf("failed to write current validator to list: %w", err) - } + s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + s.metrics.SetTotalStake(totalWeight) + return nil +} - s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) - case deleted: - staker := validatorDiff.validator - weightDiff.Amount = staker.Weight - - // Invariant: Only the Primary Network contains non-nil public - // keys. - if staker.PublicKey != nil { - // Record that the public key for the validator is being - // removed. This means we must record the prior value of the - // public key. - // - // Note: We store the uncompressed public key here as it is - // significantly more efficient to parse when applying - // diffs. - err := s.validatorPublicKeyDiffsDB.Put( - marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), - bls.PublicKeyToUncompressedBytes(staker.PublicKey), - ) - if err != nil { - return err - } - } +func (s *state) writeCurrentStakersSubnetDiff( + subnetID ids.ID, + validatorDiffs map[ids.NodeID]*diffValidator, + updateValidators bool, + height uint64, + codecVersion uint16, +) error { + // Select db to write to + validatorDB := s.currentSubnetValidatorList + delegatorDB := s.currentSubnetDelegatorList + if subnetID == constants.PrimaryNetworkID { + validatorDB = s.currentValidatorList + delegatorDB = s.currentDelegatorList + } - if err := validatorDB.Delete(staker.TxID[:]); err != nil { - return fmt.Errorf("failed to delete current staker: %w", err) - } + // Record the change in weight and/or public key for each validator. + for nodeID, validatorDiff := range validatorDiffs { + // Copy [nodeID] so it doesn't get overwritten next iteration. + nodeID := nodeID - s.validatorState.DeleteValidatorMetadata(nodeID, subnetID) + var ( + staker *Staker + pk *bls.PublicKey + weightDiff = &ValidatorWeightDiff{ + Decrease: validatorDiff.validatorStatus == deleted, + } + ) + if validatorDiff.validatorStatus != unmodified { + staker = validatorDiff.validator + + pk = staker.PublicKey + // For non-primary network validators, the public key is inherited + // from the primary network. 
+ if subnetID != constants.PrimaryNetworkID { + if vdr, ok := s.currentStakers.validators[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { + // The primary network validator is still present after + // writing. + pk = vdr.validator.PublicKey + } else if vdr, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { + // The primary network validator is being removed during + // writing. + pk = vdr.validator.PublicKey + } else { + // This should never happen. + return errors.New("missing primary network validator") + } } - err := writeCurrentDelegatorDiff( - delegatorDB, - weightDiff, - validatorDiff, - codecVersion, - ) - if err != nil { - return err + weightDiff.Amount = staker.Weight + } + + switch validatorDiff.validatorStatus { + case added: + if pk != nil { + // Record that the public key for the validator is being + // added. This means the prior value for the public key was + // nil. + err := s.validatorPublicKeyDiffsDB.Put( + marshalDiffKey(subnetID, height, nodeID), + nil, + ) + if err != nil { + return err + } } - if weightDiff.Amount == 0 { - // No weight change to record; go to next validator. - continue + // The validator is being added. + // + // Invariant: It's impossible for a delegator to have been + // rewarded in the same block that the validator was added. + startTime := uint64(staker.StartTime.Unix()) + metadata := &validatorMetadata{ + txID: staker.TxID, + lastUpdated: staker.StartTime, + + UpDuration: 0, + LastUpdated: startTime, + StakerStartTime: startTime, + PotentialReward: staker.PotentialReward, + PotentialDelegateeReward: 0, } - err = s.validatorWeightDiffsDB.Put( - marshalDiffKey(subnetID, height, nodeID), - marshalWeightDiff(weightDiff), - ) + metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) if err != nil { - return err + return fmt.Errorf("failed to serialize current validator: %w", err) } - // TODO: Move the validator set management out of the state package - if !updateValidators { - continue + if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil { + return fmt.Errorf("failed to write current validator to list: %w", err) } - if weightDiff.Decrease { - err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) - } else { - if validatorDiff.validatorStatus == added { - staker := validatorDiff.validator - err = s.validators.AddStaker( - subnetID, - nodeID, - staker.PublicKey, - staker.TxID, - weightDiff.Amount, - ) - } else { - err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) + s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) + case deleted: + if pk != nil { + // Record that the public key for the validator is being + // removed. This means we must record the prior value of the + // public key. + // + // Note: We store the uncompressed public key here as it is + // significantly more efficient to parse when applying + // diffs. 
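// Editor's aside (not part of the patch): the note above is the reason the
// deletion path writes bls.PublicKeyToUncompressedBytes(pk) as the diff value,
// while readers (e.g. the state_test.go checks later in this patch) reverse it
// with bls.PublicKeyFromValidUncompressedBytes. Uncompressed encodings parse
// significantly faster when replaying many diffs, and the "FromValid" variant
// presumably skips revalidation because the node produced the bytes itself. A
// tiny round-trip sketch using only the helpers that appear in this patch:
func storeAndReloadKeyDiff(pk *bls.PublicKey) *bls.PublicKey {
	// What gets written to validatorPublicKeyDiffsDB when a validator is removed.
	raw := bls.PublicKeyToUncompressedBytes(pk)
	// What a reader reconstructs when applying the diff at that height.
	return bls.PublicKeyFromValidUncompressedBytes(raw)
}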
+ err := s.validatorPublicKeyDiffsDB.Put( + marshalDiffKey(subnetID, height, nodeID), + bls.PublicKeyToUncompressedBytes(pk), + ) + if err != nil { + return err } } - if err != nil { - return fmt.Errorf("failed to update validator weight: %w", err) + + if err := validatorDB.Delete(staker.TxID[:]); err != nil { + return fmt.Errorf("failed to delete current staker: %w", err) } + + s.validatorState.DeleteValidatorMetadata(nodeID, subnetID) } - } - // TODO: Move validator set management out of the state package - // - // Attempt to update the stake metrics - if !updateValidators { - return nil - } + err := writeCurrentDelegatorDiff( + delegatorDB, + weightDiff, + validatorDiff, + codecVersion, + ) + if err != nil { + return err + } - totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) - if err != nil { - return fmt.Errorf("failed to get total weight of primary network: %w", err) - } + if weightDiff.Amount == 0 { + // No weight change to record; go to next validator. + continue + } - s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) - s.metrics.SetTotalStake(totalWeight) + err = s.validatorWeightDiffsDB.Put( + marshalDiffKey(subnetID, height, nodeID), + marshalWeightDiff(weightDiff), + ) + if err != nil { + return err + } + + // TODO: Move the validator set management out of the state package + if !updateValidators { + continue + } + + if weightDiff.Decrease { + err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) + } else { + if validatorDiff.validatorStatus == added { + err = s.validators.AddStaker( + subnetID, + nodeID, + pk, + staker.TxID, + weightDiff.Amount, + ) + } else { + err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) + } + } + if err != nil { + return fmt.Errorf("failed to update validator weight: %w", err) + } + } return nil } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 5c79594efbd1..f093b90a587c 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -5,7 +5,6 @@ package state import ( "context" - "fmt" "maps" "math" "math/rand" @@ -30,6 +29,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -112,620 +112,340 @@ func TestStateSyncGenesis(t *testing.T) { ) } -// Whenever we store a staker, a whole bunch a data structures are updated +// Whenever we store a staker, a whole bunch of data structures are updated // This test is meant to capture which updates are carried out func TestPersistStakers(t *testing.T) { - tests := map[string]struct { - // Insert or delete a staker to state and store it - storeStaker func(*require.Assertions, ids.ID /*=subnetID*/, *state) *Staker - - // Check that the staker is duly stored/removed in P-chain state - checkStakerInState func(*require.Assertions, *state, *Staker) + const ( + primaryValidatorDuration = 28 * 24 * time.Hour + primaryDelegatorDuration = 14 * 24 * time.Hour + subnetValidatorDuration = 21 * 24 * time.Hour + subnetDelegatorDuration = 14 * 24 * time.Hour + + primaryValidatorReward = iota + primaryDelegatorReward + ) + var ( + primaryValidatorStartTime = time.Now().Truncate(time.Second) + primaryValidatorEndTime = primaryValidatorStartTime.Add(primaryValidatorDuration) 
+ primaryValidatorEndTimeUnix = uint64(primaryValidatorEndTime.Unix()) + + primaryDelegatorStartTime = primaryValidatorStartTime + primaryDelegatorEndTime = primaryDelegatorStartTime.Add(primaryDelegatorDuration) + primaryDelegatorEndTimeUnix = uint64(primaryDelegatorEndTime.Unix()) + + primaryValidatorData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: primaryValidatorEndTimeUnix, + Wght: 1234, + } + primaryDelegatorData = txs.Validator{ + NodeID: primaryValidatorData.NodeID, + End: primaryDelegatorEndTimeUnix, + Wght: 6789, + } + ) - // Check whether validators are duly reported in the validator set, - // with the right weight and showing the BLS key - checkValidatorsSet func(*require.Assertions, *state, *Staker) + unsignedAddPrimaryNetworkValidator := createPermissionlessValidatorTx(t, constants.PrimaryNetworkID, primaryValidatorData) + addPrimaryNetworkValidator := &txs.Tx{Unsigned: unsignedAddPrimaryNetworkValidator} + require.NoError(t, addPrimaryNetworkValidator.Initialize(txs.Codec)) - // Check that node duly track stakers uptimes - checkValidatorUptimes func(*require.Assertions, *state, *Staker) + primaryNetworkPendingValidatorStaker, err := NewPendingStaker( + addPrimaryNetworkValidator.ID(), + unsignedAddPrimaryNetworkValidator, + ) + require.NoError(t, err) - // Check whether weight/bls keys diffs are duly stored - checkDiffs func(*require.Assertions, *state, *Staker, uint64) - }{ - "add current validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() + primaryNetworkCurrentValidatorStaker, err := NewCurrentStaker( + addPrimaryNetworkValidator.ID(), + unsignedAddPrimaryNetworkValidator, + primaryValidatorStartTime, + primaryValidatorReward, + ) + require.NoError(t, err) - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(endTime), - Wght: 1234, - } - validatorReward uint64 = 5678 - ) + unsignedAddPrimaryNetworkDelegator := createPermissionlessDelegatorTx(constants.PrimaryNetworkID, primaryDelegatorData) + addPrimaryNetworkDelegator := &txs.Tx{Unsigned: unsignedAddPrimaryNetworkDelegator} + require.NoError(t, addPrimaryNetworkDelegator.Initialize(txs.Codec)) - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) + primaryNetworkPendingDelegatorStaker, err := NewPendingStaker( + addPrimaryNetworkDelegator.ID(), + unsignedAddPrimaryNetworkDelegator, + ) + require.NoError(t, err) - staker, err := NewCurrentStaker( - addPermValTx.ID(), - utx, - time.Unix(startTime, 0), - validatorReward, - ) - r.NoError(err) - - r.NoError(s.PutCurrentValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - return staker - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - retrievedStaker, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.Equal(staker, retrievedStaker) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.Contains(valsMap, staker.NodeID) - r.Equal( - &validators.GetValidatorOutput{ - NodeID: staker.NodeID, - PublicKey: staker.PublicKey, - Weight: staker.Weight, - }, - valsMap[staker.NodeID], - ) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - 
upDuration, lastUpdated, err := s.GetUptime(staker.NodeID) - if staker.SubnetID != constants.PrimaryNetworkID { - // only primary network validators have uptimes - r.ErrorIs(err, database.ErrNotFound) - } else { - r.NoError(err) - r.Equal(upDuration, time.Duration(0)) - r.Equal(lastUpdated, staker.StartTime) - } - }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: false, - Amount: staker.Weight, - }, weightDiff) - - blsDiffBytes, err := s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - if staker.SubnetID == constants.PrimaryNetworkID { - r.NoError(err) - r.Nil(blsDiffBytes) - } else { - r.ErrorIs(err, database.ErrNotFound) - } - }, - }, - "add current delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert the delegator and its validator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(valEndTime), - Wght: 1234, - } - validatorReward uint64 = 5678 + primaryNetworkCurrentDelegatorStaker, err := NewCurrentStaker( + addPrimaryNetworkDelegator.ID(), + unsignedAddPrimaryNetworkDelegator, + primaryDelegatorStartTime, + primaryDelegatorReward, + ) + require.NoError(t, err) - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } - delegatorReward uint64 = 5432 - ) + tests := map[string]struct { + initialStakers []*Staker + initialTxs []*txs.Tx - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) + // Staker to insert or remove + staker *Staker + tx *txs.Tx // If tx is nil, the staker is being removed - val, err := NewCurrentStaker( - addPermValTx.ID(), - utxVal, - time.Unix(valStartTime, 0), - validatorReward, - ) - r.NoError(err) + // Check that the staker is duly stored/removed in P-chain state + expectedCurrentValidator *Staker + expectedPendingValidator *Staker + expectedCurrentDelegators []*Staker + expectedPendingDelegators []*Staker - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) + // Check that the validator entry has been set correctly in the + // in-memory validator set. 
+ expectedValidatorSetOutput *validators.GetValidatorOutput - del, err := NewCurrentStaker( - addPermDelTx.ID(), - utxDel, - time.Unix(delStartTime, 0), - delegatorReward, - ) - r.NoError(err) - - r.NoError(s.PutCurrentValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - s.PutCurrentDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.True(delIt.Next()) - retrievedDelegator := delIt.Value() - r.False(delIt.Next()) - delIt.Release() - r.Equal(staker, retrievedDelegator) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - - valsMap := s.validators.GetMap(staker.SubnetID) - r.Contains(valsMap, staker.NodeID) - valOut := valsMap[staker.NodeID] - r.Equal(valOut.NodeID, staker.NodeID) - r.Equal(valOut.Weight, val.Weight+staker.Weight) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // validator's weight must increase of delegator's weight amount - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: false, - Amount: staker.Weight, - }, weightDiff) + // Check whether weight/bls keys diffs are duly stored + expectedWeightDiff *ValidatorWeightDiff + expectedPublicKeyDiff maybe.Maybe[*bls.PublicKey] + }{ + "add current primary network validator": { + staker: primaryNetworkCurrentValidatorStaker, + tx: addPrimaryNetworkValidator, + expectedCurrentValidator: primaryNetworkCurrentValidatorStaker, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: primaryNetworkCurrentValidatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: primaryNetworkCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: false, + Amount: primaryNetworkCurrentValidatorStaker.Weight, }, + expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](nil), }, - "add pending validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(startTime), - End: uint64(endTime), - Wght: 1234, - } - ) - - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - staker, err := NewPendingStaker( - addPermValTx.ID(), - utx, - ) - r.NoError(err) - - r.NoError(s.PutPendingValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - return staker + "add current primary network delegator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkCurrentDelegatorStaker, + tx: addPrimaryNetworkDelegator, + expectedCurrentValidator: 
primaryNetworkCurrentValidatorStaker, + expectedCurrentDelegators: []*Staker{primaryNetworkCurrentDelegatorStaker}, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: primaryNetworkCurrentDelegatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: primaryNetworkCurrentDelegatorStaker.Weight + primaryNetworkCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: false, + Amount: primaryNetworkCurrentDelegatorStaker.Weight, }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - retrievedStaker, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.Equal(staker, retrievedStaker) + }, + "add pending primary network validator": { + staker: primaryNetworkPendingValidatorStaker, + tx: addPrimaryNetworkValidator, + expectedPendingValidator: primaryNetworkPendingValidatorStaker, + }, + "add pending primary network delegator": { + initialStakers: []*Staker{primaryNetworkPendingValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkPendingDelegatorStaker, + tx: addPrimaryNetworkDelegator, + expectedPendingValidator: primaryNetworkPendingValidatorStaker, + expectedPendingDelegators: []*Staker{primaryNetworkPendingDelegatorStaker}, + }, + "delete current primary network validator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkCurrentValidatorStaker, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: true, + Amount: primaryNetworkCurrentValidatorStaker.Weight, }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - // pending validators are not showed in validators set - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) + expectedPublicKeyDiff: maybe.Some(primaryNetworkCurrentValidatorStaker.PublicKey), + }, + "delete current primary network delegator": { + initialStakers: []*Staker{ + primaryNetworkCurrentValidatorStaker, + primaryNetworkCurrentDelegatorStaker, + }, + initialTxs: []*txs.Tx{ + addPrimaryNetworkValidator, + addPrimaryNetworkDelegator, + }, + staker: primaryNetworkCurrentDelegatorStaker, + expectedCurrentValidator: primaryNetworkCurrentValidatorStaker, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: primaryNetworkCurrentValidatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: primaryNetworkCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: true, + Amount: primaryNetworkCurrentDelegatorStaker.Weight, }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - // pending validators uptime is not tracked - _, _, err := s.GetUptime(staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) + }, + "delete pending primary network validator": { + initialStakers: []*Staker{primaryNetworkPendingValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkPendingValidatorStaker, + }, + "delete pending primary network delegator": { + initialStakers: []*Staker{ + primaryNetworkPendingValidatorStaker, + primaryNetworkPendingDelegatorStaker, }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // pending validators weight diff and bls diffs are not stored - _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - 
r.ErrorIs(err, database.ErrNotFound) - - _, err = s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) + initialTxs: []*txs.Tx{ + addPrimaryNetworkValidator, + addPrimaryNetworkDelegator, }, + staker: primaryNetworkPendingDelegatorStaker, + expectedPendingValidator: primaryNetworkPendingValidatorStaker, }, - "add pending delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert the delegator and its validator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(valStartTime), - End: uint64(valEndTime), - Wght: 1234, - } - - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - Start: uint64(delStartTime), - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } - ) + } - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) - val, err := NewPendingStaker(addPermValTx.ID(), utxVal) - r.NoError(err) + db := memdb.New() + state := newTestState(t, db) - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) + // create and store the initial stakers + for _, staker := range test.initialStakers { + switch { + case staker.Priority.IsCurrentValidator(): + require.NoError(state.PutCurrentValidator(staker)) + case staker.Priority.IsPendingValidator(): + require.NoError(state.PutPendingValidator(staker)) + case staker.Priority.IsCurrentDelegator(): + state.PutCurrentDelegator(staker) + case staker.Priority.IsPendingDelegator(): + state.PutPendingDelegator(staker) + } + } + for _, tx := range test.initialTxs { + state.AddTx(tx, status.Committed) + } - del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) - r.NoError(err) + state.SetHeight(0) + require.NoError(state.Commit()) - r.NoError(s.PutPendingValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) + // create and store the staker under test + switch { + case test.staker.Priority.IsCurrentValidator(): + if test.tx != nil { + require.NoError(state.PutCurrentValidator(test.staker)) + } else { + state.DeleteCurrentValidator(test.staker) + } + case test.staker.Priority.IsPendingValidator(): + if test.tx != nil { + require.NoError(state.PutPendingValidator(test.staker)) + } else { + state.DeletePendingValidator(test.staker) + } + case test.staker.Priority.IsCurrentDelegator(): + if test.tx != nil { + state.PutCurrentDelegator(test.staker) + } else { + state.DeleteCurrentDelegator(test.staker) + } + case test.staker.Priority.IsPendingDelegator(): + if test.tx != nil { + state.PutPendingDelegator(test.staker) + } else { + state.DeletePendingDelegator(test.staker) + } + } + if test.tx != nil { + state.AddTx(test.tx, status.Committed) + } - s.PutPendingDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) + state.SetHeight(1) + 
require.NoError(state.Commit()) - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.True(delIt.Next()) - retrievedDelegator := delIt.Value() - r.False(delIt.Next()) - delIt.Release() - r.Equal(staker, retrievedDelegator) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, - }, - "delete current validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // add them remove the validator - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() + // Perform the checks once immediately after committing to the + // state, and once after re-loading the state from disk. + for i := 0; i < 2; i++ { + currentValidator, err := state.GetCurrentValidator(test.staker.SubnetID, test.staker.NodeID) + if test.expectedCurrentValidator == nil { + require.ErrorIs(err, database.ErrNotFound) - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(endTime), - Wght: 1234, - } - validatorReward uint64 = 5678 - ) + // Only current validators should have uptimes + _, _, err := state.GetUptime(test.staker.NodeID) + require.ErrorIs(err, database.ErrNotFound) + } else { + require.NoError(err) + require.Equal(test.expectedCurrentValidator, currentValidator) - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) + // Current validators should also have uptimes + upDuration, lastUpdated, err := state.GetUptime(currentValidator.NodeID) + require.NoError(err) + require.Zero(upDuration) + require.Equal(currentValidator.StartTime, lastUpdated) + } - staker, err := NewCurrentStaker( - addPermValTx.ID(), - utx, - time.Unix(startTime, 0), - validatorReward, - ) - r.NoError(err) - - r.NoError(s.PutCurrentValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - s.DeleteCurrentValidator(staker) - r.NoError(s.Commit()) - return staker - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - _, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - // deleted validators are not showed in the validators set anymore - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - // uptimes of delete validators are dropped - _, _, err := s.GetUptime(staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: true, - Amount: staker.Weight, - }, weightDiff) - - blsDiffBytes, err := s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, 
staker.NodeID)) - if staker.SubnetID == constants.PrimaryNetworkID { - r.NoError(err) - r.Equal(bls.PublicKeyFromValidUncompressedBytes(blsDiffBytes), staker.PublicKey) + pendingValidator, err := state.GetPendingValidator(test.staker.SubnetID, test.staker.NodeID) + if test.expectedPendingValidator == nil { + require.ErrorIs(err, database.ErrNotFound) } else { - r.ErrorIs(err, database.ErrNotFound) + require.NoError(err) + require.Equal(test.expectedPendingValidator, pendingValidator) } - }, - }, - "delete current delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert validator and delegator, then remove the delegator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(valEndTime), - Wght: 1234, - } - validatorReward uint64 = 5678 - - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } - delegatorReward uint64 = 5432 - ) - - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) - val, err := NewCurrentStaker( - addPermValTx.ID(), - utxVal, - time.Unix(valStartTime, 0), - validatorReward, - ) - r.NoError(err) - - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) - - del, err := NewCurrentStaker( - addPermDelTx.ID(), - utxDel, - time.Unix(delStartTime, 0), - delegatorReward, - ) - r.NoError(err) - - r.NoError(s.PutCurrentValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - - s.PutCurrentDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - s.DeleteCurrentDelegator(del) - r.NoError(s.Commit()) - - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.False(delIt.Next()) - delIt.Release() - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - - valsMap := s.validators.GetMap(staker.SubnetID) - r.Contains(valsMap, staker.NodeID) - valOut := valsMap[staker.NodeID] - r.Equal(valOut.NodeID, staker.NodeID) - r.Equal(valOut.Weight, val.Weight) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // validator's weight must decrease of delegator's weight amount - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: true, - Amount: staker.Weight, - }, weightDiff) - }, - }, - "delete pending validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - 
- validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(startTime), - End: uint64(endTime), - Wght: 1234, - } + it, err := state.GetCurrentDelegatorIterator(test.staker.SubnetID, test.staker.NodeID) + require.NoError(err) + require.Equal( + test.expectedCurrentDelegators, + iterator.ToSlice(it), ) - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - staker, err := NewPendingStaker( - addPermValTx.ID(), - utx, + it, err = state.GetPendingDelegatorIterator(test.staker.SubnetID, test.staker.NodeID) + require.NoError(err) + require.Equal( + test.expectedPendingDelegators, + iterator.ToSlice(it), ) - r.NoError(err) - - r.NoError(s.PutPendingValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - s.DeletePendingValidator(staker) - r.NoError(s.Commit()) - return staker - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - _, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - _, _, err := s.GetUptime(staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) - - _, err = s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) - }, - }, - "delete pending delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert validator and delegator the remove the validator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(valStartTime), - End: uint64(valEndTime), - Wght: 1234, - } - - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - Start: uint64(delStartTime), - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } + require.Equal( + test.expectedValidatorSetOutput, + state.validators.GetMap(test.staker.SubnetID)[test.staker.NodeID], ) - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - val, err := NewPendingStaker(addPermValTx.ID(), utxVal) - r.NoError(err) - - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) - - del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) - r.NoError(err) - - r.NoError(s.PutPendingValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - - s.PutPendingDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - 
s.DeletePendingDelegator(del) - r.NoError(s.Commit()) - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.False(delIt.Next()) - delIt.Release() - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, - }, - } - - subnetIDs := []ids.ID{constants.PrimaryNetworkID, ids.GenerateTestID()} - for _, subnetID := range subnetIDs { - for name, test := range tests { - t.Run(fmt.Sprintf("%s - subnetID %s", name, subnetID), func(t *testing.T) { - require := require.New(t) - - db := memdb.New() - state := newTestState(t, db) - - // create and store the staker - staker := test.storeStaker(require, subnetID, state) + diffKey := marshalDiffKey(test.staker.SubnetID, 1, test.staker.NodeID) + weightDiffBytes, err := state.validatorWeightDiffsDB.Get(diffKey) + if test.expectedWeightDiff == nil { + require.ErrorIs(err, database.ErrNotFound) + } else { + require.NoError(err) - // check all relevant data are stored - test.checkStakerInState(require, state, staker) - test.checkValidatorsSet(require, state, staker) - test.checkValidatorUptimes(require, state, staker) - test.checkDiffs(require, state, staker, 0 /*height*/) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + require.NoError(err) + require.Equal(test.expectedWeightDiff, weightDiff) + } - // rebuild the state - rebuiltState := newTestState(t, db) + publicKeyDiffBytes, err := state.validatorPublicKeyDiffsDB.Get(diffKey) + if test.expectedPublicKeyDiff.IsNothing() { + require.ErrorIs(err, database.ErrNotFound) + } else if expectedPublicKeyDiff := test.expectedPublicKeyDiff.Value(); expectedPublicKeyDiff == nil { + require.NoError(err) + require.Empty(publicKeyDiffBytes) + } else { + require.NoError(err) + require.Equal(expectedPublicKeyDiff, bls.PublicKeyFromValidUncompressedBytes(publicKeyDiffBytes)) + } - // check again that all relevant data are still available in rebuilt state - test.checkStakerInState(require, rebuiltState, staker) - test.checkValidatorsSet(require, rebuiltState, staker) - test.checkValidatorUptimes(require, rebuiltState, staker) - test.checkDiffs(require, rebuiltState, staker, 0 /*height*/) - }) - } + // re-load the state from disk + state = newTestState(t, db) + } + }) } } -func createPermissionlessValidatorTx(r *require.Assertions, subnetID ids.ID, validatorsData txs.Validator) *txs.AddPermissionlessValidatorTx { +func createPermissionlessValidatorTx( + t testing.TB, + subnetID ids.ID, + validatorsData txs.Validator, +) *txs.AddPermissionlessValidatorTx { var sig signer.Signer = &signer.Empty{} if subnetID == constants.PrimaryNetworkID { sk, err := bls.NewSecretKey() - r.NoError(err) + require.NoError(t, err) sig = signer.NewProofOfPossession(sk) } @@ -999,35 +719,43 @@ func TestStateAddRemoveValidator(t *testing.T) { state := newTestState(t, memdb.New()) var ( - numNodes = 3 - subnetID = ids.GenerateTestID() - startTime = time.Now() - endTime = startTime.Add(24 * time.Hour) - stakers = make([]Staker, numNodes) + numNodes = 5 + subnetID = ids.GenerateTestID() + startTime = time.Now() + endTime = startTime.Add(24 * time.Hour) + primaryStakers = make([]Staker, numNodes) + subnetStakers = make([]Staker, numNodes) ) - for i := 
0; i < numNodes; i++ { - stakers[i] = Staker{ + for i := range primaryStakers { + sk, err := bls.NewSecretKey() + require.NoError(err) + + primaryStakers[i] = Staker{ TxID: ids.GenerateTestID(), NodeID: ids.GenerateTestNodeID(), + PublicKey: bls.PublicFromSecretKey(sk), + SubnetID: constants.PrimaryNetworkID, Weight: uint64(i + 1), StartTime: startTime.Add(time.Duration(i) * time.Second), EndTime: endTime.Add(time.Duration(i) * time.Second), PotentialReward: uint64(i + 1), } - if i%2 == 0 { - stakers[i].SubnetID = subnetID - } else { - sk, err := bls.NewSecretKey() - require.NoError(err) - stakers[i].PublicKey = bls.PublicFromSecretKey(sk) - stakers[i].SubnetID = constants.PrimaryNetworkID + } + for i, primaryStaker := range primaryStakers { + subnetStakers[i] = Staker{ + TxID: ids.GenerateTestID(), + NodeID: primaryStaker.NodeID, + PublicKey: nil, // Key is inherited from the primary network + SubnetID: subnetID, + Weight: uint64(i + 1), + StartTime: primaryStaker.StartTime, + EndTime: primaryStaker.EndTime, + PotentialReward: uint64(i + 1), } } type diff struct { addedValidators []Staker - addedDelegators []Staker - removedDelegators []Staker removedValidators []Staker expectedPrimaryValidatorSet map[ids.NodeID]*validators.GetValidatorOutput @@ -1040,101 +768,174 @@ func TestStateAddRemoveValidator(t *testing.T) { expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { - // Add a subnet validator - addedValidators: []Staker{stakers[0]}, - expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[0].NodeID: { - NodeID: stakers[0].NodeID, - Weight: stakers[0].Weight, + // Add primary validator 0 + addedValidators: []Staker{primaryStakers[0]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + primaryStakers[0].NodeID: { + NodeID: primaryStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: primaryStakers[0].Weight, }, }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { - // Remove a subnet validator - removedValidators: []Staker{stakers[0]}, - expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + // Add subnet validator 0 + addedValidators: []Staker{subnetStakers[0]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + primaryStakers[0].NodeID: { + NodeID: primaryStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: primaryStakers[0].Weight, + }, + }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + subnetStakers[0].NodeID: { + NodeID: subnetStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: subnetStakers[0].Weight, + }, + }, }, - { // Add a primary network validator - addedValidators: []Staker{stakers[1]}, + { + // Remove subnet validator 0 + removedValidators: []Staker{subnetStakers[0]}, expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[1].NodeID: { - NodeID: stakers[1].NodeID, - PublicKey: stakers[1].PublicKey, - Weight: stakers[1].Weight, + primaryStakers[0].NodeID: { + NodeID: primaryStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: primaryStakers[0].Weight, }, }, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { - // Do nothing + // Add primary network validator 1, and subnet validator 1 + 
addedValidators: []Staker{primaryStakers[1], subnetStakers[1]}, + // Remove primary network validator 0, and subnet validator 1 + removedValidators: []Staker{primaryStakers[0], subnetStakers[1]}, expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[1].NodeID: { - NodeID: stakers[1].NodeID, - PublicKey: stakers[1].PublicKey, - Weight: stakers[1].Weight, + primaryStakers[1].NodeID: { + NodeID: primaryStakers[1].NodeID, + PublicKey: primaryStakers[1].PublicKey, + Weight: primaryStakers[1].Weight, }, }, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, - { // Remove a primary network validator - removedValidators: []Staker{stakers[1]}, - expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + { + // Add primary network validator 2, and subnet validator 2 + addedValidators: []Staker{primaryStakers[2], subnetStakers[2]}, + // Remove primary network validator 1 + removedValidators: []Staker{primaryStakers[1]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + primaryStakers[2].NodeID: { + NodeID: primaryStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: primaryStakers[2].Weight, + }, + }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + subnetStakers[2].NodeID: { + NodeID: subnetStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: subnetStakers[2].Weight, + }, + }, }, { - // Add 2 subnet validators and a primary network validator - addedValidators: []Staker{stakers[0], stakers[1], stakers[2]}, + // Add primary network and subnet validators 3 & 4 + addedValidators: []Staker{primaryStakers[3], primaryStakers[4], subnetStakers[3], subnetStakers[4]}, expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[1].NodeID: { - NodeID: stakers[1].NodeID, - PublicKey: stakers[1].PublicKey, - Weight: stakers[1].Weight, + primaryStakers[2].NodeID: { + NodeID: primaryStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: primaryStakers[2].Weight, + }, + primaryStakers[3].NodeID: { + NodeID: primaryStakers[3].NodeID, + PublicKey: primaryStakers[3].PublicKey, + Weight: primaryStakers[3].Weight, + }, + primaryStakers[4].NodeID: { + NodeID: primaryStakers[4].NodeID, + PublicKey: primaryStakers[4].PublicKey, + Weight: primaryStakers[4].Weight, }, }, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[0].NodeID: { - NodeID: stakers[0].NodeID, - Weight: stakers[0].Weight, + subnetStakers[2].NodeID: { + NodeID: subnetStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: subnetStakers[2].Weight, + }, + subnetStakers[3].NodeID: { + NodeID: subnetStakers[3].NodeID, + PublicKey: primaryStakers[3].PublicKey, + Weight: subnetStakers[3].Weight, }, - stakers[2].NodeID: { - NodeID: stakers[2].NodeID, - Weight: stakers[2].Weight, + subnetStakers[4].NodeID: { + NodeID: subnetStakers[4].NodeID, + PublicKey: primaryStakers[4].PublicKey, + Weight: subnetStakers[4].Weight, }, }, }, { - // Remove 2 subnet validators and a primary network validator. 
- removedValidators: []Staker{stakers[0], stakers[1], stakers[2]}, + // Remove primary network and subnet validators 2 & 3 & 4 + removedValidators: []Staker{ + primaryStakers[2], primaryStakers[3], primaryStakers[4], + subnetStakers[2], subnetStakers[3], subnetStakers[4], + }, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + }, + { + // Do nothing expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, } for currentIndex, diff := range diffs { + d, err := NewDiffOn(state) + require.NoError(err) + + var expectedValidators set.Set[subnetIDNodeID] for _, added := range diff.addedValidators { added := added - require.NoError(state.PutCurrentValidator(&added)) - } - for _, added := range diff.addedDelegators { - added := added - state.PutCurrentDelegator(&added) - } - for _, removed := range diff.removedDelegators { - removed := removed - state.DeleteCurrentDelegator(&removed) + require.NoError(d.PutCurrentValidator(&added)) + + expectedValidators.Add(subnetIDNodeID{ + subnetID: added.SubnetID, + nodeID: added.NodeID, + }) } for _, removed := range diff.removedValidators { removed := removed - state.DeleteCurrentValidator(&removed) + d.DeleteCurrentValidator(&removed) + + expectedValidators.Remove(subnetIDNodeID{ + subnetID: removed.SubnetID, + nodeID: removed.NodeID, + }) } + require.NoError(d.Apply(state)) + currentHeight := uint64(currentIndex + 1) state.SetHeight(currentHeight) require.NoError(state.Commit()) for _, added := range diff.addedValidators { + subnetNodeID := subnetIDNodeID{ + subnetID: added.SubnetID, + nodeID: added.NodeID, + } + if !expectedValidators.Contains(subnetNodeID) { + continue + } + gotValidator, err := state.GetCurrentValidator(added.SubnetID, added.NodeID) require.NoError(err) require.Equal(added, *gotValidator) @@ -1157,8 +958,6 @@ func TestStateAddRemoveValidator(t *testing.T) { prevHeight+1, constants.PrimaryNetworkID, )) - requireEqualWeightsValidatorSet(require, prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) - require.NoError(state.ApplyValidatorPublicKeyDiffs( context.Background(), primaryValidatorSet, @@ -1166,7 +965,7 @@ func TestStateAddRemoveValidator(t *testing.T) { prevHeight+1, constants.PrimaryNetworkID, )) - requireEqualPublicKeysValidatorSet(require, prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) + require.Equal(prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) subnetValidatorSet := copyValidatorSet(diff.expectedSubnetValidatorSet) require.NoError(state.ApplyValidatorWeightDiffs( @@ -1176,7 +975,14 @@ func TestStateAddRemoveValidator(t *testing.T) { prevHeight+1, subnetID, )) - requireEqualWeightsValidatorSet(require, prevDiff.expectedSubnetValidatorSet, subnetValidatorSet) + require.NoError(state.ApplyValidatorPublicKeyDiffs( + context.Background(), + subnetValidatorSet, + currentHeight, + prevHeight+1, + subnetID, + )) + require.Equal(prevDiff.expectedSubnetValidatorSet, subnetValidatorSet) } } } @@ -1192,36 +998,6 @@ func copyValidatorSet( return result } -func requireEqualWeightsValidatorSet( - require *require.Assertions, - expected map[ids.NodeID]*validators.GetValidatorOutput, - actual map[ids.NodeID]*validators.GetValidatorOutput, -) { - require.Len(actual, len(expected)) - for nodeID, expectedVdr := range expected { - require.Contains(actual, nodeID) - - actualVdr := actual[nodeID] - 
require.Equal(expectedVdr.NodeID, actualVdr.NodeID) - require.Equal(expectedVdr.Weight, actualVdr.Weight) - } -} - -func requireEqualPublicKeysValidatorSet( - require *require.Assertions, - expected map[ids.NodeID]*validators.GetValidatorOutput, - actual map[ids.NodeID]*validators.GetValidatorOutput, -) { - require.Len(actual, len(expected)) - for nodeID, expectedVdr := range expected { - require.Contains(actual, nodeID) - - actualVdr := actual[nodeID] - require.Equal(expectedVdr.NodeID, actualVdr.NodeID) - require.Equal(expectedVdr.PublicKey, actualVdr.PublicKey) - } -} - func TestParsedStateBlock(t *testing.T) { var ( require = require.New(t) diff --git a/vms/platformvm/validators/manager.go b/vms/platformvm/validators/manager.go index 02ff6e475ab2..feeb182dd84d 100644 --- a/vms/platformvm/validators/manager.go +++ b/vms/platformvm/validators/manager.go @@ -200,17 +200,7 @@ func (m *manager) GetValidatorSet( // get the start time to track metrics startTime := m.clk.Time() - - var ( - validatorSet map[ids.NodeID]*validators.GetValidatorOutput - currentHeight uint64 - err error - ) - if subnetID == constants.PrimaryNetworkID { - validatorSet, currentHeight, err = m.makePrimaryNetworkValidatorSet(ctx, targetHeight) - } else { - validatorSet, currentHeight, err = m.makeSubnetValidatorSet(ctx, targetHeight, subnetID) - } + validatorSet, currentHeight, err := m.makeValidatorSet(ctx, targetHeight, subnetID) if err != nil { return nil, err } @@ -243,65 +233,12 @@ func (m *manager) getValidatorSetCache(subnetID ids.ID) cache.Cacher[uint64, map return validatorSetsCache } -func (m *manager) makePrimaryNetworkValidatorSet( - ctx context.Context, - targetHeight uint64, -) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { - validatorSet, currentHeight, err := m.getCurrentPrimaryValidatorSet(ctx) - if err != nil { - return nil, 0, err - } - if currentHeight < targetHeight { - return nil, 0, fmt.Errorf("%w with SubnetID = %s: current P-chain height (%d) < requested P-Chain height (%d)", - errUnfinalizedHeight, - constants.PrimaryNetworkID, - currentHeight, - targetHeight, - ) - } - - // Rebuild primary network validators at [targetHeight] - // - // Note: Since we are attempting to generate the validator set at - // [targetHeight], we want to apply the diffs from - // (targetHeight, currentHeight]. Because the state interface is implemented - // to be inclusive, we apply diffs in [targetHeight + 1, currentHeight]. 
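	// A worked sketch of the bounds conversion, assuming hypothetical heights
	// targetHeight = 5 and currentHeight = 8: the half-open range (5, 8]
	// becomes the inclusive range [6, 8], i.e. lastDiffHeight = 6 and diffs at
	// heights 6, 7 and 8 are applied.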
- lastDiffHeight := targetHeight + 1 - err = m.state.ApplyValidatorWeightDiffs( - ctx, - validatorSet, - currentHeight, - lastDiffHeight, - constants.PrimaryNetworkID, - ) - if err != nil { - return nil, 0, err - } - - err = m.state.ApplyValidatorPublicKeyDiffs( - ctx, - validatorSet, - currentHeight, - lastDiffHeight, - constants.PrimaryNetworkID, - ) - return validatorSet, currentHeight, err -} - -func (m *manager) getCurrentPrimaryValidatorSet( - ctx context.Context, -) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { - primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) - currentHeight, err := m.getCurrentHeight(ctx) - return primaryMap, currentHeight, err -} - -func (m *manager) makeSubnetValidatorSet( +func (m *manager) makeValidatorSet( ctx context.Context, targetHeight uint64, subnetID ids.ID, ) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { - subnetValidatorSet, primaryValidatorSet, currentHeight, err := m.getCurrentValidatorSets(ctx, subnetID) + subnetValidatorSet, currentHeight, err := m.getCurrentValidatorSet(ctx, subnetID) if err != nil { return nil, 0, err } @@ -332,38 +269,23 @@ func (m *manager) makeSubnetValidatorSet( return nil, 0, err } - // Update the subnet validator set to include the public keys at - // [currentHeight]. When we apply the public key diffs, we will convert - // these keys to represent the public keys at [targetHeight]. If the subnet - // validator is not currently a primary network validator, it doesn't have a - // key at [currentHeight]. - for nodeID, vdr := range subnetValidatorSet { - if primaryVdr, ok := primaryValidatorSet[nodeID]; ok { - vdr.PublicKey = primaryVdr.PublicKey - } else { - vdr.PublicKey = nil - } - } - - // Prior to ACP-77, public keys were inherited from the primary network. err = m.state.ApplyValidatorPublicKeyDiffs( ctx, subnetValidatorSet, currentHeight, lastDiffHeight, - constants.PrimaryNetworkID, + subnetID, ) return subnetValidatorSet, currentHeight, err } -func (m *manager) getCurrentValidatorSets( +func (m *manager) getCurrentValidatorSet( ctx context.Context, subnetID ids.ID, -) (map[ids.NodeID]*validators.GetValidatorOutput, map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { +) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { subnetMap := m.cfg.Validators.GetMap(subnetID) - primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) currentHeight, err := m.getCurrentHeight(ctx) - return subnetMap, primaryMap, currentHeight, err + return subnetMap, currentHeight, err } func (m *manager) GetSubnetID(_ context.Context, chainID ids.ID) (ids.ID, error) { From d9108a7d2600a92e3fddd230817c70235cb3bf30 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 23 Sep 2024 19:12:57 -0400 Subject: [PATCH 048/155] mocks --- vms/platformvm/state/mock_state.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 4780ede0da5e..41d06025d3f8 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -542,6 +542,20 @@ func (mr *MockStateMockRecorder) GetRewardUTXOs(txID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockState)(nil).GetRewardUTXOs), txID) } +// GetSoVExcess mocks base method. 
+func (m *MockState) GetSoVExcess() gas.Gas { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSoVExcess") + ret0, _ := ret[0].(gas.Gas) + return ret0 +} + +// GetSoVExcess indicates an expected call of GetSoVExcess. +func (mr *MockStateMockRecorder) GetSoVExcess() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSoVExcess", reflect.TypeOf((*MockState)(nil).GetSoVExcess)) +} + // GetStartTime mocks base method. func (m *MockState) GetStartTime(nodeID ids.NodeID) (time.Time, error) { m.ctrl.T.Helper() From 98020f1c41a8d793df55d10093cfacf078ece6cd Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 23 Sep 2024 22:22:35 -0400 Subject: [PATCH 049/155] Bound GetNextStakerTime --- vms/platformvm/block/builder/builder.go | 17 ++++--- .../block/executor/proposal_block_test.go | 5 ++- .../block/executor/standard_block_test.go | 2 +- vms/platformvm/block/executor/verifier.go | 18 +------- vms/platformvm/state/chain_time_helpers.go | 45 +++++++++---------- .../txs/executor/advance_time_test.go | 7 +-- .../txs/executor/proposal_tx_executor.go | 22 +-------- vms/platformvm/txs/executor/state_changes.go | 44 ++++++++++++------ .../txs/executor/state_changes_test.go | 3 +- 9 files changed, 77 insertions(+), 86 deletions(-) diff --git a/vms/platformvm/block/builder/builder.go b/vms/platformvm/block/builder/builder.go index 35ad20adc16c..aa4804cae6d1 100644 --- a/vms/platformvm/block/builder/builder.go +++ b/vms/platformvm/block/builder/builder.go @@ -31,9 +31,15 @@ import ( txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) -// targetBlockSize is maximum number of transaction bytes to place into a -// StandardBlock -const targetBlockSize = 128 * units.KiB +const ( + // targetBlockSize is maximum number of transaction bytes to place into a + // StandardBlock + targetBlockSize = 128 * units.KiB + + // maxTimeToSleep is the maximum time to sleep between checking if a block + // should be produced. 
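	// In durationToSleep below, now.Add(maxTimeToSleep) is passed as the cap
	// to GetNextStakerChangeTime, so even when no staker change is pending the
	// builder wakes up at least once per maxTimeToSleep to re-evaluate whether
	// a block should be produced.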
+ maxTimeToSleep = time.Hour +) var ( _ Builder = (*builder)(nil) @@ -174,12 +180,13 @@ func (b *builder) durationToSleep() (time.Duration, error) { return 0, fmt.Errorf("%w: %s", errMissingPreferredState, preferredID) } - nextStakerChangeTime, err := state.GetNextStakerChangeTime(preferredState) + now := b.txExecutorBackend.Clk.Time() + maxTimeToAwake := now.Add(maxTimeToSleep) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(preferredState, maxTimeToAwake) if err != nil { return 0, fmt.Errorf("%w of %s: %w", errCalculatingNextStakerTime, preferredID, err) } - now := b.txExecutorBackend.Clk.Time() return nextStakerChangeTime.Sub(now), nil } diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 60520a83ccf2..41553c151309 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -21,6 +21,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/iterator/iteratormock" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/gas" "github.com/ava-labs/avalanchego/vms/platformvm/block" @@ -279,7 +280,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { block := env.blkManager.NewBlock(statelessProposalBlock) err = block.Verify(context.Background()) - require.ErrorIs(err, errChildBlockEarlierThanParent) + require.ErrorIs(err, executor.ErrChildBlockEarlierThanParent) } { @@ -1377,7 +1378,7 @@ func TestAddValidatorProposalBlock(t *testing.T) { // Advance time until next staker change time is [validatorEndTime] for { - nextStakerChangeTime, err := state.GetNextStakerChangeTime(env.state) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(env.state, mockable.MaxTime) require.NoError(err) if nextStakerChangeTime.Equal(validatorEndTime) { break diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index fa64eee74697..065bd2098616 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -212,7 +212,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) err = block.Verify(context.Background()) - require.ErrorIs(err, errChildBlockEarlierThanParent) + require.ErrorIs(err, executor.ErrChildBlockEarlierThanParent) } { diff --git a/vms/platformvm/block/executor/verifier.go b/vms/platformvm/block/executor/verifier.go index 532dc4d4b6f6..abcbc566a303 100644 --- a/vms/platformvm/block/executor/verifier.go +++ b/vms/platformvm/block/executor/verifier.go @@ -27,7 +27,6 @@ var ( errApricotBlockIssuedAfterFork = errors.New("apricot block issued after fork") errBanffStandardBlockWithoutChanges = errors.New("BanffStandardBlock performs no state changes") errIncorrectBlockHeight = errors.New("incorrect block height") - errChildBlockEarlierThanParent = errors.New("proposed timestamp before current chain time") errOptionBlockTimestampNotMatchingParent = errors.New("option block proposed timestamp not matching parent block one") ) @@ -278,26 +277,11 @@ func (v *verifier) banffNonOptionBlock(b block.BanffBlock) error { } newChainTime := b.Timestamp() - parentChainTime := parentState.GetTimestamp() - if 
newChainTime.Before(parentChainTime) { - return fmt.Errorf( - "%w: proposed timestamp (%s), chain time (%s)", - errChildBlockEarlierThanParent, - newChainTime, - parentChainTime, - ) - } - - nextStakerChangeTime, err := state.GetNextStakerChangeTime(parentState) - if err != nil { - return fmt.Errorf("could not verify block timestamp: %w", err) - } - now := v.txExecutorBackend.Clk.Time() return executor.VerifyNewChainTime( newChainTime, - nextStakerChangeTime, now, + parentState, ) } diff --git a/vms/platformvm/state/chain_time_helpers.go b/vms/platformvm/state/chain_time_helpers.go index 8b861f69a265..0a2fbfa8ba17 100644 --- a/vms/platformvm/state/chain_time_helpers.go +++ b/vms/platformvm/state/chain_time_helpers.go @@ -7,7 +7,7 @@ import ( "fmt" "time" - "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/gas" "github.com/ava-labs/avalanchego/vms/platformvm/config" @@ -24,7 +24,10 @@ func NextBlockTime(state Chain, clk *mockable.Clock) (time.Time, bool, error) { } // [timestamp] = max(now, parentTime) - nextStakerChangeTime, err := GetNextStakerChangeTime(state) + // If the NextStakerChangeTime is after timestamp, then we shouldn't return + // that the time was capped. + nextStakerChangeTimeCap := timestamp.Add(time.Second) + nextStakerChangeTime, err := GetNextStakerChangeTime(state, nextStakerChangeTimeCap) if err != nil { return time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) } @@ -39,37 +42,33 @@ func NextBlockTime(state Chain, clk *mockable.Clock) (time.Time, bool, error) { } // GetNextStakerChangeTime returns the next time a staker will be either added -// or removed to/from the current validator set. -func GetNextStakerChangeTime(state Chain) (time.Time, error) { - currentStakerIterator, err := state.GetCurrentStakerIterator() +// or removed to/from the current validator set. If the next staker change time +// is further in the future than [defaultTime], then [defaultTime] is returned. 
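// A minimal usage sketch, assuming a Chain value s and a time value now: pass
// mockable.MaxTime for an unbounded lookahead (as the tests do), or a nearer
// cap (as the block builder does) to bound how far ahead the staker sets are
// consulted.
//
//	next, err := GetNextStakerChangeTime(s, mockable.MaxTime)
//	capped, err := GetNextStakerChangeTime(s, now.Add(time.Hour))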
+func GetNextStakerChangeTime(state Chain, defaultTime time.Time) (time.Time, error) { + currentIterator, err := state.GetCurrentStakerIterator() if err != nil { return time.Time{}, err } - defer currentStakerIterator.Release() + defer currentIterator.Release() - pendingStakerIterator, err := state.GetPendingStakerIterator() + pendingIterator, err := state.GetPendingStakerIterator() if err != nil { return time.Time{}, err } - defer pendingStakerIterator.Release() + defer pendingIterator.Release() - hasCurrentStaker := currentStakerIterator.Next() - hasPendingStaker := pendingStakerIterator.Next() - switch { - case hasCurrentStaker && hasPendingStaker: - nextCurrentTime := currentStakerIterator.Value().NextTime - nextPendingTime := pendingStakerIterator.Value().NextTime - if nextCurrentTime.Before(nextPendingTime) { - return nextCurrentTime, nil + for _, it := range []iterator.Iterator[*Staker]{currentIterator, pendingIterator} { + // If the iterator is empty, skip it + if !it.Next() { + continue + } + + time := it.Value().NextTime + if time.Before(defaultTime) { + defaultTime = time } - return nextPendingTime, nil - case hasCurrentStaker: - return currentStakerIterator.Value().NextTime, nil - case hasPendingStaker: - return pendingStakerIterator.Value().NextTime, nil - default: - return time.Time{}, database.ErrNotFound } + return defaultTime, nil } // PickFeeCalculator creates either a static or a dynamic fee calculator, diff --git a/vms/platformvm/txs/executor/advance_time_test.go b/vms/platformvm/txs/executor/advance_time_test.go index c301b8bde8bb..5febc90e2cf9 100644 --- a/vms/platformvm/txs/executor/advance_time_test.go +++ b/vms/platformvm/txs/executor/advance_time_test.go @@ -99,12 +99,13 @@ func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { require.True(ok) } -// Ensure semantic verification fails when proposed timestamp is at or before current timestamp +// Ensure semantic verification fails when proposed timestamp is before the +// current timestamp func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { require := require.New(t) env := newEnvironment(t, upgradetest.ApricotPhase5) - tx, err := newAdvanceTimeTx(t, env.state.GetTimestamp()) + tx, err := newAdvanceTimeTx(t, env.state.GetTimestamp().Add(-time.Second)) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -122,7 +123,7 @@ func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.ErrorIs(err, ErrChildBlockNotAfterParent) + require.ErrorIs(err, ErrChildBlockEarlierThanParent) } // Ensure semantic verification fails when proposed timestamp is after next validator set change time diff --git a/vms/platformvm/txs/executor/proposal_tx_executor.go b/vms/platformvm/txs/executor/proposal_tx_executor.go index 573d6199e2a3..aa8064f2fcd8 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor.go @@ -34,7 +34,6 @@ var ( ErrRemoveStakerTooEarly = errors.New("attempting to remove staker before their end time") ErrRemoveWrongStaker = errors.New("attempting to remove wrong staker") - ErrChildBlockNotAfterParent = errors.New("proposed timestamp not after current chain time") ErrInvalidState = errors.New("generated output isn't valid state") ErrShouldBePermissionlessStaker = errors.New("expected permissionless staker") ErrWrongTxType = errors.New("wrong transaction type") @@ -270,34 +269,17 @@ func (e *ProposalTxExecutor) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { ) } - 
parentChainTime := e.OnCommitState.GetTimestamp() - if !newChainTime.After(parentChainTime) { - return fmt.Errorf( - "%w, proposed timestamp (%s), chain time (%s)", - ErrChildBlockNotAfterParent, - parentChainTime, - parentChainTime, - ) - } - - // Only allow timestamp to move forward as far as the time of next staker - // set change time - nextStakerChangeTime, err := state.GetNextStakerChangeTime(e.OnCommitState) - if err != nil { - return err - } - now := e.Clk.Time() if err := VerifyNewChainTime( newChainTime, - nextStakerChangeTime, now, + e.OnCommitState, ); err != nil { return err } // Note that state doesn't change if this proposal is aborted - _, err = AdvanceTimeTo(e.Backend, e.OnCommitState, newChainTime) + _, err := AdvanceTimeTo(e.Backend, e.OnCommitState, newChainTime) return err } diff --git a/vms/platformvm/txs/executor/state_changes.go b/vms/platformvm/txs/executor/state_changes.go index 55f940fca509..04c11c5c2cd8 100644 --- a/vms/platformvm/txs/executor/state_changes.go +++ b/vms/platformvm/txs/executor/state_changes.go @@ -16,35 +16,35 @@ import ( ) var ( + ErrChildBlockEarlierThanParent = errors.New("proposed timestamp before current chain time") ErrChildBlockAfterStakerChangeTime = errors.New("proposed timestamp later than next staker change time") ErrChildBlockBeyondSyncBound = errors.New("proposed timestamp is too far in the future relative to local time") ) -// VerifyNewChainTime returns nil if the [newChainTime] is a valid chain time -// given the wall clock time ([now]) and when the next staking set change occurs -// ([nextStakerChangeTime]). +// VerifyNewChainTime returns nil if the [newChainTime] is a valid chain time. // Requires: -// - [newChainTime] <= [nextStakerChangeTime]: so that no staking set changes -// are skipped. +// - [newChainTime] >= [currentChainTime]: to ensure chain time advances +// monotonically. // - [newChainTime] <= [now] + [SyncBound]: to ensure chain time approximates // "real" time. +// - [newChainTime] <= [nextStakerChangeTime]: so that no staking set changes +// are skipped. 
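// A condensed sketch of the three checks, using hypothetical local names
// parentTime, now and nextStakerTime for the quantities verified below:
//
//	valid := !newChainTime.Before(parentTime) &&
//		!newChainTime.After(now.Add(SyncBound)) &&
//		!newChainTime.After(nextStakerTime)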
func VerifyNewChainTime( - newChainTime, - nextStakerChangeTime, + newChainTime time.Time, now time.Time, + currentState state.Chain, ) error { - // Only allow timestamp to move as far forward as the time of the next - // staker set change - if newChainTime.After(nextStakerChangeTime) { + currentChainTime := currentState.GetTimestamp() + if newChainTime.Before(currentChainTime) { return fmt.Errorf( - "%w, proposed timestamp (%s), next staker change time (%s)", - ErrChildBlockAfterStakerChangeTime, + "%w: proposed timestamp (%s), chain time (%s)", + ErrChildBlockEarlierThanParent, newChainTime, - nextStakerChangeTime, + currentChainTime, ) } - // Only allow timestamp to reasonably far forward + // Only allow timestamp to be reasonably far forward maxNewChainTime := now.Add(SyncBound) if newChainTime.After(maxNewChainTime) { return fmt.Errorf( @@ -54,6 +54,22 @@ func VerifyNewChainTime( now, ) } + + nextStakerChangeTime, err := state.GetNextStakerChangeTime(currentState, newChainTime) + if err != nil { + return fmt.Errorf("could not verify block timestamp: %w", err) + } + + // Only allow timestamp to move as far forward as the time of the next + // staker set change + if newChainTime.After(nextStakerChangeTime) { + return fmt.Errorf( + "%w, proposed timestamp (%s), next staker change time (%s)", + ErrChildBlockAfterStakerChangeTime, + newChainTime, + nextStakerChangeTime, + ) + } return nil } diff --git a/vms/platformvm/txs/executor/state_changes_test.go b/vms/platformvm/txs/executor/state_changes_test.go index 5588f4b7da73..d642e2ed1481 100644 --- a/vms/platformvm/txs/executor/state_changes_test.go +++ b/vms/platformvm/txs/executor/state_changes_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/upgrade/upgradetest" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/gas" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/state" @@ -75,7 +76,7 @@ func TestAdvanceTimeTo_UpdatesFeeState(t *testing.T) { // Ensure the invariant that [nextTime <= nextStakerChangeTime] on // AdvanceTimeTo is maintained. 
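	// Passing mockable.MaxTime below leaves the lookahead unbounded, so the
	// invariant is checked against the true next staker change time rather
	// than a capped value.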
- nextStakerChangeTime, err := state.GetNextStakerChangeTime(s) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(s, mockable.MaxTime) require.NoError(err) require.False(nextTime.After(nextStakerChangeTime)) From cc9a08633ebc5cbc3e5cdb2bdab4a4e8cd17492b Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 23 Sep 2024 23:08:08 -0400 Subject: [PATCH 050/155] fix test --- vms/platformvm/txs/executor/advance_time_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/txs/executor/advance_time_test.go b/vms/platformvm/txs/executor/advance_time_test.go index 5febc90e2cf9..fa7d3583c68f 100644 --- a/vms/platformvm/txs/executor/advance_time_test.go +++ b/vms/platformvm/txs/executor/advance_time_test.go @@ -126,7 +126,8 @@ func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { require.ErrorIs(err, ErrChildBlockEarlierThanParent) } -// Ensure semantic verification fails when proposed timestamp is after next validator set change time +// Ensure semantic verification fails when proposed timestamp is after next +// validator set change time func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require := require.New(t) env := newEnvironment(t, upgradetest.ApricotPhase5) @@ -167,8 +168,8 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { env.ctx.Lock.Lock() defer env.ctx.Lock.Unlock() - // fast forward clock to 10 seconds before genesis validators stop validating - env.clk.Set(genesistest.DefaultValidatorEndTime.Add(-10 * time.Second)) + // fast forward clock to when genesis validators stop validating + env.clk.Set(genesistest.DefaultValidatorEndTime) { // Proposes advancing timestamp to 1 second after genesis validators stop validating From a3f0d94117a16025c6daa5381a928178b023f5c2 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 23 Sep 2024 23:17:54 -0400 Subject: [PATCH 051/155] add comment --- vms/platformvm/txs/executor/state_changes.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vms/platformvm/txs/executor/state_changes.go b/vms/platformvm/txs/executor/state_changes.go index 04c11c5c2cd8..a30a422506b9 100644 --- a/vms/platformvm/txs/executor/state_changes.go +++ b/vms/platformvm/txs/executor/state_changes.go @@ -55,6 +55,8 @@ func VerifyNewChainTime( ) } + // nextStakerChangeTime is calculated last to ensure that the function is + // able to be calculated efficiently. 
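	// Because newChainTime is used as the cap, the value returned below is at
	// most newChainTime, and the subsequent check fails exactly when a staker
	// set change falls strictly before the proposed time.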
nextStakerChangeTime, err := state.GetNextStakerChangeTime(currentState, newChainTime) if err != nil { return fmt.Errorf("could not verify block timestamp: %w", err) From de8151890acdf7a1e6ab2df104f7f81c25e89910 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 23 Sep 2024 23:50:06 -0400 Subject: [PATCH 052/155] Add tests --- .../state/chain_time_helpers_test.go | 115 ++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/vms/platformvm/state/chain_time_helpers_test.go b/vms/platformvm/state/chain_time_helpers_test.go index 3a6304aaeea5..7e1a1786eb61 100644 --- a/vms/platformvm/state/chain_time_helpers_test.go +++ b/vms/platformvm/state/chain_time_helpers_test.go @@ -5,16 +5,131 @@ package state import ( "testing" + "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/upgrade/upgradetest" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/genesis/genesistest" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) +func TestNextBlockTime(t *testing.T) { + tests := []struct { + name string + chainTime time.Time + now time.Time + expectedTime time.Time + expectedCapped bool + }{ + { + name: "parent time is after now", + chainTime: genesistest.DefaultValidatorStartTime, + now: genesistest.DefaultValidatorStartTime.Add(-time.Second), + expectedTime: genesistest.DefaultValidatorStartTime, + expectedCapped: false, + }, + { + name: "parent time is before now", + chainTime: genesistest.DefaultValidatorStartTime, + now: genesistest.DefaultValidatorStartTime.Add(time.Second), + expectedTime: genesistest.DefaultValidatorStartTime.Add(time.Second), + expectedCapped: false, + }, + { + name: "now is at next staker change time", + chainTime: genesistest.DefaultValidatorStartTime, + now: genesistest.DefaultValidatorEndTime, + expectedTime: genesistest.DefaultValidatorEndTime, + expectedCapped: true, + }, + { + name: "now is after next staker change time", + chainTime: genesistest.DefaultValidatorStartTime, + now: genesistest.DefaultValidatorEndTime.Add(time.Second), + expectedTime: genesistest.DefaultValidatorEndTime, + expectedCapped: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var ( + require = require.New(t) + s = newTestState(t, memdb.New()) + clk mockable.Clock + ) + + s.SetTimestamp(test.chainTime) + clk.Set(test.now) + + actualTime, actualCapped, err := NextBlockTime(s, &clk) + require.NoError(err) + require.Equal(test.expectedTime.Local(), actualTime.Local()) + require.Equal(test.expectedCapped, actualCapped) + }) + } +} + +func TestGetNextStakerChangeTime(t *testing.T) { + tests := []struct { + name string + pending []*Staker + maxTime time.Time + expected time.Time + }{ + { + name: "only current validators", + maxTime: mockable.MaxTime, + expected: genesistest.DefaultValidatorEndTime, + }, + { + name: "current and pending validators", + pending: []*Staker{ + { + TxID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + PublicKey: nil, + SubnetID: constants.PrimaryNetworkID, + Weight: 1, + StartTime: genesistest.DefaultValidatorStartTime.Add(time.Second), + EndTime: genesistest.DefaultValidatorEndTime, + NextTime: 
genesistest.DefaultValidatorStartTime.Add(time.Second), + Priority: txs.PrimaryNetworkValidatorPendingPriority, + }, + }, + maxTime: mockable.MaxTime, + expected: genesistest.DefaultValidatorStartTime.Add(time.Second), + }, + { + name: "restricted timestamp", + maxTime: genesistest.DefaultValidatorStartTime, + expected: genesistest.DefaultValidatorStartTime, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var ( + require = require.New(t) + s = newTestState(t, memdb.New()) + ) + for _, staker := range test.pending { + require.NoError(s.PutPendingValidator(staker)) + } + + actual, err := GetNextStakerChangeTime(s, test.maxTime) + require.NoError(err) + require.Equal(test.expected.Local(), actual.Local()) + }) + } +} + func TestPickFeeCalculator(t *testing.T) { var ( createAssetTxFee = genesis.LocalParams.CreateAssetTxFee From 8fe987cf89f72411161ad06feffd1740016b98d2 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 30 Sep 2024 19:20:51 -0400 Subject: [PATCH 053/155] ACP-77: Update warp messages to follow new specification --- vms/platformvm/warp/message/codec.go | 1 - vms/platformvm/warp/message/payload_test.go | 30 +----------- .../warp/message/register_subnet_validator.go | 8 ++-- .../message/set_subnet_validator_weight.go | 48 ------------------- .../set_subnet_validator_weight_test.go | 28 ----------- .../warp/message/subnet_validator_weight.go | 9 +++- 6 files changed, 12 insertions(+), 112 deletions(-) delete mode 100644 vms/platformvm/warp/message/set_subnet_validator_weight.go delete mode 100644 vms/platformvm/warp/message/set_subnet_validator_weight_test.go diff --git a/vms/platformvm/warp/message/codec.go b/vms/platformvm/warp/message/codec.go index b5ade8de30a9..4dda85a1c76e 100644 --- a/vms/platformvm/warp/message/codec.go +++ b/vms/platformvm/warp/message/codec.go @@ -23,7 +23,6 @@ func init() { lc.RegisterType(&SubnetConversion{}), lc.RegisterType(&RegisterSubnetValidator{}), lc.RegisterType(&SubnetValidatorRegistration{}), - lc.RegisterType(&SetSubnetValidatorWeight{}), lc.RegisterType(&SubnetValidatorWeight{}), Codec.RegisterCodec(CodecVersion, lc), ) diff --git a/vms/platformvm/warp/message/payload_test.go b/vms/platformvm/warp/message/payload_test.go index 802f84e01e85..4034d7ba7a3f 100644 --- a/vms/platformvm/warp/message/payload_test.go +++ b/vms/platformvm/warp/message/payload_test.go @@ -128,41 +128,13 @@ func TestParse(t *testing.T) { false, )), }, - { - name: "SetSubnetValidatorWeight", - bytes: []byte{ - // Codec version: - 0x00, 0x00, - // Payload type = SetSubnetValidatorWeight: - 0x00, 0x00, 0x00, 0x03, - // ValidationID: - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - // Nonce: - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - // Weight: - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - }, - expected: mustCreate(NewSetSubnetValidatorWeight( - ids.ID{ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - }, - 0x2122232425262728, - 0x292a2b2c2d2e2f30, - )), - }, { name: "SubnetValidatorWeight", bytes: []byte{ // Codec version: 0x00, 0x00, // Payload type = SubnetValidatorWeight: - 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x03, // ValidationID: 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 
0x0e, 0x0f, 0x10, diff --git a/vms/platformvm/warp/message/register_subnet_validator.go b/vms/platformvm/warp/message/register_subnet_validator.go index 4a6bd9240435..be8e0495ac4f 100644 --- a/vms/platformvm/warp/message/register_subnet_validator.go +++ b/vms/platformvm/warp/message/register_subnet_validator.go @@ -8,15 +8,15 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/vms/types" ) // RegisterSubnetValidator adds a validator to the subnet. type RegisterSubnetValidator struct { payload - SubnetID ids.ID `serialize:"true" json:"subnetID"` - // TODO: Use a 32-byte nodeID here - NodeID ids.NodeID `serialize:"true" json:"nodeID"` + SubnetID ids.ID `serialize:"true" json:"subnetID"` + NodeID types.JSONByteSlice `serialize:"true" json:"nodeID"` Weight uint64 `serialize:"true" json:"weight"` BLSPublicKey [bls.PublicKeyLen]byte `serialize:"true" json:"blsPublicKey"` Expiry uint64 `serialize:"true" json:"expiry"` @@ -32,7 +32,7 @@ func NewRegisterSubnetValidator( ) (*RegisterSubnetValidator, error) { msg := &RegisterSubnetValidator{ SubnetID: subnetID, - NodeID: nodeID, + NodeID: nodeID[:], Weight: weight, BLSPublicKey: blsPublicKey, Expiry: expiry, diff --git a/vms/platformvm/warp/message/set_subnet_validator_weight.go b/vms/platformvm/warp/message/set_subnet_validator_weight.go deleted file mode 100644 index bde19f7ae67a..000000000000 --- a/vms/platformvm/warp/message/set_subnet_validator_weight.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "fmt" - - "github.com/ava-labs/avalanchego/ids" -) - -// SetSubnetValidatorWeight updates the weight of the specified validator. -type SetSubnetValidatorWeight struct { - payload - - ValidationID ids.ID `serialize:"true" json:"validationID"` - Nonce uint64 `serialize:"true" json:"nonce"` - Weight uint64 `serialize:"true" json:"weight"` -} - -// NewSetSubnetValidatorWeight creates a new initialized -// SetSubnetValidatorWeight. -func NewSetSubnetValidatorWeight( - validationID ids.ID, - nonce uint64, - weight uint64, -) (*SetSubnetValidatorWeight, error) { - msg := &SetSubnetValidatorWeight{ - ValidationID: validationID, - Nonce: nonce, - Weight: weight, - } - return msg, initialize(msg) -} - -// ParseSetSubnetValidatorWeight parses bytes into an initialized -// SetSubnetValidatorWeight. -func ParseSetSubnetValidatorWeight(b []byte) (*SetSubnetValidatorWeight, error) { - payloadIntf, err := Parse(b) - if err != nil { - return nil, err - } - payload, ok := payloadIntf.(*SetSubnetValidatorWeight) - if !ok { - return nil, fmt.Errorf("%w: %T", ErrWrongType, payloadIntf) - } - return payload, nil -} diff --git a/vms/platformvm/warp/message/set_subnet_validator_weight_test.go b/vms/platformvm/warp/message/set_subnet_validator_weight_test.go deleted file mode 100644 index 9e54cf2e34b2..000000000000 --- a/vms/platformvm/warp/message/set_subnet_validator_weight_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "math/rand" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -func TestSetSubnetValidatorWeight(t *testing.T) { - require := require.New(t) - - msg, err := NewSetSubnetValidatorWeight( - ids.GenerateTestID(), - rand.Uint64(), //#nosec G404 - rand.Uint64(), //#nosec G404 - ) - require.NoError(err) - - parsed, err := ParseSetSubnetValidatorWeight(msg.Bytes()) - require.NoError(err) - require.Equal(msg, parsed) -} diff --git a/vms/platformvm/warp/message/subnet_validator_weight.go b/vms/platformvm/warp/message/subnet_validator_weight.go index 4091ff9af8ac..eb5132a64874 100644 --- a/vms/platformvm/warp/message/subnet_validator_weight.go +++ b/vms/platformvm/warp/message/subnet_validator_weight.go @@ -9,8 +9,13 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -// SubnetValidatorWeight reports the current nonce and weight of a validator -// registered on the P-chain. +// SubnetValidatorWeight is both received and sent by the P-chain. +// +// If the P-chain is receiving this message, it is treated as a command to +// update the weight of the validator. +// +// If the P-chain is sending this message, it is reporting the current nonce and +// weight of the validator. type SubnetValidatorWeight struct { payload From 30efc6d68fdda75f46c87c652bd5ca1d5d1235f5 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 30 Sep 2024 21:00:38 -0400 Subject: [PATCH 054/155] ACP-77: Add warp message helpers to follow new specification --- vms/platformvm/warp/message/payload_test.go | 52 +++++- .../warp/message/register_subnet_validator.go | 84 +++++++-- .../message/register_subnet_validator_test.go | 161 +++++++++++++++++- .../warp/message/subnet_conversion.go | 32 +++- .../warp/message/subnet_conversion_test.go | 82 +++++++++ .../warp/message/subnet_validator_weight.go | 11 ++ .../message/subnet_validator_weight_test.go | 38 +++++ 7 files changed, 438 insertions(+), 22 deletions(-) diff --git a/vms/platformvm/warp/message/payload_test.go b/vms/platformvm/warp/message/payload_test.go index 4034d7ba7a3f..dc2013f34826 100644 --- a/vms/platformvm/warp/message/payload_test.go +++ b/vms/platformvm/warp/message/payload_test.go @@ -63,21 +63,39 @@ func TestParse(t *testing.T) { 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + // NodeID Length: + 0x00, 0x00, 0x00, 0x14, // NodeID: 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, - // Weight: - 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, // BLSPublicKey: + 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, - 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, // Expiry: - 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, + 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, + // Remaining Balance Owner Threshold: + 0x6d, 0x6e, 0x6f, 0x70, + // Remaining Balance Owner Addresses Length: + 0x00, 0x00, 0x00, 0x01, + // Remaining Balance Owner Address[0]: + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, + 0x81, 0x82, 0x83, 0x84, + // Disable Owner Threshold: + 0x85, 0x86, 0x87, 0x88, + // Disable Owner Addresses Length: + 0x00, 0x00, 0x00, 0x01, 
+ // Disable Owner Address[0]: + 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, + 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9a, 0x9b, 0x9c, + // Weight: + 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, }, expected: mustCreate(NewRegisterSubnetValidator( ids.ID{ @@ -91,16 +109,36 @@ func TestParse(t *testing.T) { 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, }, - 0x35363738393a3b3c, [bls.PublicKeyLen]byte{ + 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, - 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, }, - 0x6d6e6f7071727374, + 0x65666768696a6b6c, + PChainOwner{ + Threshold: 0x6d6e6f70, + Addresses: []ids.ShortID{ + { + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, + 0x81, 0x82, 0x83, 0x84, + }, + }, + }, + PChainOwner{ + Threshold: 0x85868788, + Addresses: []ids.ShortID{ + { + 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, + 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9a, 0x9b, 0x9c, + }, + }, + }, + 0x9d9e9fa0a1a2a3a4, )), }, { diff --git a/vms/platformvm/warp/message/register_subnet_validator.go b/vms/platformvm/warp/message/register_subnet_validator.go index be8e0495ac4f..cf0b1cbcd569 100644 --- a/vms/platformvm/warp/message/register_subnet_validator.go +++ b/vms/platformvm/warp/message/register_subnet_validator.go @@ -4,38 +4,100 @@ package message import ( + "errors" "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/vms/types" ) +var ( + ErrInvalidSubnetID = errors.New("invalid subnet ID") + ErrInvalidWeight = errors.New("invalid weight") + ErrInvalidNodeID = errors.New("invalid node ID") + ErrInvalidOwner = errors.New("invalid owner") +) + +type PChainOwner struct { + // The threshold number of `Addresses` that must provide a signature in + // order for the `PChainOwner` to be considered valid. + Threshold uint32 `serialize:"true" json:"threshold"` + // The addresses that are allowed to sign to authenticate a `PChainOwner`. + Addresses []ids.ShortID `serialize:"true" json:"addresses"` +} + // RegisterSubnetValidator adds a validator to the subnet. 
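// A minimal construction sketch, assuming subnetID, nodeID, blsPublicKey,
// expiry, the two PChainOwners and weight are already in scope:
//
//	msg, err := NewRegisterSubnetValidator(subnetID, nodeID, blsPublicKey,
//		expiry, remainingBalanceOwner, disableOwner, weight)
//	if err == nil {
//		// Verify rejects the primary network ID, a zero weight, a malformed
//		// node ID, and invalid owners.
//		err = msg.Verify()
//	}
//	validationID := msg.ValidationID() // hash of the message bytes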
type RegisterSubnetValidator struct { payload - SubnetID ids.ID `serialize:"true" json:"subnetID"` - NodeID types.JSONByteSlice `serialize:"true" json:"nodeID"` - Weight uint64 `serialize:"true" json:"weight"` - BLSPublicKey [bls.PublicKeyLen]byte `serialize:"true" json:"blsPublicKey"` - Expiry uint64 `serialize:"true" json:"expiry"` + SubnetID ids.ID `serialize:"true" json:"subnetID"` + NodeID types.JSONByteSlice `serialize:"true" json:"nodeID"` + BLSPublicKey [bls.PublicKeyLen]byte `serialize:"true" json:"blsPublicKey"` + Expiry uint64 `serialize:"true" json:"expiry"` + RemainingBalanceOwner PChainOwner `serialize:"true" json:"remainingBalanceOwner"` + DisableOwner PChainOwner `serialize:"true" json:"disableOwner"` + Weight uint64 `serialize:"true" json:"weight"` +} + +func (r *RegisterSubnetValidator) Verify() error { + if r.SubnetID == constants.PrimaryNetworkID { + return ErrInvalidSubnetID + } + if r.Weight == 0 { + return ErrInvalidWeight + } + + nodeID, err := ids.ToNodeID(r.NodeID) + if err != nil { + return fmt.Errorf("%w: %w", ErrInvalidNodeID, err) + } + if nodeID == ids.EmptyNodeID { + return fmt.Errorf("%w: empty nodeID is disallowed", ErrInvalidNodeID) + } + + err = verify.All( + &secp256k1fx.OutputOwners{ + Threshold: r.RemainingBalanceOwner.Threshold, + Addrs: r.RemainingBalanceOwner.Addresses, + }, + &secp256k1fx.OutputOwners{ + Threshold: r.DisableOwner.Threshold, + Addrs: r.DisableOwner.Addresses, + }, + ) + if err != nil { + return fmt.Errorf("%w: %w", ErrInvalidOwner, err) + } + return nil +} + +func (r *RegisterSubnetValidator) ValidationID() ids.ID { + return hashing.ComputeHash256Array(r.Bytes()) } // NewRegisterSubnetValidator creates a new initialized RegisterSubnetValidator. func NewRegisterSubnetValidator( subnetID ids.ID, nodeID ids.NodeID, - weight uint64, blsPublicKey [bls.PublicKeyLen]byte, expiry uint64, + remainingBalanceOwner PChainOwner, + disableOwner PChainOwner, + weight uint64, ) (*RegisterSubnetValidator, error) { msg := &RegisterSubnetValidator{ - SubnetID: subnetID, - NodeID: nodeID[:], - Weight: weight, - BLSPublicKey: blsPublicKey, - Expiry: expiry, + SubnetID: subnetID, + NodeID: nodeID[:], + BLSPublicKey: blsPublicKey, + Expiry: expiry, + RemainingBalanceOwner: remainingBalanceOwner, + DisableOwner: disableOwner, + Weight: weight, } return msg, initialize(msg) } diff --git a/vms/platformvm/warp/message/register_subnet_validator_test.go b/vms/platformvm/warp/message/register_subnet_validator_test.go index 641569ae6e61..68898e47979f 100644 --- a/vms/platformvm/warp/message/register_subnet_validator_test.go +++ b/vms/platformvm/warp/message/register_subnet_validator_test.go @@ -10,7 +10,9 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/hashing" ) func newBLSPublicKey(t *testing.T) [bls.PublicKeyLen]byte { @@ -28,13 +30,168 @@ func TestRegisterSubnetValidator(t *testing.T) { msg, err := NewRegisterSubnetValidator( ids.GenerateTestID(), ids.GenerateTestNodeID(), - rand.Uint64(), //#nosec G404 newBLSPublicKey(t), rand.Uint64(), //#nosec G404 + PChainOwner{ + Threshold: rand.Uint32(), //#nosec G404 + Addresses: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + PChainOwner{ + Threshold: rand.Uint32(), //#nosec G404 + Addresses: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + rand.Uint64(), //#nosec G404 ) require.NoError(err) - parsed, err := 
ParseRegisterSubnetValidator(msg.Bytes()) + bytes := msg.Bytes() + var expectedValidationID ids.ID = hashing.ComputeHash256Array(bytes) + require.Equal(expectedValidationID, msg.ValidationID()) + + parsed, err := ParseRegisterSubnetValidator(bytes) require.NoError(err) require.Equal(msg, parsed) } + +func TestRegisterSubnetValidator_Verify(t *testing.T) { + mustCreate := func(msg *RegisterSubnetValidator, err error) *RegisterSubnetValidator { + require.NoError(t, err) + return msg + } + tests := []struct { + name string + msg *RegisterSubnetValidator + expected error + }{ + { + name: "PrimaryNetworkID", + msg: mustCreate(NewRegisterSubnetValidator( + constants.PrimaryNetworkID, + ids.GenerateTestNodeID(), + newBLSPublicKey(t), + rand.Uint64(), //#nosec G404 + PChainOwner{ + Threshold: 1, + Addresses: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + PChainOwner{ + Threshold: 0, + }, + 1, + )), + expected: ErrInvalidSubnetID, + }, + { + name: "Weight = 0", + msg: mustCreate(NewRegisterSubnetValidator( + ids.GenerateTestID(), + ids.GenerateTestNodeID(), + newBLSPublicKey(t), + rand.Uint64(), //#nosec G404 + PChainOwner{ + Threshold: 1, + Addresses: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + PChainOwner{ + Threshold: 0, + }, + 0, + )), + expected: ErrInvalidWeight, + }, + { + name: "Invalid NodeID Length", + msg: &RegisterSubnetValidator{ + SubnetID: ids.GenerateTestID(), + NodeID: nil, + BLSPublicKey: newBLSPublicKey(t), + Expiry: rand.Uint64(), //#nosec G404 + RemainingBalanceOwner: PChainOwner{ + Threshold: 1, + Addresses: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + DisableOwner: PChainOwner{ + Threshold: 0, + }, + Weight: 1, + }, + expected: ErrInvalidNodeID, + }, + { + name: "Invalid NodeID", + msg: mustCreate(NewRegisterSubnetValidator( + ids.GenerateTestID(), + ids.EmptyNodeID, + newBLSPublicKey(t), + rand.Uint64(), //#nosec G404 + PChainOwner{ + Threshold: 1, + Addresses: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + PChainOwner{ + Threshold: 0, + }, + 1, + )), + expected: ErrInvalidNodeID, + }, + { + name: "Invalid Owner", + msg: mustCreate(NewRegisterSubnetValidator( + ids.GenerateTestID(), + ids.GenerateTestNodeID(), + newBLSPublicKey(t), + rand.Uint64(), //#nosec G404 + PChainOwner{ + Threshold: 0, + Addresses: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + PChainOwner{ + Threshold: 0, + }, + 1, + )), + expected: ErrInvalidOwner, + }, + { + name: "Valid", + msg: mustCreate(NewRegisterSubnetValidator( + ids.GenerateTestID(), + ids.GenerateTestNodeID(), + newBLSPublicKey(t), + rand.Uint64(), //#nosec G404 + PChainOwner{ + Threshold: 1, + Addresses: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + PChainOwner{ + Threshold: 0, + }, + 1, + )), + expected: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.msg.Verify() + require.ErrorIs(t, err, test.expected) + }) + } +} diff --git a/vms/platformvm/warp/message/subnet_conversion.go b/vms/platformvm/warp/message/subnet_conversion.go index d4ca5587453d..a93354fbe446 100644 --- a/vms/platformvm/warp/message/subnet_conversion.go +++ b/vms/platformvm/warp/message/subnet_conversion.go @@ -7,13 +7,41 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/vms/types" ) -// SubnetConversion reports summary of the subnet conversation that occurred on -// the P-chain. 
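// SubnetConversionValidatorData holds the node ID, BLS public key, and weight
// of one validator included in SubnetConversionData below; SubnetConversionID
// hashes the canonical serialization of that data. (Description inferred from
// the fields and from SubnetConversionID.)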
+type SubnetConversionValidatorData struct { + NodeID types.JSONByteSlice `serialize:"true" json:"nodeID"` + BLSPublicKey [bls.PublicKeyLen]byte `serialize:"true" json:"blsPublicKey"` + Weight uint64 `serialize:"true" json:"weight"` +} + +type SubnetConversionData struct { + SubnetID ids.ID `serialize:"true" json:"subnetID"` + ManagerChainID ids.ID `serialize:"true" json:"managerChainID"` + ManagerAddress types.JSONByteSlice `serialize:"true" json:"managerAddress"` + Validators []SubnetConversionValidatorData `serialize:"true" json:"validators"` +} + +// SubnetConversionID creates a subnet conversion ID from the provided subnet +// conversion data. +func SubnetConversionID(data SubnetConversionData) (ids.ID, error) { + bytes, err := Codec.Marshal(CodecVersion, &data) + if err != nil { + return ids.Empty, err + } + return hashing.ComputeHash256Array(bytes), nil +} + +// SubnetConversion reports the summary of the subnet conversation that occurred +// on the P-chain. type SubnetConversion struct { payload + // ID of the subnet conversion. It is typically generated by calling + // SubnetConversionID. ID ids.ID `serialize:"true" json:"id"` } diff --git a/vms/platformvm/warp/message/subnet_conversion_test.go b/vms/platformvm/warp/message/subnet_conversion_test.go index ad823291eb09..1da07f75e344 100644 --- a/vms/platformvm/warp/message/subnet_conversion_test.go +++ b/vms/platformvm/warp/message/subnet_conversion_test.go @@ -9,8 +9,90 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/vms/types" ) +func TestSubnetConversionID(t *testing.T) { + require := require.New(t) + + subnetConversionDataBytes := []byte{ + // Codec version: + 0x00, 0x00, + // SubnetID: + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + // ManagerChainID: + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, + // ManagerAddress Length: + 0x00, 0x00, 0x00, 0x01, + // ManagerAddress: + 0x41, + // Validators Length: + 0x00, 0x00, 0x00, 0x01, + // Validator[0]: + // NodeID Length: + 0x00, 0x00, 0x00, 0x14, + // NodeID: + 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, + 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, + 0x52, 0x53, 0x54, 0x55, + // BLSPublicKey: + 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, + 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, + 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, + 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, + 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, + 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, + // Weight: + 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, + } + var expectedSubnetConversionID ids.ID = hashing.ComputeHash256Array(subnetConversionDataBytes) + + subnetConversionData := SubnetConversionData{ + SubnetID: ids.ID{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + }, + ManagerChainID: ids.ID{ + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 
0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, + }, + ManagerAddress: []byte{0x41}, + Validators: []SubnetConversionValidatorData{ + { + NodeID: types.JSONByteSlice([]byte{ + 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, + 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, + 0x52, 0x53, 0x54, 0x55, + }), + BLSPublicKey: [bls.PublicKeyLen]byte{ + 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, + 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, + 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, + 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, + 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, + 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, + }, + Weight: 0x868788898a8b8c8d, + }, + }, + } + subnetConversionID, err := SubnetConversionID(subnetConversionData) + require.NoError(err) + require.Equal(expectedSubnetConversionID, subnetConversionID) +} + func TestSubnetConversion(t *testing.T) { require := require.New(t) diff --git a/vms/platformvm/warp/message/subnet_validator_weight.go b/vms/platformvm/warp/message/subnet_validator_weight.go index eb5132a64874..dcfa6c5a16c0 100644 --- a/vms/platformvm/warp/message/subnet_validator_weight.go +++ b/vms/platformvm/warp/message/subnet_validator_weight.go @@ -4,11 +4,15 @@ package message import ( + "errors" "fmt" + "math" "github.com/ava-labs/avalanchego/ids" ) +var ErrNonceReservedForRemoval = errors.New("maxUint64 nonce is reserved for removal") + // SubnetValidatorWeight is both received and sent by the P-chain. // // If the P-chain is receiving this message, it is treated as a command to @@ -24,6 +28,13 @@ type SubnetValidatorWeight struct { Weight uint64 `serialize:"true" json:"weight"` } +func (s *SubnetValidatorWeight) Verify() error { + if s.Nonce == math.MaxUint64 && s.Weight != 0 { + return ErrNonceReservedForRemoval + } + return nil +} + // NewSubnetValidatorWeight creates a new initialized SubnetValidatorWeight. 
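// A minimal usage sketch, assuming a validationID is in scope; per Verify
// above, a nonce of math.MaxUint64 is only accepted together with a weight of
// 0, since that nonce is reserved for removal:
//
//	msg, err := NewSubnetValidatorWeight(validationID, nonce, weight)
//	if err == nil {
//		err = msg.Verify()
//	}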
func NewSubnetValidatorWeight( validationID ids.ID, diff --git a/vms/platformvm/warp/message/subnet_validator_weight_test.go b/vms/platformvm/warp/message/subnet_validator_weight_test.go index faa67ae5c384..17cef30b88cb 100644 --- a/vms/platformvm/warp/message/subnet_validator_weight_test.go +++ b/vms/platformvm/warp/message/subnet_validator_weight_test.go @@ -4,6 +4,7 @@ package message import ( + "math" "math/rand" "testing" @@ -26,3 +27,40 @@ func TestSubnetValidatorWeight(t *testing.T) { require.NoError(err) require.Equal(msg, parsed) } + +func TestSubnetValidatorWeight_Verify(t *testing.T) { + mustCreate := func(msg *SubnetValidatorWeight, err error) *SubnetValidatorWeight { + require.NoError(t, err) + return msg + } + tests := []struct { + name string + msg *SubnetValidatorWeight + expected error + }{ + { + name: "Invalid Nonce", + msg: mustCreate(NewSubnetValidatorWeight( + ids.GenerateTestID(), + math.MaxUint64, + 1, + )), + expected: ErrNonceReservedForRemoval, + }, + { + name: "Valid", + msg: mustCreate(NewSubnetValidatorWeight( + ids.GenerateTestID(), + math.MaxUint64, + 0, + )), + expected: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.msg.Verify() + require.ErrorIs(t, err, test.expected) + }) + } +} From 903f7c3e9b62ac2b46ee7fb7eaade6bc92b94a14 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 11 Jun 2024 13:43:14 -0400 Subject: [PATCH 055/155] migrate x/sync to p2p Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/p2p/p2ptest/client.go | 24 ++--- network/p2p/p2ptest/client_test.go | 4 +- x/sync/client_test.go | 19 +--- x/sync/manager.go | 23 ++--- x/sync/network_server.go | 75 +++++++------- x/sync/network_server_test.go | 21 ++-- x/sync/sync_test.go | 152 +++++++++++++++-------------- 7 files changed, 155 insertions(+), 163 deletions(-) diff --git a/network/p2p/p2ptest/client.go b/network/p2p/p2ptest/client.go index b75654028666..747904b40ffb 100644 --- a/network/p2p/p2ptest/client.go +++ b/network/p2p/p2ptest/client.go @@ -21,25 +21,19 @@ import ( // NewClient generates a client-server pair and returns the client used to // communicate with a server with the specified handler -func NewClient( - t *testing.T, - ctx context.Context, - handler p2p.Handler, - clientNodeID ids.NodeID, - serverNodeID ids.NodeID, -) *p2p.Client { +func NewClient(t *testing.T, rootCtx context.Context, handler p2p.Handler) *p2p.Client { clientSender := &enginetest.Sender{} serverSender := &enginetest.Sender{} + clientNodeID := ids.GenerateTestNodeID() clientNetwork, err := p2p.NewNetwork(logging.NoLog{}, clientSender, prometheus.NewRegistry(), "") require.NoError(t, err) + serverNodeID := ids.GenerateTestNodeID() serverNetwork, err := p2p.NewNetwork(logging.NoLog{}, serverSender, prometheus.NewRegistry(), "") require.NoError(t, err) clientSender.SendAppGossipF = func(ctx context.Context, _ common.SendConfig, gossipBytes []byte) error { - // Send the request asynchronously to avoid deadlock when the server - // sends the response back to the client go func() { require.NoError(t, serverNetwork.AppGossip(ctx, clientNodeID, gossipBytes)) }() @@ -58,8 +52,6 @@ func NewClient( } serverSender.SendAppResponseF = func(ctx context.Context, _ ids.NodeID, requestID uint32, responseBytes []byte) error { - // Send the request asynchronously to avoid deadlock when the server - // sends the response back to the client go func() { require.NoError(t, clientNetwork.AppResponse(ctx, 
serverNodeID, requestID, responseBytes)) }() @@ -68,8 +60,6 @@ func NewClient( } serverSender.SendAppErrorF = func(ctx context.Context, _ ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error { - // Send the request asynchronously to avoid deadlock when the server - // sends the response back to the client go func() { require.NoError(t, clientNetwork.AppRequestFailed(ctx, serverNodeID, requestID, &common.AppError{ Code: errorCode, @@ -80,10 +70,10 @@ func NewClient( return nil } - require.NoError(t, clientNetwork.Connected(ctx, clientNodeID, nil)) - require.NoError(t, clientNetwork.Connected(ctx, serverNodeID, nil)) - require.NoError(t, serverNetwork.Connected(ctx, clientNodeID, nil)) - require.NoError(t, serverNetwork.Connected(ctx, serverNodeID, nil)) + require.NoError(t, clientNetwork.Connected(rootCtx, clientNodeID, nil)) + require.NoError(t, clientNetwork.Connected(rootCtx, serverNodeID, nil)) + require.NoError(t, serverNetwork.Connected(rootCtx, clientNodeID, nil)) + require.NoError(t, serverNetwork.Connected(rootCtx, serverNodeID, nil)) require.NoError(t, serverNetwork.AddHandler(0, handler)) return clientNetwork.NewClient(0) diff --git a/network/p2p/p2ptest/client_test.go b/network/p2p/p2ptest/client_test.go index 45ae970ecf0f..cef624aaccbc 100644 --- a/network/p2p/p2ptest/client_test.go +++ b/network/p2p/p2ptest/client_test.go @@ -27,7 +27,7 @@ func TestNewClient_AppGossip(t *testing.T) { }, } - client := NewClient(t, ctx, testHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + client := NewClient(t, ctx, testHandler) require.NoError(client.AppGossip(ctx, common.SendConfig{}, []byte("foobar"))) <-appGossipChan } @@ -94,7 +94,7 @@ func TestNewClient_AppRequest(t *testing.T) { }, } - client := NewClient(t, ctx, testHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + client := NewClient(t, ctx, testHandler) require.NoError(tt.appRequestF( ctx, client, diff --git a/x/sync/client_test.go b/x/sync/client_test.go index decc3e20405d..2633071439da 100644 --- a/x/sync/client_test.go +++ b/x/sync/client_test.go @@ -38,12 +38,12 @@ func newDefaultDBConfig() merkledb.Config { } } -func newFlakyRangeProofHandler( +func newModifiedRangeProofHandler( t *testing.T, db merkledb.MerkleDB, modifyResponse func(response *merkledb.RangeProof), ) p2p.Handler { - handler := NewGetRangeProofHandler(logging.NoLog{}, db) + handler := NewSyncGetRangeProofHandler(logging.NoLog{}, db) c := counter{m: 2} return &p2p.TestHandler{ @@ -74,12 +74,12 @@ func newFlakyRangeProofHandler( } } -func newFlakyChangeProofHandler( +func newModifiedChangeProofHandler( t *testing.T, db merkledb.MerkleDB, modifyResponse func(response *merkledb.ChangeProof), ) p2p.Handler { - handler := NewGetChangeProofHandler(logging.NoLog{}, db) + handler := NewSyncGetChangeProofHandler(logging.NoLog{}, db) c := counter{m: 2} return &p2p.TestHandler{ @@ -145,14 +145,3 @@ func (c *counter) Inc() int { c.i++ return result } - -type waitingHandler struct { - p2p.NoOpHandler - handler p2p.Handler - updatedRootChan chan struct{} -} - -func (w *waitingHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { - <-w.updatedRootChan - return w.handler.AppRequest(ctx, nodeID, deadline, requestBytes) -} diff --git a/x/sync/manager.go b/x/sync/manager.go index ddcdc1637088..dd176c223033 100644 --- a/x/sync/manager.go +++ b/x/sync/manager.go @@ -41,7 +41,7 @@ var ( ErrAlreadyStarted = errors.New("cannot start a Manager that has already been 
started") ErrAlreadyClosed = errors.New("Manager is closed") ErrNoRangeProofClientProvided = errors.New("range proof client is a required field of the sync config") - ErrNoChangeProofClientProvided = errors.New("change proof client is a required field of the sync config") + ErrNoChangeProofClientProvided = errors.New("change proofclient is a required field of the sync config") ErrNoDatabaseProvided = errors.New("sync database is a required field of the sync config") ErrNoLogProvided = errors.New("log is a required field of the sync config") ErrZeroWorkLimit = errors.New("simultaneous work limit must be greater than 0") @@ -305,12 +305,7 @@ func (m *Manager) doWork(ctx context.Context, work *workItem) { return } - select { - case <-ctx.Done(): - m.finishWorkItem() - return - case <-time.After(waitTime): - } + <-time.After(waitTime) if work.localRootID == ids.Empty { // the keys in this range have not been downloaded, so get all key/values @@ -373,8 +368,7 @@ func (m *Manager) requestChangeProof(ctx context.Context, work *workItem) { defer m.finishWorkItem() if err := m.handleChangeProofResponse(ctx, targetRootID, work, request, responseBytes, err); err != nil { - // TODO log responses - m.config.Log.Debug("dropping response", zap.Error(err), zap.Stringer("request", request)) + m.config.Log.Debug("dropping response", zap.Error(err)) m.retryWork(work) return } @@ -431,8 +425,7 @@ func (m *Manager) requestRangeProof(ctx context.Context, work *workItem) { defer m.finishWorkItem() if err := m.handleRangeProofResponse(ctx, targetRootID, work, request, responseBytes, appErr); err != nil { - // TODO log responses - m.config.Log.Debug("dropping response", zap.Error(err), zap.Stringer("request", request)) + m.config.Log.Debug("dropping response", zap.Error(err)) m.retryWork(work) return } @@ -468,11 +461,10 @@ func (m *Manager) retryWork(work *workItem) { m.workLock.Lock() m.unprocessedWork.Insert(work) m.workLock.Unlock() - m.unprocessedWorkCond.Signal() } // Returns an error if we should drop the response -func (m *Manager) shouldHandleResponse( +func (m *Manager) handleResponse( bytesLimit uint32, responseBytes []byte, err error, @@ -507,7 +499,7 @@ func (m *Manager) handleRangeProofResponse( responseBytes []byte, err error, ) error { - if err := m.shouldHandleResponse(request.BytesLimit, responseBytes, err); err != nil { + if err := m.handleResponse(request.BytesLimit, responseBytes, err); err != nil { return err } @@ -558,7 +550,7 @@ func (m *Manager) handleChangeProofResponse( responseBytes []byte, err error, ) error { - if err := m.shouldHandleResponse(request.BytesLimit, responseBytes, err); err != nil { + if err := m.handleResponse(request.BytesLimit, responseBytes, err); err != nil { return err } @@ -614,6 +606,7 @@ func (m *Manager) handleChangeProofResponse( m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, changeProof.EndProof) case *pb.SyncGetChangeProofResponse_RangeProof: + var rangeProof merkledb.RangeProof if err := rangeProof.UnmarshalProto(changeProofResp.RangeProof); err != nil { return err diff --git a/x/sync/network_server.go b/x/sync/network_server.go index 2153f2fbcc97..ec70c2335b64 100644 --- a/x/sync/network_server.go +++ b/x/sync/network_server.go @@ -49,8 +49,8 @@ var ( errInvalidBounds = errors.New("start key is greater than end key") errInvalidRootHash = fmt.Errorf("root hash must have length %d", hashing.HashLen) - _ p2p.Handler = (*GetChangeProofHandler)(nil) - _ p2p.Handler = (*GetRangeProofHandler)(nil) + _ p2p.Handler = 
(*SyncGetChangeProofHandler)(nil) + _ p2p.Handler = (*SyncGetRangeProofHandler)(nil) ) func maybeBytesToMaybe(mb *pb.MaybeBytes) maybe.Maybe[[]byte] { @@ -60,30 +60,30 @@ func maybeBytesToMaybe(mb *pb.MaybeBytes) maybe.Maybe[[]byte] { return maybe.Nothing[[]byte]() } -func NewGetChangeProofHandler(log logging.Logger, db DB) *GetChangeProofHandler { - return &GetChangeProofHandler{ +func NewSyncGetChangeProofHandler(log logging.Logger, db DB) *SyncGetChangeProofHandler { + return &SyncGetChangeProofHandler{ log: log, db: db, } } -type GetChangeProofHandler struct { +type SyncGetChangeProofHandler struct { log logging.Logger db DB } -func (*GetChangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} +func (*SyncGetChangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} -func (g *GetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { - req := &pb.SyncGetChangeProofRequest{} - if err := proto.Unmarshal(requestBytes, req); err != nil { +func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { + request := &pb.SyncGetChangeProofRequest{} + if err := proto.Unmarshal(requestBytes, request); err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("failed to unmarshal request: %s", err), } } - if err := validateChangeProofRequest(req); err != nil { + if err := validateChangeProofRequest(request); err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("invalid request: %s", err), @@ -92,13 +92,13 @@ func (g *GetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ // override limits if they exceed caps var ( - keyLimit = min(req.KeyLimit, maxKeyValuesLimit) - bytesLimit = min(int(req.BytesLimit), maxByteSizeLimit) - start = maybeBytesToMaybe(req.StartKey) - end = maybeBytesToMaybe(req.EndKey) + keyLimit = min(request.KeyLimit, maxKeyValuesLimit) + bytesLimit = min(int(request.BytesLimit), maxByteSizeLimit) + start = maybeBytesToMaybe(request.StartKey) + end = maybeBytesToMaybe(request.EndKey) ) - startRoot, err := ids.ToID(req.StartRootHash) + startRoot, err := ids.ToID(request.StartRootHash) if err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, @@ -106,7 +106,7 @@ func (g *GetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ } } - endRoot, err := ids.ToID(req.EndRootHash) + endRoot, err := ids.ToID(request.EndRootHash) if err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, @@ -120,7 +120,6 @@ func (g *GetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ if !errors.Is(err, merkledb.ErrInsufficientHistory) { // We should only fail to get a change proof if we have insufficient history. // Other errors are unexpected. 
- // TODO define custom errors return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("failed to get change proof: %s", err), @@ -141,11 +140,11 @@ func (g *GetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ ctx, g.db, &pb.SyncGetRangeProofRequest{ - RootHash: req.EndRootHash, - StartKey: req.StartKey, - EndKey: req.EndKey, - KeyLimit: req.KeyLimit, - BytesLimit: req.BytesLimit, + RootHash: request.EndRootHash, + StartKey: request.StartKey, + EndKey: request.EndKey, + KeyLimit: request.KeyLimit, + BytesLimit: request.BytesLimit, }, func(rangeProof *merkledb.RangeProof) ([]byte, error) { return proto.Marshal(&pb.SyncGetChangeProofResponse{ @@ -192,30 +191,34 @@ func (g *GetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ } } -func NewGetRangeProofHandler(log logging.Logger, db DB) *GetRangeProofHandler { - return &GetRangeProofHandler{ +func (*SyncGetChangeProofHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + return nil, nil +} + +func NewSyncGetRangeProofHandler(log logging.Logger, db DB) *SyncGetRangeProofHandler { + return &SyncGetRangeProofHandler{ log: log, db: db, } } -type GetRangeProofHandler struct { +type SyncGetRangeProofHandler struct { log logging.Logger db DB } -func (*GetRangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} +func (*SyncGetRangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} -func (g *GetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { - req := &pb.SyncGetRangeProofRequest{} - if err := proto.Unmarshal(requestBytes, req); err != nil { +func (s *SyncGetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { + request := &pb.SyncGetRangeProofRequest{} + if err := proto.Unmarshal(requestBytes, request); err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("failed to unmarshal request: %s", err), } } - if err := validateRangeProofRequest(req); err != nil { + if err := validateRangeProofRequest(request); err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("invalid range proof request: %s", err), @@ -223,13 +226,13 @@ func (g *GetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ t } // override limits if they exceed caps - req.KeyLimit = min(req.KeyLimit, maxKeyValuesLimit) - req.BytesLimit = min(req.BytesLimit, maxByteSizeLimit) + request.KeyLimit = min(request.KeyLimit, maxKeyValuesLimit) + request.BytesLimit = min(request.BytesLimit, maxByteSizeLimit) proofBytes, err := getRangeProof( ctx, - g.db, - req, + s.db, + request, func(rangeProof *merkledb.RangeProof) ([]byte, error) { return proto.Marshal(rangeProof.ToProto()) }, @@ -244,6 +247,10 @@ func (g *GetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ t return proofBytes, nil } +func (*SyncGetRangeProofHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + return nil, nil +} + // Get the range proof specified by [req]. // If the generated proof is too large, the key limit is reduced // and the proof is regenerated. 
This process is repeated until diff --git a/x/sync/network_server_test.go b/x/sync/network_server_test.go index c78554cea59f..84dbd1c12682 100644 --- a/x/sync/network_server_test.go +++ b/x/sync/network_server_test.go @@ -85,7 +85,7 @@ func Test_Server_GetRangeProof(t *testing.T) { expectedErr: p2p.ErrUnexpected, }, { - name: "response bounded by key limit", + name: "key limit too large", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], KeyLimit: 2 * defaultRequestKeyLimit, @@ -94,7 +94,7 @@ func Test_Server_GetRangeProof(t *testing.T) { expectedResponseLen: defaultRequestKeyLimit, }, { - name: "response bounded by byte limit", + name: "bytes limit too large", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], KeyLimit: defaultRequestKeyLimit, @@ -118,7 +118,7 @@ func Test_Server_GetRangeProof(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - handler := NewGetRangeProofHandler(logging.NoLog{}, smallTrieDB) + handler := NewSyncGetRangeProofHandler(logging.NoLog{}, smallTrieDB) requestBytes, err := proto.Marshal(test.request) require.NoError(err) responseBytes, err := handler.AppRequest(context.Background(), test.nodeID, time.Time{}, requestBytes) @@ -130,12 +130,17 @@ func Test_Server_GetRangeProof(t *testing.T) { require.Nil(responseBytes) return } + require.NotNil(responseBytes) - var proofProto pb.RangeProof - require.NoError(proto.Unmarshal(responseBytes, &proofProto)) + var proof *merkledb.RangeProof + if !test.proofNil { + var proofProto pb.RangeProof + require.NoError(proto.Unmarshal(responseBytes, &proofProto)) - var proof merkledb.RangeProof - require.NoError(proof.UnmarshalProto(&proofProto)) + var p merkledb.RangeProof + require.NoError(p.UnmarshalProto(&proofProto)) + proof = &p + } if test.expectedResponseLen > 0 { require.LessOrEqual(len(proof.KeyValues), test.expectedResponseLen) @@ -339,7 +344,7 @@ func Test_Server_GetChangeProof(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - handler := NewGetChangeProofHandler(logging.NoLog{}, serverDB) + handler := NewSyncGetChangeProofHandler(logging.NoLog{}, serverDB) requestBytes, err := proto.Marshal(test.request) require.NoError(err) diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index 41dd5829a7b8..db480f90f0c7 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -19,12 +19,13 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/p2ptest" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/x/merkledb" ) -var _ p2p.Handler = (*waitingHandler)(nil) +var _ p2p.Handler = (*testHandler)(nil) func Test_Creation(t *testing.T) { require := require.New(t) @@ -39,8 +40,8 @@ func Test_Creation(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, @@ -72,8 +73,8 @@ func Test_Completion(t *testing.T) { ctx := context.Background() syncer, 
err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, emptyDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, emptyDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, emptyDB)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, emptyDB)), TargetRoot: emptyRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -177,8 +178,8 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -253,8 +254,8 @@ func Test_Sync_FindNextKey_Deleted(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -303,8 +304,8 @@ func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, TargetRoot: targetRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -340,8 +341,8 @@ func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, TargetRoot: targetRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -376,8 +377,8 @@ func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), 
ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -438,8 +439,8 @@ func TestFindNextKeyEmptyEndProof(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, db)), TargetRoot: ids.Empty, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -507,8 +508,8 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -540,6 +541,7 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { // Test findNextKey by computing the expected result in a naive, inefficient // way and comparing it to the actual result + func TestFindNextKeyRandom(t *testing.T) { now := time.Now().UnixNano() t.Logf("seed: %d", now) @@ -730,8 +732,8 @@ func TestFindNextKeyRandom(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: localDB, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, remoteDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, remoteDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, TargetRoot: ids.GenerateTestID(), SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -773,27 +775,27 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "range proof bad response - too many leaves in response", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.KeyValues = append(response.KeyValues, merkledb.KeyValue{}) }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { name: "range proof bad response - removed first key in response", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.KeyValues = response.KeyValues[min(1, 
len(response.KeyValues)):] }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { name: "range proof bad response - removed first key in response and replaced proof", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.KeyValues = response.KeyValues[min(1, len(response.KeyValues)):] response.KeyValues = []merkledb.KeyValue{ { @@ -813,111 +815,111 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { } }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { name: "range proof bad response - removed key from middle of response", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { i := rand.Intn(max(1, len(response.KeyValues)-1)) // #nosec G404 _ = slices.Delete(response.KeyValues, i, min(len(response.KeyValues), i+1)) }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { name: "range proof bad response - start and end proof nodes removed", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.StartProof = nil response.EndProof = nil }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { name: "range proof bad response - end proof removed", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.EndProof = nil }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { name: "range proof bad response - empty proof", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.StartProof = nil response.EndProof = nil response.KeyValues = nil }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { name: "range proof server flake", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { return p2ptest.NewClient(t, context.Background(), &flakyHandler{ - Handler: NewGetRangeProofHandler(logging.NoLog{}, db), + Handler: NewSyncGetRangeProofHandler(logging.NoLog{}, db), c: &counter{m: 2}, - }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + }) }, }, { name: "change proof bad response - too many keys in response", changeProofClient: 
func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { + handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { response.KeyChanges = append(response.KeyChanges, make([]merkledb.KeyChange, defaultRequestKeyLimit)...) }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { name: "change proof bad response - removed first key in response", changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { + handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { response.KeyChanges = response.KeyChanges[min(1, len(response.KeyChanges)):] }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { name: "change proof bad response - removed key from middle of response", changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { + handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { i := rand.Intn(max(1, len(response.KeyChanges)-1)) // #nosec G404 _ = slices.Delete(response.KeyChanges, i, min(len(response.KeyChanges), i+1)) }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { - name: "change proof bad response - all proof keys removed from response", + name: "all proof keys removed from response", changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newFlakyChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { + handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { response.StartProof = nil response.EndProof = nil }) - return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + return p2ptest.NewClient(t, context.Background(), handler) }, }, { - name: "change proof flaky server", + name: "flaky change proof client", changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { return p2ptest.NewClient(t, context.Background(), &flakyHandler{ - Handler: NewGetChangeProofHandler(logging.NoLog{}, db), + Handler: NewSyncGetChangeProofHandler(logging.NoLog{}, db), c: &counter{m: 2}, - }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + }) }, }, } @@ -945,14 +947,14 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { changeProofClient *p2p.Client ) - rangeProofHandler := NewGetRangeProofHandler(logging.NoLog{}, dbToSync) - rangeProofClient = p2ptest.NewClient(t, ctx, rangeProofHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + rangeProofHandler := NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync) + rangeProofClient = p2ptest.NewClient(t, ctx, rangeProofHandler) if tt.rangeProofClient != nil { rangeProofClient = tt.rangeProofClient(dbToSync) } - changeProofHandler := NewGetChangeProofHandler(logging.NoLog{}, dbToSync) - changeProofClient = p2ptest.NewClient(t, ctx, changeProofHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + changeProofHandler := NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync) + changeProofClient = p2ptest.NewClient(t, 
ctx, changeProofHandler) if tt.changeProofClient != nil { changeProofClient = tt.changeProofClient(dbToSync) } @@ -974,12 +976,8 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { require.NoError(syncer.Start(ctx)) // Simulate writes on the server - // - // TODO add more writes when api is not flaky. There is an inherent - // race condition in between writes where UpdateSyncTarget might - // error because it has already reached the sync target before it - // is called. - for i := 0; i < 50; i++ { + // TODO more than a single write when API is less flaky + for i := 0; i <= 1; i++ { addkey := make([]byte, r.Intn(50)) _, err = r.Read(addkey) require.NoError(err) @@ -1031,8 +1029,8 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -1058,8 +1056,8 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { newSyncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -1128,15 +1126,15 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { updatedRootChan <- struct{}{} ctx := context.Background() - rangeProofClient := p2ptest.NewClient(t, ctx, &waitingHandler{ - handler: NewGetRangeProofHandler(logging.NoLog{}, dbToSync), + rangeProofClient := p2ptest.NewClient(t, ctx, &testHandler{ + handler: NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), updatedRootChan: updatedRootChan, - }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + }) - changeProofClient := p2ptest.NewClient(t, ctx, &waitingHandler{ - handler: NewGetChangeProofHandler(logging.NoLog{}, dbToSync), + changeProofClient := p2ptest.NewClient(t, ctx, &testHandler{ + handler: NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), updatedRootChan: updatedRootChan, - }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) + }) syncer, err := NewManager(ManagerConfig{ DB: db, @@ -1184,11 +1182,10 @@ func Test_Sync_UpdateSyncTarget(t *testing.T) { newDefaultDBConfig(), ) require.NoError(err) - ctx := context.Background() m, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + 
RangeProofClient: &p2p.Client{}, + ChangeProofClient: &p2p.Client{}, TargetRoot: ids.Empty, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -1287,3 +1284,14 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen } return db, batch.Write() } + +type testHandler struct { + p2p.NoOpHandler + handler p2p.Handler + updatedRootChan chan struct{} +} + +func (t *testHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { + <-t.updatedRootChan + return t.handler.AppRequest(ctx, nodeID, deadline, requestBytes) +} From 34fe3a673fd486d13e1c57f8cee74a7c75abe2a2 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 10 Sep 2024 16:28:56 -0400 Subject: [PATCH 056/155] nit Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/p2p/p2ptest/client.go | 10 +++-- network/p2p/p2ptest/client_test.go | 4 +- x/sync/network_server.go | 48 ++++++++++-------------- x/sync/sync_test.go | 60 +++++++++++++++--------------- 4 files changed, 59 insertions(+), 63 deletions(-) diff --git a/network/p2p/p2ptest/client.go b/network/p2p/p2ptest/client.go index 747904b40ffb..8d41d6b99bce 100644 --- a/network/p2p/p2ptest/client.go +++ b/network/p2p/p2ptest/client.go @@ -21,15 +21,19 @@ import ( // NewClient generates a client-server pair and returns the client used to // communicate with a server with the specified handler -func NewClient(t *testing.T, rootCtx context.Context, handler p2p.Handler) *p2p.Client { +func NewClient( + t *testing.T, + rootCtx context.Context, + handler p2p.Handler, + clientNodeID ids.NodeID, + serverNodeID ids.NodeID, +) *p2p.Client { clientSender := &enginetest.Sender{} serverSender := &enginetest.Sender{} - clientNodeID := ids.GenerateTestNodeID() clientNetwork, err := p2p.NewNetwork(logging.NoLog{}, clientSender, prometheus.NewRegistry(), "") require.NoError(t, err) - serverNodeID := ids.GenerateTestNodeID() serverNetwork, err := p2p.NewNetwork(logging.NoLog{}, serverSender, prometheus.NewRegistry(), "") require.NoError(t, err) diff --git a/network/p2p/p2ptest/client_test.go b/network/p2p/p2ptest/client_test.go index cef624aaccbc..45ae970ecf0f 100644 --- a/network/p2p/p2ptest/client_test.go +++ b/network/p2p/p2ptest/client_test.go @@ -27,7 +27,7 @@ func TestNewClient_AppGossip(t *testing.T) { }, } - client := NewClient(t, ctx, testHandler) + client := NewClient(t, ctx, testHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) require.NoError(client.AppGossip(ctx, common.SendConfig{}, []byte("foobar"))) <-appGossipChan } @@ -94,7 +94,7 @@ func TestNewClient_AppRequest(t *testing.T) { }, } - client := NewClient(t, ctx, testHandler) + client := NewClient(t, ctx, testHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) require.NoError(tt.appRequestF( ctx, client, diff --git a/x/sync/network_server.go b/x/sync/network_server.go index ec70c2335b64..10d86ed140eb 100644 --- a/x/sync/network_server.go +++ b/x/sync/network_server.go @@ -75,15 +75,15 @@ type SyncGetChangeProofHandler struct { func (*SyncGetChangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { - request := &pb.SyncGetChangeProofRequest{} - if err := proto.Unmarshal(requestBytes, request); err != nil { + req := &pb.SyncGetChangeProofRequest{} + if err := proto.Unmarshal(requestBytes, req); err 
!= nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("failed to unmarshal request: %s", err), } } - if err := validateChangeProofRequest(request); err != nil { + if err := validateChangeProofRequest(req); err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("invalid request: %s", err), @@ -92,13 +92,13 @@ func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID // override limits if they exceed caps var ( - keyLimit = min(request.KeyLimit, maxKeyValuesLimit) - bytesLimit = min(int(request.BytesLimit), maxByteSizeLimit) - start = maybeBytesToMaybe(request.StartKey) - end = maybeBytesToMaybe(request.EndKey) + keyLimit = min(req.KeyLimit, maxKeyValuesLimit) + bytesLimit = min(int(req.BytesLimit), maxByteSizeLimit) + start = maybeBytesToMaybe(req.StartKey) + end = maybeBytesToMaybe(req.EndKey) ) - startRoot, err := ids.ToID(request.StartRootHash) + startRoot, err := ids.ToID(req.StartRootHash) if err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, @@ -106,7 +106,7 @@ func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID } } - endRoot, err := ids.ToID(request.EndRootHash) + endRoot, err := ids.ToID(req.EndRootHash) if err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, @@ -140,11 +140,11 @@ func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID ctx, g.db, &pb.SyncGetRangeProofRequest{ - RootHash: request.EndRootHash, - StartKey: request.StartKey, - EndKey: request.EndKey, - KeyLimit: request.KeyLimit, - BytesLimit: request.BytesLimit, + RootHash: req.EndRootHash, + StartKey: req.StartKey, + EndKey: req.EndKey, + KeyLimit: req.KeyLimit, + BytesLimit: req.BytesLimit, }, func(rangeProof *merkledb.RangeProof) ([]byte, error) { return proto.Marshal(&pb.SyncGetChangeProofResponse{ @@ -191,10 +191,6 @@ func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID } } -func (*SyncGetChangeProofHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { - return nil, nil -} - func NewSyncGetRangeProofHandler(log logging.Logger, db DB) *SyncGetRangeProofHandler { return &SyncGetRangeProofHandler{ log: log, @@ -210,15 +206,15 @@ type SyncGetRangeProofHandler struct { func (*SyncGetRangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} func (s *SyncGetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { - request := &pb.SyncGetRangeProofRequest{} - if err := proto.Unmarshal(requestBytes, request); err != nil { + req := &pb.SyncGetRangeProofRequest{} + if err := proto.Unmarshal(requestBytes, req); err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("failed to unmarshal request: %s", err), } } - if err := validateRangeProofRequest(request); err != nil { + if err := validateRangeProofRequest(req); err != nil { return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("invalid range proof request: %s", err), @@ -226,13 +222,13 @@ func (s *SyncGetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, } // override limits if they exceed caps - request.KeyLimit = min(request.KeyLimit, maxKeyValuesLimit) - request.BytesLimit = min(request.BytesLimit, maxByteSizeLimit) + req.KeyLimit = min(req.KeyLimit, maxKeyValuesLimit) + req.BytesLimit = min(req.BytesLimit, maxByteSizeLimit) proofBytes, err := getRangeProof( 
ctx, s.db, - request, + req, func(rangeProof *merkledb.RangeProof) ([]byte, error) { return proto.Marshal(rangeProof.ToProto()) }, @@ -247,10 +243,6 @@ func (s *SyncGetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, return proofBytes, nil } -func (*SyncGetRangeProofHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { - return nil, nil -} - // Get the range proof specified by [req]. // If the generated proof is too large, the key limit is reduced // and the proof is regenerated. This process is repeated until diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index db480f90f0c7..7373c08bf5c5 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -73,8 +73,8 @@ func Test_Completion(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, emptyDB)), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, emptyDB)), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, emptyDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, emptyDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: emptyRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -178,8 +178,8 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -377,8 +377,8 @@ func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -440,7 +440,7 @@ func TestFindNextKeyEmptyEndProof(t *testing.T) { syncer, err := NewManager(ManagerConfig{ DB: db, RangeProofClient: &p2p.Client{}, - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, db)), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: ids.Empty, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -508,8 +508,8 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - 
RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -779,7 +779,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyValues = append(response.KeyValues, merkledb.KeyValue{}) }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -789,7 +789,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyValues = response.KeyValues[min(1, len(response.KeyValues)):] }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -815,7 +815,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { } }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -826,7 +826,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { _ = slices.Delete(response.KeyValues, i, min(len(response.KeyValues), i+1)) }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -837,7 +837,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.EndProof = nil }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -847,7 +847,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.EndProof = nil }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -859,7 +859,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyValues = nil }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -868,7 +868,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { return p2ptest.NewClient(t, context.Background(), &flakyHandler{ Handler: NewSyncGetRangeProofHandler(logging.NoLog{}, db), c: &counter{m: 2}, - }) + }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -878,7 +878,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyChanges = append(response.KeyChanges, make([]merkledb.KeyChange, defaultRequestKeyLimit)...) 
}) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -888,7 +888,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyChanges = response.KeyChanges[min(1, len(response.KeyChanges)):] }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -899,7 +899,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { _ = slices.Delete(response.KeyChanges, i, min(len(response.KeyChanges), i+1)) }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -910,7 +910,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.EndProof = nil }) - return p2ptest.NewClient(t, context.Background(), handler) + return p2ptest.NewClient(t, context.Background(), handler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, { @@ -919,7 +919,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { return p2ptest.NewClient(t, context.Background(), &flakyHandler{ Handler: NewSyncGetChangeProofHandler(logging.NoLog{}, db), c: &counter{m: 2}, - }) + }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, }, } @@ -948,13 +948,13 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { ) rangeProofHandler := NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync) - rangeProofClient = p2ptest.NewClient(t, ctx, rangeProofHandler) + rangeProofClient = p2ptest.NewClient(t, ctx, rangeProofHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) if tt.rangeProofClient != nil { rangeProofClient = tt.rangeProofClient(dbToSync) } changeProofHandler := NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync) - changeProofClient = p2ptest.NewClient(t, ctx, changeProofHandler) + changeProofClient = p2ptest.NewClient(t, ctx, changeProofHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) if tt.changeProofClient != nil { changeProofClient = tt.changeProofClient(dbToSync) } @@ -1029,8 +1029,8 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -1056,8 +1056,8 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { newSyncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync)), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync)), + RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), 
ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -1129,12 +1129,12 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { rangeProofClient := p2ptest.NewClient(t, ctx, &testHandler{ handler: NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), updatedRootChan: updatedRootChan, - }) + }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) changeProofClient := p2ptest.NewClient(t, ctx, &testHandler{ handler: NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), updatedRootChan: updatedRootChan, - }) + }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) syncer, err := NewManager(ManagerConfig{ DB: db, From b9d550771feb4b4bcae3342a34774b1cdf567c39 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 10 Sep 2024 16:28:56 -0400 Subject: [PATCH 057/155] add acp-118 implementation Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/acp118/aggregator.go | 229 ++++++++++++++++++++++++++++++ network/acp118/aggregator_test.go | 205 ++++++++++++++++++++++++++ network/acp118/handler.go | 107 ++++++++++++++ network/acp118/handler_test.go | 118 +++++++++++++++ network/p2p/error.go | 6 + 5 files changed, 665 insertions(+) create mode 100644 network/acp118/aggregator.go create mode 100644 network/acp118/aggregator_test.go create mode 100644 network/acp118/handler.go create mode 100644 network/acp118/handler_test.go diff --git a/network/acp118/aggregator.go b/network/acp118/aggregator.go new file mode 100644 index 000000000000..9cbc10a9442c --- /dev/null +++ b/network/acp118/aggregator.go @@ -0,0 +1,229 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package acp118 + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.uber.org/zap" + "golang.org/x/sync/semaphore" + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +var ( + ErrDuplicateValidator = errors.New("duplicate validator") + ErrInsufficientSignatures = errors.New("failed to aggregate sufficient stake weight of signatures") +) + +type result struct { + message *warp.Message + err error +} + +type Validator struct { + NodeID ids.NodeID + PublicKey *bls.PublicKey + Weight uint64 +} + +type indexedValidator struct { + Validator + I int +} + +// NewSignatureAggregator returns an instance of SignatureAggregator +func NewSignatureAggregator( + log logging.Logger, + client *p2p.Client, + maxPending int, +) *SignatureAggregator { + return &SignatureAggregator{ + log: log, + client: client, + maxPending: int64(maxPending), + } +} + +// SignatureAggregator aggregates validator signatures for warp messages +type SignatureAggregator struct { + log logging.Logger + client *p2p.Client + maxPending int64 +} + +// AggregateSignatures blocks until stakeWeightThreshold of validators signs the +// provided message. Validators are issued requests in the caller-specified +// order. 
+func (s *SignatureAggregator) AggregateSignatures( + parentCtx context.Context, + message *warp.UnsignedMessage, + justification []byte, + validators []Validator, + stakeWeightThreshold uint64, +) (*warp.Message, error) { + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + request := &sdk.SignatureRequest{ + Message: message.Bytes(), + Justification: justification, + } + + requestBytes, err := proto.Marshal(request) + if err != nil { + return nil, fmt.Errorf("failed to marshal signature request: %w", err) + } + + done := make(chan result) + pendingRequests := semaphore.NewWeighted(s.maxPending) + lock := &sync.Mutex{} + aggregatedStakeWeight := uint64(0) + attemptedStakeWeight := uint64(0) + totalStakeWeight := uint64(0) + signatures := make([]*bls.Signature, 0) + signerBitSet := set.NewBits() + + nodeIDsToValidator := make(map[ids.NodeID]indexedValidator) + for i, v := range validators { + totalStakeWeight += v.Weight + + // Sanity check the validator set provided by the caller + if _, ok := nodeIDsToValidator[v.NodeID]; ok { + return nil, fmt.Errorf("%w: %s", ErrDuplicateValidator, v.NodeID) + } + + nodeIDsToValidator[v.NodeID] = indexedValidator{ + I: i, + Validator: v, + } + } + + onResponse := func( + _ context.Context, + nodeID ids.NodeID, + responseBytes []byte, + err error, + ) { + // We are guaranteed a response from a node in the validator set + validator := nodeIDsToValidator[nodeID] + + defer func() { + lock.Lock() + attemptedStakeWeight += validator.Weight + remainingStakeWeight := totalStakeWeight - attemptedStakeWeight + failed := remainingStakeWeight < stakeWeightThreshold + lock.Unlock() + + if failed { + done <- result{err: ErrInsufficientSignatures} + } + + pendingRequests.Release(1) + }() + + if err != nil { + s.log.Debug( + "dropping response", + zap.Stringer("nodeID", nodeID), + zap.Error(err), + ) + return + } + + response := &sdk.SignatureResponse{} + if err := proto.Unmarshal(responseBytes, response); err != nil { + s.log.Debug( + "dropping response", + zap.Stringer("nodeID", nodeID), + zap.Error(err), + ) + return + } + + signature, err := bls.SignatureFromBytes(response.Signature) + if err != nil { + s.log.Debug( + "dropping response", + zap.Stringer("nodeID", nodeID), + zap.String("reason", "invalid signature"), + zap.Error(err), + ) + return + } + + if !bls.Verify(validator.PublicKey, signature, message.Bytes()) { + s.log.Debug( + "dropping response", + zap.Stringer("nodeID", nodeID), + zap.String("reason", "public key failed verification"), + ) + return + } + + lock.Lock() + signerBitSet.Add(validator.I) + signatures = append(signatures, signature) + aggregatedStakeWeight += validator.Weight + + if aggregatedStakeWeight >= stakeWeightThreshold { + aggregateSignature, err := bls.AggregateSignatures(signatures) + if err != nil { + done <- result{err: err} + lock.Unlock() + return + } + + bitSetSignature := &warp.BitSetSignature{ + Signers: signerBitSet.Bytes(), + Signature: [bls.SignatureLen]byte{}, + } + + copy(bitSetSignature.Signature[:], bls.SignatureToBytes(aggregateSignature)) + signedMessage, err := warp.NewMessage(message, bitSetSignature) + done <- result{message: signedMessage, err: err} + lock.Unlock() + return + } + + lock.Unlock() + } + + for _, validator := range validators { + if err := pendingRequests.Acquire(ctx, 1); err != nil { + return nil, err + } + + // Avoid loop shadowing in goroutine + validatorCopy := validator + go func() { + if err := s.client.AppRequest( + ctx, + set.Of(validatorCopy.NodeID), + requestBytes, + 
onResponse, + ); err != nil { + done <- result{err: err} + return + } + }() + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case r := <-done: + return r.message, r.err + } +} diff --git a/network/acp118/aggregator_test.go b/network/acp118/aggregator_test.go new file mode 100644 index 000000000000..50622fc4ac99 --- /dev/null +++ b/network/acp118/aggregator_test.go @@ -0,0 +1,205 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package acp118 + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/p2ptest" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +func TestVerifier_Verify(t *testing.T) { + nodeID0 := ids.GenerateTestNodeID() + sk0, err := bls.NewSecretKey() + require.NoError(t, err) + pk0 := bls.PublicFromSecretKey(sk0) + + nodeID1 := ids.GenerateTestNodeID() + sk1, err := bls.NewSecretKey() + require.NoError(t, err) + pk1 := bls.PublicFromSecretKey(sk1) + + networkID := uint32(123) + subnetID := ids.GenerateTestID() + chainID := ids.GenerateTestID() + signer := warp.NewSigner(sk0, networkID, chainID) + + tests := []struct { + name string + + handler p2p.Handler + + ctx context.Context + validators []Validator + threshold uint64 + + pChainState validators.State + pChainHeight uint64 + quorumNum uint64 + quorumDen uint64 + + wantAggregateSignaturesErr error + wantVerifyErr error + }{ + { + name: "passes attestation and verification", + handler: NewHandler(&testAttestor{}, signer, networkID, chainID), + ctx: context.Background(), + validators: []Validator{ + { + NodeID: nodeID0, + PublicKey: pk0, + Weight: 1, + }, + }, + threshold: 1, + pChainState: &validatorstest.State{ + T: t, + GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { + return subnetID, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID0: { + NodeID: nodeID0, + PublicKey: pk0, + Weight: 1, + }, + }, nil + }, + }, + quorumNum: 1, + quorumDen: 1, + }, + { + name: "passes attestation and fails verification - insufficient stake", + handler: NewHandler(&testAttestor{}, signer, networkID, chainID), + ctx: context.Background(), + validators: []Validator{ + { + NodeID: nodeID0, + PublicKey: pk0, + Weight: 1, + }, + }, + threshold: 1, + pChainState: &validatorstest.State{ + T: t, + GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { + return subnetID, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID0: { + NodeID: nodeID0, + PublicKey: pk0, + Weight: 1, + }, + nodeID1: { + NodeID: nodeID1, + PublicKey: pk1, + Weight: 1, + }, + }, nil + }, + }, + quorumNum: 2, + quorumDen: 2, + wantVerifyErr: warp.ErrInsufficientWeight, + }, + { + name: "fails attestation", + handler: NewHandler( + &testAttestor{Err: errors.New("foobar")}, + signer, + networkID, + chainID, + ), + ctx: context.Background(), + validators: []Validator{ + { + NodeID: nodeID0, + PublicKey: pk0, + 
Weight: 1, + }, + }, + threshold: 1, + wantAggregateSignaturesErr: ErrInsufficientSignatures, + }, + { + name: "invalid validator set", + ctx: context.Background(), + validators: []Validator{ + { + NodeID: nodeID0, + PublicKey: pk0, + Weight: 1, + }, + { + NodeID: nodeID0, + PublicKey: pk0, + Weight: 1, + }, + }, + wantAggregateSignaturesErr: ErrDuplicateValidator, + }, + { + name: "context canceled", + ctx: func() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + return ctx + }(), + wantAggregateSignaturesErr: context.Canceled, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + ctx := context.Background() + message, err := warp.NewUnsignedMessage(networkID, chainID, []byte("payload")) + require.NoError(err) + client := p2ptest.NewClient(t, ctx, tt.handler, ids.GenerateTestNodeID(), nodeID0) + verifier := NewSignatureAggregator(logging.NoLog{}, client, 1) + + signedMessage, err := verifier.AggregateSignatures( + tt.ctx, + message, + []byte("justification"), + tt.validators, + tt.threshold, + ) + require.ErrorIs(err, tt.wantAggregateSignaturesErr) + + if signedMessage == nil { + return + } + + err = signedMessage.Signature.Verify( + ctx, + &signedMessage.UnsignedMessage, + networkID, + tt.pChainState, + 0, + tt.quorumNum, + tt.quorumDen, + ) + require.ErrorIs(err, tt.wantVerifyErr) + }) + } +} diff --git a/network/acp118/handler.go b/network/acp118/handler.go new file mode 100644 index 000000000000..e4aae28e3ce6 --- /dev/null +++ b/network/acp118/handler.go @@ -0,0 +1,107 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package acp118 + +import ( + "context" + "fmt" + "time" + + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +var _ p2p.Handler = (*Handler)(nil) + +// Attestor defines whether to a warp message payload should be attested to +type Attestor interface { + Attest(message *warp.UnsignedMessage, justification []byte) (bool, error) +} + +// NewHandler returns an instance of Handler +func NewHandler( + attestor Attestor, + signer warp.Signer, + networkID uint32, + chainID ids.ID, +) *Handler { + return &Handler{ + attestor: attestor, + signer: signer, + networkID: networkID, + chainID: chainID, + } +} + +// Handler signs warp messages +type Handler struct { + p2p.NoOpHandler + + attestor Attestor + signer warp.Signer + networkID uint32 + chainID ids.ID +} + +func (h *Handler) AppRequest( + _ context.Context, + _ ids.NodeID, + _ time.Time, + requestBytes []byte, +) ([]byte, *common.AppError) { + request := &sdk.SignatureRequest{} + if err := proto.Unmarshal(requestBytes, request); err != nil { + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to unmarshal request: %s", err), + } + } + + msg, err := warp.ParseUnsignedMessage(request.Message) + if err != nil { + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to initialize warp unsigned message: %s", err), + } + } + + ok, err := h.attestor.Attest(msg, request.Justification) + if err != nil { + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to attest request: %s", err), + } + } + + if !ok { + return 
nil, p2p.ErrAttestFailed + } + + signature, err := h.signer.Sign(msg) + if err != nil { + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to sign message: %s", err), + } + } + + response := &sdk.SignatureResponse{ + Signature: signature, + } + + responseBytes, err := proto.Marshal(response) + if err != nil { + return nil, &common.AppError{ + Code: p2p.ErrUnexpected.Code, + Message: fmt.Sprintf("failed to marshal response: %s", err), + } + } + + return responseBytes, nil +} diff --git a/network/acp118/handler_test.go b/network/acp118/handler_test.go new file mode 100644 index 000000000000..77af9e8dd0fb --- /dev/null +++ b/network/acp118/handler_test.go @@ -0,0 +1,118 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package acp118 + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/p2ptest" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +var _ Attestor = (*testAttestor)(nil) + +func TestHandler(t *testing.T) { + tests := []struct { + name string + attestor Attestor + expectedErr error + expectedVerify bool + }{ + { + name: "signature fails attestation", + attestor: &testAttestor{Err: errors.New("foo")}, + expectedErr: p2p.ErrUnexpected, + }, + { + name: "signature not attested", + attestor: &testAttestor{CantAttest: true}, + expectedErr: p2p.ErrAttestFailed, + }, + { + name: "signature attested", + attestor: &testAttestor{}, + expectedVerify: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + ctx := context.Background() + sk, err := bls.NewSecretKey() + require.NoError(err) + pk := bls.PublicFromSecretKey(sk) + networkID := uint32(123) + chainID := ids.GenerateTestID() + signer := warp.NewSigner(sk, networkID, chainID) + h := NewHandler(tt.attestor, signer, networkID, chainID) + clientNodeID := ids.GenerateTestNodeID() + serverNodeID := ids.GenerateTestNodeID() + c := p2ptest.NewClient( + t, + ctx, + h, + clientNodeID, + serverNodeID, + ) + + unsignedMessage, err := warp.NewUnsignedMessage( + networkID, + chainID, + []byte("payload"), + ) + require.NoError(err) + + request := &sdk.SignatureRequest{ + Message: unsignedMessage.Bytes(), + Justification: []byte("justification"), + } + + requestBytes, err := proto.Marshal(request) + require.NoError(err) + + done := make(chan struct{}) + onResponse := func(_ context.Context, _ ids.NodeID, responseBytes []byte, appErr error) { + defer close(done) + + if appErr != nil { + require.ErrorIs(tt.expectedErr, appErr) + return + } + + response := &sdk.SignatureResponse{} + require.NoError(proto.Unmarshal(responseBytes, response)) + + signature, err := bls.SignatureFromBytes(response.Signature) + require.NoError(err) + + require.Equal(tt.expectedVerify, bls.Verify(pk, signature, request.Message)) + } + + require.NoError(c.AppRequest(ctx, set.Of(serverNodeID), requestBytes, onResponse)) + <-done + }) + } +} + +// The zero value of testAttestor attests +type testAttestor struct { + CantAttest bool + Err error +} + +func (t testAttestor) Attest(*warp.UnsignedMessage, []byte) (bool, error) { + return !t.CantAttest, t.Err 
+} diff --git a/network/p2p/error.go b/network/p2p/error.go index 07207319a041..67b0317153e6 100644 --- a/network/p2p/error.go +++ b/network/p2p/error.go @@ -30,4 +30,10 @@ var ( Code: -4, Message: "throttled", } + // ErrAttestFailed should be used to indicate that a request failed + // to be signed due to the peer being unable to attest the message + ErrAttestFailed = &common.AppError{ + Code: -5, + Message: "failed attestation", + } ) From 99f0cde16774eb42cc9e6b00a9ac432e7773e336 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:48:26 -0400 Subject: [PATCH 058/155] undo diff Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/acp118/aggregator.go | 229 ------------------------------ network/acp118/aggregator_test.go | 205 -------------------------- x/sync/client_test.go | 19 ++- x/sync/manager.go | 23 +-- x/sync/network_server.go | 27 ++-- x/sync/network_server_test.go | 21 ++- x/sync/sync_test.go | 118 +++++++-------- 7 files changed, 107 insertions(+), 535 deletions(-) delete mode 100644 network/acp118/aggregator.go delete mode 100644 network/acp118/aggregator_test.go diff --git a/network/acp118/aggregator.go b/network/acp118/aggregator.go deleted file mode 100644 index 9cbc10a9442c..000000000000 --- a/network/acp118/aggregator.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package acp118 - -import ( - "context" - "errors" - "fmt" - "sync" - - "go.uber.org/zap" - "golang.org/x/sync/semaphore" - "google.golang.org/protobuf/proto" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/proto/pb/sdk" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/platformvm/warp" -) - -var ( - ErrDuplicateValidator = errors.New("duplicate validator") - ErrInsufficientSignatures = errors.New("failed to aggregate sufficient stake weight of signatures") -) - -type result struct { - message *warp.Message - err error -} - -type Validator struct { - NodeID ids.NodeID - PublicKey *bls.PublicKey - Weight uint64 -} - -type indexedValidator struct { - Validator - I int -} - -// NewSignatureAggregator returns an instance of SignatureAggregator -func NewSignatureAggregator( - log logging.Logger, - client *p2p.Client, - maxPending int, -) *SignatureAggregator { - return &SignatureAggregator{ - log: log, - client: client, - maxPending: int64(maxPending), - } -} - -// SignatureAggregator aggregates validator signatures for warp messages -type SignatureAggregator struct { - log logging.Logger - client *p2p.Client - maxPending int64 -} - -// AggregateSignatures blocks until stakeWeightThreshold of validators signs the -// provided message. Validators are issued requests in the caller-specified -// order. 
-func (s *SignatureAggregator) AggregateSignatures( - parentCtx context.Context, - message *warp.UnsignedMessage, - justification []byte, - validators []Validator, - stakeWeightThreshold uint64, -) (*warp.Message, error) { - ctx, cancel := context.WithCancel(parentCtx) - defer cancel() - - request := &sdk.SignatureRequest{ - Message: message.Bytes(), - Justification: justification, - } - - requestBytes, err := proto.Marshal(request) - if err != nil { - return nil, fmt.Errorf("failed to marshal signature request: %w", err) - } - - done := make(chan result) - pendingRequests := semaphore.NewWeighted(s.maxPending) - lock := &sync.Mutex{} - aggregatedStakeWeight := uint64(0) - attemptedStakeWeight := uint64(0) - totalStakeWeight := uint64(0) - signatures := make([]*bls.Signature, 0) - signerBitSet := set.NewBits() - - nodeIDsToValidator := make(map[ids.NodeID]indexedValidator) - for i, v := range validators { - totalStakeWeight += v.Weight - - // Sanity check the validator set provided by the caller - if _, ok := nodeIDsToValidator[v.NodeID]; ok { - return nil, fmt.Errorf("%w: %s", ErrDuplicateValidator, v.NodeID) - } - - nodeIDsToValidator[v.NodeID] = indexedValidator{ - I: i, - Validator: v, - } - } - - onResponse := func( - _ context.Context, - nodeID ids.NodeID, - responseBytes []byte, - err error, - ) { - // We are guaranteed a response from a node in the validator set - validator := nodeIDsToValidator[nodeID] - - defer func() { - lock.Lock() - attemptedStakeWeight += validator.Weight - remainingStakeWeight := totalStakeWeight - attemptedStakeWeight - failed := remainingStakeWeight < stakeWeightThreshold - lock.Unlock() - - if failed { - done <- result{err: ErrInsufficientSignatures} - } - - pendingRequests.Release(1) - }() - - if err != nil { - s.log.Debug( - "dropping response", - zap.Stringer("nodeID", nodeID), - zap.Error(err), - ) - return - } - - response := &sdk.SignatureResponse{} - if err := proto.Unmarshal(responseBytes, response); err != nil { - s.log.Debug( - "dropping response", - zap.Stringer("nodeID", nodeID), - zap.Error(err), - ) - return - } - - signature, err := bls.SignatureFromBytes(response.Signature) - if err != nil { - s.log.Debug( - "dropping response", - zap.Stringer("nodeID", nodeID), - zap.String("reason", "invalid signature"), - zap.Error(err), - ) - return - } - - if !bls.Verify(validator.PublicKey, signature, message.Bytes()) { - s.log.Debug( - "dropping response", - zap.Stringer("nodeID", nodeID), - zap.String("reason", "public key failed verification"), - ) - return - } - - lock.Lock() - signerBitSet.Add(validator.I) - signatures = append(signatures, signature) - aggregatedStakeWeight += validator.Weight - - if aggregatedStakeWeight >= stakeWeightThreshold { - aggregateSignature, err := bls.AggregateSignatures(signatures) - if err != nil { - done <- result{err: err} - lock.Unlock() - return - } - - bitSetSignature := &warp.BitSetSignature{ - Signers: signerBitSet.Bytes(), - Signature: [bls.SignatureLen]byte{}, - } - - copy(bitSetSignature.Signature[:], bls.SignatureToBytes(aggregateSignature)) - signedMessage, err := warp.NewMessage(message, bitSetSignature) - done <- result{message: signedMessage, err: err} - lock.Unlock() - return - } - - lock.Unlock() - } - - for _, validator := range validators { - if err := pendingRequests.Acquire(ctx, 1); err != nil { - return nil, err - } - - // Avoid loop shadowing in goroutine - validatorCopy := validator - go func() { - if err := s.client.AppRequest( - ctx, - set.Of(validatorCopy.NodeID), - requestBytes, - 
onResponse, - ); err != nil { - done <- result{err: err} - return - } - }() - } - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case r := <-done: - return r.message, r.err - } -} diff --git a/network/acp118/aggregator_test.go b/network/acp118/aggregator_test.go deleted file mode 100644 index 50622fc4ac99..000000000000 --- a/network/acp118/aggregator_test.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package acp118 - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/network/p2p/p2ptest" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms/platformvm/warp" -) - -func TestVerifier_Verify(t *testing.T) { - nodeID0 := ids.GenerateTestNodeID() - sk0, err := bls.NewSecretKey() - require.NoError(t, err) - pk0 := bls.PublicFromSecretKey(sk0) - - nodeID1 := ids.GenerateTestNodeID() - sk1, err := bls.NewSecretKey() - require.NoError(t, err) - pk1 := bls.PublicFromSecretKey(sk1) - - networkID := uint32(123) - subnetID := ids.GenerateTestID() - chainID := ids.GenerateTestID() - signer := warp.NewSigner(sk0, networkID, chainID) - - tests := []struct { - name string - - handler p2p.Handler - - ctx context.Context - validators []Validator - threshold uint64 - - pChainState validators.State - pChainHeight uint64 - quorumNum uint64 - quorumDen uint64 - - wantAggregateSignaturesErr error - wantVerifyErr error - }{ - { - name: "passes attestation and verification", - handler: NewHandler(&testAttestor{}, signer, networkID, chainID), - ctx: context.Background(), - validators: []Validator{ - { - NodeID: nodeID0, - PublicKey: pk0, - Weight: 1, - }, - }, - threshold: 1, - pChainState: &validatorstest.State{ - T: t, - GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { - return subnetID, nil - }, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return map[ids.NodeID]*validators.GetValidatorOutput{ - nodeID0: { - NodeID: nodeID0, - PublicKey: pk0, - Weight: 1, - }, - }, nil - }, - }, - quorumNum: 1, - quorumDen: 1, - }, - { - name: "passes attestation and fails verification - insufficient stake", - handler: NewHandler(&testAttestor{}, signer, networkID, chainID), - ctx: context.Background(), - validators: []Validator{ - { - NodeID: nodeID0, - PublicKey: pk0, - Weight: 1, - }, - }, - threshold: 1, - pChainState: &validatorstest.State{ - T: t, - GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { - return subnetID, nil - }, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return map[ids.NodeID]*validators.GetValidatorOutput{ - nodeID0: { - NodeID: nodeID0, - PublicKey: pk0, - Weight: 1, - }, - nodeID1: { - NodeID: nodeID1, - PublicKey: pk1, - Weight: 1, - }, - }, nil - }, - }, - quorumNum: 2, - quorumDen: 2, - wantVerifyErr: warp.ErrInsufficientWeight, - }, - { - name: "fails attestation", - handler: NewHandler( - &testAttestor{Err: errors.New("foobar")}, - signer, - networkID, - chainID, - ), - ctx: context.Background(), - validators: []Validator{ - { - NodeID: nodeID0, - PublicKey: pk0, 
- Weight: 1, - }, - }, - threshold: 1, - wantAggregateSignaturesErr: ErrInsufficientSignatures, - }, - { - name: "invalid validator set", - ctx: context.Background(), - validators: []Validator{ - { - NodeID: nodeID0, - PublicKey: pk0, - Weight: 1, - }, - { - NodeID: nodeID0, - PublicKey: pk0, - Weight: 1, - }, - }, - wantAggregateSignaturesErr: ErrDuplicateValidator, - }, - { - name: "context canceled", - ctx: func() context.Context { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - return ctx - }(), - wantAggregateSignaturesErr: context.Canceled, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - - ctx := context.Background() - message, err := warp.NewUnsignedMessage(networkID, chainID, []byte("payload")) - require.NoError(err) - client := p2ptest.NewClient(t, ctx, tt.handler, ids.GenerateTestNodeID(), nodeID0) - verifier := NewSignatureAggregator(logging.NoLog{}, client, 1) - - signedMessage, err := verifier.AggregateSignatures( - tt.ctx, - message, - []byte("justification"), - tt.validators, - tt.threshold, - ) - require.ErrorIs(err, tt.wantAggregateSignaturesErr) - - if signedMessage == nil { - return - } - - err = signedMessage.Signature.Verify( - ctx, - &signedMessage.UnsignedMessage, - networkID, - tt.pChainState, - 0, - tt.quorumNum, - tt.quorumDen, - ) - require.ErrorIs(err, tt.wantVerifyErr) - }) - } -} diff --git a/x/sync/client_test.go b/x/sync/client_test.go index 2633071439da..decc3e20405d 100644 --- a/x/sync/client_test.go +++ b/x/sync/client_test.go @@ -38,12 +38,12 @@ func newDefaultDBConfig() merkledb.Config { } } -func newModifiedRangeProofHandler( +func newFlakyRangeProofHandler( t *testing.T, db merkledb.MerkleDB, modifyResponse func(response *merkledb.RangeProof), ) p2p.Handler { - handler := NewSyncGetRangeProofHandler(logging.NoLog{}, db) + handler := NewGetRangeProofHandler(logging.NoLog{}, db) c := counter{m: 2} return &p2p.TestHandler{ @@ -74,12 +74,12 @@ func newModifiedRangeProofHandler( } } -func newModifiedChangeProofHandler( +func newFlakyChangeProofHandler( t *testing.T, db merkledb.MerkleDB, modifyResponse func(response *merkledb.ChangeProof), ) p2p.Handler { - handler := NewSyncGetChangeProofHandler(logging.NoLog{}, db) + handler := NewGetChangeProofHandler(logging.NoLog{}, db) c := counter{m: 2} return &p2p.TestHandler{ @@ -145,3 +145,14 @@ func (c *counter) Inc() int { c.i++ return result } + +type waitingHandler struct { + p2p.NoOpHandler + handler p2p.Handler + updatedRootChan chan struct{} +} + +func (w *waitingHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { + <-w.updatedRootChan + return w.handler.AppRequest(ctx, nodeID, deadline, requestBytes) +} diff --git a/x/sync/manager.go b/x/sync/manager.go index dd176c223033..ddcdc1637088 100644 --- a/x/sync/manager.go +++ b/x/sync/manager.go @@ -41,7 +41,7 @@ var ( ErrAlreadyStarted = errors.New("cannot start a Manager that has already been started") ErrAlreadyClosed = errors.New("Manager is closed") ErrNoRangeProofClientProvided = errors.New("range proof client is a required field of the sync config") - ErrNoChangeProofClientProvided = errors.New("change proofclient is a required field of the sync config") + ErrNoChangeProofClientProvided = errors.New("change proof client is a required field of the sync config") ErrNoDatabaseProvided = errors.New("sync database is a required field of the sync config") ErrNoLogProvided = errors.New("log is a 
required field of the sync config") ErrZeroWorkLimit = errors.New("simultaneous work limit must be greater than 0") @@ -305,7 +305,12 @@ func (m *Manager) doWork(ctx context.Context, work *workItem) { return } - <-time.After(waitTime) + select { + case <-ctx.Done(): + m.finishWorkItem() + return + case <-time.After(waitTime): + } if work.localRootID == ids.Empty { // the keys in this range have not been downloaded, so get all key/values @@ -368,7 +373,8 @@ func (m *Manager) requestChangeProof(ctx context.Context, work *workItem) { defer m.finishWorkItem() if err := m.handleChangeProofResponse(ctx, targetRootID, work, request, responseBytes, err); err != nil { - m.config.Log.Debug("dropping response", zap.Error(err)) + // TODO log responses + m.config.Log.Debug("dropping response", zap.Error(err), zap.Stringer("request", request)) m.retryWork(work) return } @@ -425,7 +431,8 @@ func (m *Manager) requestRangeProof(ctx context.Context, work *workItem) { defer m.finishWorkItem() if err := m.handleRangeProofResponse(ctx, targetRootID, work, request, responseBytes, appErr); err != nil { - m.config.Log.Debug("dropping response", zap.Error(err)) + // TODO log responses + m.config.Log.Debug("dropping response", zap.Error(err), zap.Stringer("request", request)) m.retryWork(work) return } @@ -461,10 +468,11 @@ func (m *Manager) retryWork(work *workItem) { m.workLock.Lock() m.unprocessedWork.Insert(work) m.workLock.Unlock() + m.unprocessedWorkCond.Signal() } // Returns an error if we should drop the response -func (m *Manager) handleResponse( +func (m *Manager) shouldHandleResponse( bytesLimit uint32, responseBytes []byte, err error, @@ -499,7 +507,7 @@ func (m *Manager) handleRangeProofResponse( responseBytes []byte, err error, ) error { - if err := m.handleResponse(request.BytesLimit, responseBytes, err); err != nil { + if err := m.shouldHandleResponse(request.BytesLimit, responseBytes, err); err != nil { return err } @@ -550,7 +558,7 @@ func (m *Manager) handleChangeProofResponse( responseBytes []byte, err error, ) error { - if err := m.handleResponse(request.BytesLimit, responseBytes, err); err != nil { + if err := m.shouldHandleResponse(request.BytesLimit, responseBytes, err); err != nil { return err } @@ -606,7 +614,6 @@ func (m *Manager) handleChangeProofResponse( m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, changeProof.EndProof) case *pb.SyncGetChangeProofResponse_RangeProof: - var rangeProof merkledb.RangeProof if err := rangeProof.UnmarshalProto(changeProofResp.RangeProof); err != nil { return err diff --git a/x/sync/network_server.go b/x/sync/network_server.go index 10d86ed140eb..2153f2fbcc97 100644 --- a/x/sync/network_server.go +++ b/x/sync/network_server.go @@ -49,8 +49,8 @@ var ( errInvalidBounds = errors.New("start key is greater than end key") errInvalidRootHash = fmt.Errorf("root hash must have length %d", hashing.HashLen) - _ p2p.Handler = (*SyncGetChangeProofHandler)(nil) - _ p2p.Handler = (*SyncGetRangeProofHandler)(nil) + _ p2p.Handler = (*GetChangeProofHandler)(nil) + _ p2p.Handler = (*GetRangeProofHandler)(nil) ) func maybeBytesToMaybe(mb *pb.MaybeBytes) maybe.Maybe[[]byte] { @@ -60,21 +60,21 @@ func maybeBytesToMaybe(mb *pb.MaybeBytes) maybe.Maybe[[]byte] { return maybe.Nothing[[]byte]() } -func NewSyncGetChangeProofHandler(log logging.Logger, db DB) *SyncGetChangeProofHandler { - return &SyncGetChangeProofHandler{ +func NewGetChangeProofHandler(log logging.Logger, db DB) *GetChangeProofHandler { + return &GetChangeProofHandler{ log: log, db: db, } } -type 
SyncGetChangeProofHandler struct { +type GetChangeProofHandler struct { log logging.Logger db DB } -func (*SyncGetChangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} +func (*GetChangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} -func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { +func (g *GetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { req := &pb.SyncGetChangeProofRequest{} if err := proto.Unmarshal(requestBytes, req); err != nil { return nil, &common.AppError{ @@ -120,6 +120,7 @@ func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID if !errors.Is(err, merkledb.ErrInsufficientHistory) { // We should only fail to get a change proof if we have insufficient history. // Other errors are unexpected. + // TODO define custom errors return nil, &common.AppError{ Code: p2p.ErrUnexpected.Code, Message: fmt.Sprintf("failed to get change proof: %s", err), @@ -191,21 +192,21 @@ func (s *SyncGetChangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID } } -func NewSyncGetRangeProofHandler(log logging.Logger, db DB) *SyncGetRangeProofHandler { - return &SyncGetRangeProofHandler{ +func NewGetRangeProofHandler(log logging.Logger, db DB) *GetRangeProofHandler { + return &GetRangeProofHandler{ log: log, db: db, } } -type SyncGetRangeProofHandler struct { +type GetRangeProofHandler struct { log logging.Logger db DB } -func (*SyncGetRangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} +func (*GetRangeProofHandler) AppGossip(context.Context, ids.NodeID, []byte) {} -func (s *SyncGetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { +func (g *GetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, *common.AppError) { req := &pb.SyncGetRangeProofRequest{} if err := proto.Unmarshal(requestBytes, req); err != nil { return nil, &common.AppError{ @@ -227,7 +228,7 @@ func (s *SyncGetRangeProofHandler) AppRequest(ctx context.Context, _ ids.NodeID, proofBytes, err := getRangeProof( ctx, - s.db, + g.db, req, func(rangeProof *merkledb.RangeProof) ([]byte, error) { return proto.Marshal(rangeProof.ToProto()) diff --git a/x/sync/network_server_test.go b/x/sync/network_server_test.go index 84dbd1c12682..c78554cea59f 100644 --- a/x/sync/network_server_test.go +++ b/x/sync/network_server_test.go @@ -85,7 +85,7 @@ func Test_Server_GetRangeProof(t *testing.T) { expectedErr: p2p.ErrUnexpected, }, { - name: "key limit too large", + name: "response bounded by key limit", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], KeyLimit: 2 * defaultRequestKeyLimit, @@ -94,7 +94,7 @@ func Test_Server_GetRangeProof(t *testing.T) { expectedResponseLen: defaultRequestKeyLimit, }, { - name: "bytes limit too large", + name: "response bounded by byte limit", request: &pb.SyncGetRangeProofRequest{ RootHash: smallTrieRoot[:], KeyLimit: defaultRequestKeyLimit, @@ -118,7 +118,7 @@ func Test_Server_GetRangeProof(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - handler := NewSyncGetRangeProofHandler(logging.NoLog{}, smallTrieDB) + handler := NewGetRangeProofHandler(logging.NoLog{}, smallTrieDB) requestBytes, err := proto.Marshal(test.request) require.NoError(err) responseBytes, err := 
handler.AppRequest(context.Background(), test.nodeID, time.Time{}, requestBytes) @@ -130,17 +130,12 @@ func Test_Server_GetRangeProof(t *testing.T) { require.Nil(responseBytes) return } - require.NotNil(responseBytes) - var proof *merkledb.RangeProof - if !test.proofNil { - var proofProto pb.RangeProof - require.NoError(proto.Unmarshal(responseBytes, &proofProto)) + var proofProto pb.RangeProof + require.NoError(proto.Unmarshal(responseBytes, &proofProto)) - var p merkledb.RangeProof - require.NoError(p.UnmarshalProto(&proofProto)) - proof = &p - } + var proof merkledb.RangeProof + require.NoError(proof.UnmarshalProto(&proofProto)) if test.expectedResponseLen > 0 { require.LessOrEqual(len(proof.KeyValues), test.expectedResponseLen) @@ -344,7 +339,7 @@ func Test_Server_GetChangeProof(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - handler := NewSyncGetChangeProofHandler(logging.NoLog{}, serverDB) + handler := NewGetChangeProofHandler(logging.NoLog{}, serverDB) requestBytes, err := proto.Marshal(test.request) require.NoError(err) diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index 7373c08bf5c5..41dd5829a7b8 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -19,13 +19,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/p2ptest" - "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/x/merkledb" ) -var _ p2p.Handler = (*testHandler)(nil) +var _ p2p.Handler = (*waitingHandler)(nil) func Test_Creation(t *testing.T) { require := require.New(t) @@ -40,8 +39,8 @@ func Test_Creation(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: &p2p.Client{}, - ChangeProofClient: &p2p.Client{}, + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), SimultaneousWorkLimit: 5, Log: logging.NoLog{}, BranchFactor: merkledb.BranchFactor16, @@ -73,8 +72,8 @@ func Test_Completion(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, emptyDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, emptyDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, emptyDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, emptyDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: emptyRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -178,8 +177,8 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), 
ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -254,8 +253,8 @@ func Test_Sync_FindNextKey_Deleted(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: &p2p.Client{}, - ChangeProofClient: &p2p.Client{}, + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -304,8 +303,8 @@ func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: &p2p.Client{}, - ChangeProofClient: &p2p.Client{}, + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: targetRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -341,8 +340,8 @@ func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: &p2p.Client{}, - ChangeProofClient: &p2p.Client{}, + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: targetRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -377,8 +376,8 @@ func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -439,8 +438,8 @@ func TestFindNextKeyEmptyEndProof(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: &p2p.Client{}, - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, 
NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: ids.Empty, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -508,8 +507,8 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -541,7 +540,6 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { // Test findNextKey by computing the expected result in a naive, inefficient // way and comparing it to the actual result - func TestFindNextKeyRandom(t *testing.T) { now := time.Now().UnixNano() t.Logf("seed: %d", now) @@ -732,8 +730,8 @@ func TestFindNextKeyRandom(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: localDB, - RangeProofClient: &p2p.Client{}, - ChangeProofClient: &p2p.Client{}, + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, remoteDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, remoteDB), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: ids.GenerateTestID(), SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -775,7 +773,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "range proof bad response - too many leaves in response", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.KeyValues = append(response.KeyValues, merkledb.KeyValue{}) }) @@ -785,7 +783,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "range proof bad response - removed first key in response", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.KeyValues = response.KeyValues[min(1, len(response.KeyValues)):] }) @@ -795,7 +793,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "range proof bad response - removed first key in response and replaced proof", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.KeyValues = response.KeyValues[min(1, len(response.KeyValues)):] response.KeyValues = []merkledb.KeyValue{ { @@ -821,7 +819,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "range proof bad response - removed key from middle of response", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedRangeProofHandler(t, 
db, func(response *merkledb.RangeProof) { + handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { i := rand.Intn(max(1, len(response.KeyValues)-1)) // #nosec G404 _ = slices.Delete(response.KeyValues, i, min(len(response.KeyValues), i+1)) }) @@ -832,7 +830,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "range proof bad response - start and end proof nodes removed", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.StartProof = nil response.EndProof = nil }) @@ -843,7 +841,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "range proof bad response - end proof removed", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.EndProof = nil }) @@ -853,7 +851,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "range proof bad response - empty proof", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedRangeProofHandler(t, db, func(response *merkledb.RangeProof) { + handler := newFlakyRangeProofHandler(t, db, func(response *merkledb.RangeProof) { response.StartProof = nil response.EndProof = nil response.KeyValues = nil @@ -866,7 +864,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { name: "range proof server flake", rangeProofClient: func(db merkledb.MerkleDB) *p2p.Client { return p2ptest.NewClient(t, context.Background(), &flakyHandler{ - Handler: NewSyncGetRangeProofHandler(logging.NoLog{}, db), + Handler: NewGetRangeProofHandler(logging.NoLog{}, db), c: &counter{m: 2}, }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, @@ -874,7 +872,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "change proof bad response - too many keys in response", changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { + handler := newFlakyChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { response.KeyChanges = append(response.KeyChanges, make([]merkledb.KeyChange, defaultRequestKeyLimit)...) 
}) @@ -884,7 +882,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "change proof bad response - removed first key in response", changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { + handler := newFlakyChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { response.KeyChanges = response.KeyChanges[min(1, len(response.KeyChanges)):] }) @@ -894,7 +892,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { { name: "change proof bad response - removed key from middle of response", changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { + handler := newFlakyChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { i := rand.Intn(max(1, len(response.KeyChanges)-1)) // #nosec G404 _ = slices.Delete(response.KeyChanges, i, min(len(response.KeyChanges), i+1)) }) @@ -903,9 +901,9 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { }, }, { - name: "all proof keys removed from response", + name: "change proof bad response - all proof keys removed from response", changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { - handler := newModifiedChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { + handler := newFlakyChangeProofHandler(t, db, func(response *merkledb.ChangeProof) { response.StartProof = nil response.EndProof = nil }) @@ -914,10 +912,10 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { }, }, { - name: "flaky change proof client", + name: "change proof flaky server", changeProofClient: func(db merkledb.MerkleDB) *p2p.Client { return p2ptest.NewClient(t, context.Background(), &flakyHandler{ - Handler: NewSyncGetChangeProofHandler(logging.NoLog{}, db), + Handler: NewGetChangeProofHandler(logging.NoLog{}, db), c: &counter{m: 2}, }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) }, @@ -947,13 +945,13 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { changeProofClient *p2p.Client ) - rangeProofHandler := NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync) + rangeProofHandler := NewGetRangeProofHandler(logging.NoLog{}, dbToSync) rangeProofClient = p2ptest.NewClient(t, ctx, rangeProofHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) if tt.rangeProofClient != nil { rangeProofClient = tt.rangeProofClient(dbToSync) } - changeProofHandler := NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync) + changeProofHandler := NewGetChangeProofHandler(logging.NoLog{}, dbToSync) changeProofClient = p2ptest.NewClient(t, ctx, changeProofHandler, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) if tt.changeProofClient != nil { changeProofClient = tt.changeProofClient(dbToSync) @@ -976,8 +974,12 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { require.NoError(syncer.Start(ctx)) // Simulate writes on the server - // TODO more than a single write when API is less flaky - for i := 0; i <= 1; i++ { + // + // TODO add more writes when api is not flaky. There is an inherent + // race condition in between writes where UpdateSyncTarget might + // error because it has already reached the sync target before it + // is called. 
+ for i := 0; i < 50; i++ { addkey := make([]byte, r.Intn(50)) _, err = r.Read(addkey) require.NoError(err) @@ -1029,8 +1031,8 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { ctx := context.Background() syncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -1056,8 +1058,8 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { newSyncer, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), - ChangeProofClient: p2ptest.NewClient(t, ctx, NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, dbToSync), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -1126,13 +1128,13 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { updatedRootChan <- struct{}{} ctx := context.Background() - rangeProofClient := p2ptest.NewClient(t, ctx, &testHandler{ - handler: NewSyncGetRangeProofHandler(logging.NoLog{}, dbToSync), + rangeProofClient := p2ptest.NewClient(t, ctx, &waitingHandler{ + handler: NewGetRangeProofHandler(logging.NoLog{}, dbToSync), updatedRootChan: updatedRootChan, }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) - changeProofClient := p2ptest.NewClient(t, ctx, &testHandler{ - handler: NewSyncGetChangeProofHandler(logging.NoLog{}, dbToSync), + changeProofClient := p2ptest.NewClient(t, ctx, &waitingHandler{ + handler: NewGetChangeProofHandler(logging.NoLog{}, dbToSync), updatedRootChan: updatedRootChan, }, ids.GenerateTestNodeID(), ids.GenerateTestNodeID()) @@ -1182,10 +1184,11 @@ func Test_Sync_UpdateSyncTarget(t *testing.T) { newDefaultDBConfig(), ) require.NoError(err) + ctx := context.Background() m, err := NewManager(ManagerConfig{ DB: db, - RangeProofClient: &p2p.Client{}, - ChangeProofClient: &p2p.Client{}, + RangeProofClient: p2ptest.NewClient(t, ctx, NewGetRangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), + ChangeProofClient: p2ptest.NewClient(t, ctx, NewGetChangeProofHandler(logging.NoLog{}, db), ids.GenerateTestNodeID(), ids.GenerateTestNodeID()), TargetRoot: ids.Empty, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, @@ -1284,14 +1287,3 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen } return db, batch.Write() } - -type testHandler struct { - p2p.NoOpHandler - handler p2p.Handler - updatedRootChan chan struct{} -} - -func (t *testHandler) AppRequest(ctx 
context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { - <-t.updatedRootChan - return t.handler.AppRequest(ctx, nodeID, deadline, requestBytes) -} From 00fc8d1b578c41484d21b26b1b9a165f8191ee3a Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:54:09 -0400 Subject: [PATCH 059/155] nit Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/acp118/handler.go | 25 ++++++++----------------- network/acp118/handler_test.go | 12 ++++++------ 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/network/acp118/handler.go b/network/acp118/handler.go index e4aae28e3ce6..1a9e210e55d8 100644 --- a/network/acp118/handler.go +++ b/network/acp118/handler.go @@ -21,21 +21,17 @@ var _ p2p.Handler = (*Handler)(nil) // Attestor defines whether to a warp message payload should be attested to type Attestor interface { - Attest(message *warp.UnsignedMessage, justification []byte) (bool, error) + Attest(message *warp.UnsignedMessage, justification []byte) (bool, *common.AppError) } // NewHandler returns an instance of Handler func NewHandler( attestor Attestor, signer warp.Signer, - networkID uint32, - chainID ids.ID, ) *Handler { return &Handler{ - attestor: attestor, - signer: signer, - networkID: networkID, - chainID: chainID, + attestor: attestor, + signer: signer, } } @@ -43,10 +39,8 @@ func NewHandler( type Handler struct { p2p.NoOpHandler - attestor Attestor - signer warp.Signer - networkID uint32 - chainID ids.ID + attestor Attestor + signer warp.Signer } func (h *Handler) AppRequest( @@ -71,12 +65,9 @@ func (h *Handler) AppRequest( } } - ok, err := h.attestor.Attest(msg, request.Justification) - if err != nil { - return nil, &common.AppError{ - Code: p2p.ErrUnexpected.Code, - Message: fmt.Sprintf("failed to attest request: %s", err), - } + ok, appErr := h.attestor.Attest(msg, request.Justification) + if appErr != nil { + return nil, appErr } if !ok { diff --git a/network/acp118/handler_test.go b/network/acp118/handler_test.go index 77af9e8dd0fb..c3a9a7de96ee 100644 --- a/network/acp118/handler_test.go +++ b/network/acp118/handler_test.go @@ -5,7 +5,6 @@ package acp118 import ( "context" - "errors" "testing" "github.com/stretchr/testify/require" @@ -15,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/p2ptest" "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/warp" @@ -31,8 +31,8 @@ func TestHandler(t *testing.T) { }{ { name: "signature fails attestation", - attestor: &testAttestor{Err: errors.New("foo")}, - expectedErr: p2p.ErrUnexpected, + attestor: &testAttestor{Err: &common.AppError{Code: int32(123)}}, + expectedErr: &common.AppError{Code: int32(123)}, }, { name: "signature not attested", @@ -57,7 +57,7 @@ func TestHandler(t *testing.T) { networkID := uint32(123) chainID := ids.GenerateTestID() signer := warp.NewSigner(sk, networkID, chainID) - h := NewHandler(tt.attestor, signer, networkID, chainID) + h := NewHandler(tt.attestor, signer) clientNodeID := ids.GenerateTestNodeID() serverNodeID := ids.GenerateTestNodeID() c := p2ptest.NewClient( @@ -110,9 +110,9 @@ func TestHandler(t *testing.T) { // The zero value of testAttestor attests type testAttestor struct { CantAttest bool - Err error + Err 
*common.AppError } -func (t testAttestor) Attest(*warp.UnsignedMessage, []byte) (bool, error) { +func (t testAttestor) Attest(*warp.UnsignedMessage, []byte) (bool, *common.AppError) { return !t.CantAttest, t.Err } From c08a1d71d0245fd36508f2a1ece8824f1beddf49 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:55:40 -0400 Subject: [PATCH 060/155] undo Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/p2p/p2ptest/client.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/network/p2p/p2ptest/client.go b/network/p2p/p2ptest/client.go index 8d41d6b99bce..b75654028666 100644 --- a/network/p2p/p2ptest/client.go +++ b/network/p2p/p2ptest/client.go @@ -23,7 +23,7 @@ import ( // communicate with a server with the specified handler func NewClient( t *testing.T, - rootCtx context.Context, + ctx context.Context, handler p2p.Handler, clientNodeID ids.NodeID, serverNodeID ids.NodeID, @@ -38,6 +38,8 @@ func NewClient( require.NoError(t, err) clientSender.SendAppGossipF = func(ctx context.Context, _ common.SendConfig, gossipBytes []byte) error { + // Send the request asynchronously to avoid deadlock when the server + // sends the response back to the client go func() { require.NoError(t, serverNetwork.AppGossip(ctx, clientNodeID, gossipBytes)) }() @@ -56,6 +58,8 @@ func NewClient( } serverSender.SendAppResponseF = func(ctx context.Context, _ ids.NodeID, requestID uint32, responseBytes []byte) error { + // Send the request asynchronously to avoid deadlock when the server + // sends the response back to the client go func() { require.NoError(t, clientNetwork.AppResponse(ctx, serverNodeID, requestID, responseBytes)) }() @@ -64,6 +68,8 @@ func NewClient( } serverSender.SendAppErrorF = func(ctx context.Context, _ ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error { + // Send the request asynchronously to avoid deadlock when the server + // sends the response back to the client go func() { require.NoError(t, clientNetwork.AppRequestFailed(ctx, serverNodeID, requestID, &common.AppError{ Code: errorCode, @@ -74,10 +80,10 @@ func NewClient( return nil } - require.NoError(t, clientNetwork.Connected(rootCtx, clientNodeID, nil)) - require.NoError(t, clientNetwork.Connected(rootCtx, serverNodeID, nil)) - require.NoError(t, serverNetwork.Connected(rootCtx, clientNodeID, nil)) - require.NoError(t, serverNetwork.Connected(rootCtx, serverNodeID, nil)) + require.NoError(t, clientNetwork.Connected(ctx, clientNodeID, nil)) + require.NoError(t, clientNetwork.Connected(ctx, serverNodeID, nil)) + require.NoError(t, serverNetwork.Connected(ctx, clientNodeID, nil)) + require.NoError(t, serverNetwork.Connected(ctx, serverNodeID, nil)) require.NoError(t, serverNetwork.AddHandler(0, handler)) return clientNetwork.NewClient(0) From 9d07a45869d2a90bc54f3592ea6c178680f23ae8 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:57:31 -0400 Subject: [PATCH 061/155] nit Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/acp118/handler.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/network/acp118/handler.go b/network/acp118/handler.go index 1a9e210e55d8..1a0c43bb0bc6 100644 --- a/network/acp118/handler.go +++ b/network/acp118/handler.go @@ -25,10 +25,7 @@ type Attestor interface { } // NewHandler returns an instance of Handler -func NewHandler( - attestor Attestor, - 
signer warp.Signer, -) *Handler { +func NewHandler(attestor Attestor, signer warp.Signer) *Handler { return &Handler{ attestor: attestor, signer: signer, From f91e0a89611e137eb54d7da46e1a4478a7fe6877 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 11:02:48 -0400 Subject: [PATCH 062/155] nit Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/acp118/handler.go | 11 +++-------- network/acp118/handler_test.go | 13 +++---------- network/p2p/error.go | 6 ------ 3 files changed, 6 insertions(+), 24 deletions(-) diff --git a/network/acp118/handler.go b/network/acp118/handler.go index 1a0c43bb0bc6..0fb4dd6185b6 100644 --- a/network/acp118/handler.go +++ b/network/acp118/handler.go @@ -21,7 +21,7 @@ var _ p2p.Handler = (*Handler)(nil) // Attestor defines whether to a warp message payload should be attested to type Attestor interface { - Attest(message *warp.UnsignedMessage, justification []byte) (bool, *common.AppError) + Attest(message *warp.UnsignedMessage, justification []byte) *common.AppError } // NewHandler returns an instance of Handler @@ -62,13 +62,8 @@ func (h *Handler) AppRequest( } } - ok, appErr := h.attestor.Attest(msg, request.Justification) - if appErr != nil { - return nil, appErr - } - - if !ok { - return nil, p2p.ErrAttestFailed + if err := h.attestor.Attest(msg, request.Justification); err != nil { + return nil, err } signature, err := h.signer.Sign(msg) diff --git a/network/acp118/handler_test.go b/network/acp118/handler_test.go index c3a9a7de96ee..0897e74e39c7 100644 --- a/network/acp118/handler_test.go +++ b/network/acp118/handler_test.go @@ -11,7 +11,6 @@ import ( "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/p2ptest" "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -34,11 +33,6 @@ func TestHandler(t *testing.T) { attestor: &testAttestor{Err: &common.AppError{Code: int32(123)}}, expectedErr: &common.AppError{Code: int32(123)}, }, - { - name: "signature not attested", - attestor: &testAttestor{CantAttest: true}, - expectedErr: p2p.ErrAttestFailed, - }, { name: "signature attested", attestor: &testAttestor{}, @@ -109,10 +103,9 @@ func TestHandler(t *testing.T) { // The zero value of testAttestor attests type testAttestor struct { - CantAttest bool - Err *common.AppError + Err *common.AppError } -func (t testAttestor) Attest(*warp.UnsignedMessage, []byte) (bool, *common.AppError) { - return !t.CantAttest, t.Err +func (t testAttestor) Attest(*warp.UnsignedMessage, []byte) *common.AppError { + return t.Err } diff --git a/network/p2p/error.go b/network/p2p/error.go index 67b0317153e6..07207319a041 100644 --- a/network/p2p/error.go +++ b/network/p2p/error.go @@ -30,10 +30,4 @@ var ( Code: -4, Message: "throttled", } - // ErrAttestFailed should be used to indicate that a request failed - // to be signed due to the peer being unable to attest the message - ErrAttestFailed = &common.AppError{ - Code: -5, - Message: "failed attestation", - } ) From a35b09089f30c988ddceb47ecee87becfdae2ddd Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 11:08:02 -0400 Subject: [PATCH 063/155] add context Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/acp118/handler.go | 10 +++++++--- network/acp118/handler_test.go | 6 +++++- 2 files 
changed, 12 insertions(+), 4 deletions(-) diff --git a/network/acp118/handler.go b/network/acp118/handler.go index 0fb4dd6185b6..1dc9ef59fb40 100644 --- a/network/acp118/handler.go +++ b/network/acp118/handler.go @@ -21,7 +21,11 @@ var _ p2p.Handler = (*Handler)(nil) // Attestor defines whether to a warp message payload should be attested to type Attestor interface { - Attest(message *warp.UnsignedMessage, justification []byte) *common.AppError + Attest( + ctx context.Context, + message *warp.UnsignedMessage, + justification []byte, + ) *common.AppError } // NewHandler returns an instance of Handler @@ -41,7 +45,7 @@ type Handler struct { } func (h *Handler) AppRequest( - _ context.Context, + ctx context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte, @@ -62,7 +66,7 @@ func (h *Handler) AppRequest( } } - if err := h.attestor.Attest(msg, request.Justification); err != nil { + if err := h.attestor.Attest(ctx, msg, request.Justification); err != nil { return nil, err } diff --git a/network/acp118/handler_test.go b/network/acp118/handler_test.go index 0897e74e39c7..c5cc827bc1e3 100644 --- a/network/acp118/handler_test.go +++ b/network/acp118/handler_test.go @@ -106,6 +106,10 @@ type testAttestor struct { Err *common.AppError } -func (t testAttestor) Attest(*warp.UnsignedMessage, []byte) *common.AppError { +func (t testAttestor) Attest( + context.Context, + *warp.UnsignedMessage, + []byte, +) *common.AppError { return t.Err } From dd7029cd7564581ae7f2806ba9237a1fa4f714c1 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 11:08:18 -0400 Subject: [PATCH 064/155] fix Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/acp118/handler_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/acp118/handler_test.go b/network/acp118/handler_test.go index c5cc827bc1e3..6a1f866851f6 100644 --- a/network/acp118/handler_test.go +++ b/network/acp118/handler_test.go @@ -95,7 +95,7 @@ func TestHandler(t *testing.T) { require.Equal(tt.expectedVerify, bls.Verify(pk, signature, request.Message)) } - require.NoError(c.AppRequest(ctx, set.Of(serverNodeID), requestBytes, onResponse)) + require.NoError(c.AppRequest(ctx, set.Of(clientNodeID), requestBytes, onResponse)) <-done }) } From 8978452302048f2513647c6455b797819b3d9e1c Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 11:11:25 -0400 Subject: [PATCH 065/155] rename attestor -> verifier Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/acp118/handler.go | 14 +++++++------- network/acp118/handler_test.go | 20 ++++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/network/acp118/handler.go b/network/acp118/handler.go index 1dc9ef59fb40..058c5af9dbfb 100644 --- a/network/acp118/handler.go +++ b/network/acp118/handler.go @@ -19,9 +19,9 @@ import ( var _ p2p.Handler = (*Handler)(nil) -// Attestor defines whether to a warp message payload should be attested to -type Attestor interface { - Attest( +// Verifier defines whether a warp message payload should be verified +type Verifier interface { + Verify( ctx context.Context, message *warp.UnsignedMessage, justification []byte, @@ -29,9 +29,9 @@ type Attestor interface { } // NewHandler returns an instance of Handler -func NewHandler(attestor Attestor, signer warp.Signer) *Handler { +func NewHandler(verifier Verifier, signer warp.Signer) *Handler { return &Handler{ - 
attestor: attestor, + verifier: verifier, signer: signer, } } @@ -40,7 +40,7 @@ func NewHandler(attestor Attestor, signer warp.Signer) *Handler { type Handler struct { p2p.NoOpHandler - attestor Attestor + verifier Verifier signer warp.Signer } @@ -66,7 +66,7 @@ func (h *Handler) AppRequest( } } - if err := h.attestor.Attest(ctx, msg, request.Justification); err != nil { + if err := h.verifier.Verify(ctx, msg, request.Justification); err != nil { return nil, err } diff --git a/network/acp118/handler_test.go b/network/acp118/handler_test.go index 6a1f866851f6..bdf76f3b942c 100644 --- a/network/acp118/handler_test.go +++ b/network/acp118/handler_test.go @@ -19,23 +19,23 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) -var _ Attestor = (*testAttestor)(nil) +var _ Verifier = (*testVerifier)(nil) func TestHandler(t *testing.T) { tests := []struct { name string - attestor Attestor + verifier Verifier expectedErr error expectedVerify bool }{ { - name: "signature fails attestation", - attestor: &testAttestor{Err: &common.AppError{Code: int32(123)}}, + name: "signature fails verification", + verifier: &testVerifier{Err: &common.AppError{Code: int32(123)}}, expectedErr: &common.AppError{Code: int32(123)}, }, { - name: "signature attested", - attestor: &testAttestor{}, + name: "signature signed", + verifier: &testVerifier{}, expectedVerify: true, }, } @@ -51,7 +51,7 @@ func TestHandler(t *testing.T) { networkID := uint32(123) chainID := ids.GenerateTestID() signer := warp.NewSigner(sk, networkID, chainID) - h := NewHandler(tt.attestor, signer) + h := NewHandler(tt.verifier, signer) clientNodeID := ids.GenerateTestNodeID() serverNodeID := ids.GenerateTestNodeID() c := p2ptest.NewClient( @@ -101,12 +101,12 @@ func TestHandler(t *testing.T) { } } -// The zero value of testAttestor attests -type testAttestor struct { +// The zero value of testVerifier verifies +type testVerifier struct { Err *common.AppError } -func (t testAttestor) Attest( +func (t testVerifier) Verify( context.Context, *warp.UnsignedMessage, []byte, From a3832095128c0fe69e64da579f1e91b668e5a2e7 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 11:21:23 -0400 Subject: [PATCH 066/155] nit Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/{ => p2p}/acp118/handler.go | 0 network/{ => p2p}/acp118/handler_test.go | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename network/{ => p2p}/acp118/handler.go (100%) rename network/{ => p2p}/acp118/handler_test.go (100%) diff --git a/network/acp118/handler.go b/network/p2p/acp118/handler.go similarity index 100% rename from network/acp118/handler.go rename to network/p2p/acp118/handler.go diff --git a/network/acp118/handler_test.go b/network/p2p/acp118/handler_test.go similarity index 100% rename from network/acp118/handler_test.go rename to network/p2p/acp118/handler_test.go From 8b52eadf9434ad162f2661702072c26e604bc0f9 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 1 Oct 2024 11:25:35 -0400 Subject: [PATCH 067/155] nit Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- network/p2p/acp118/handler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/p2p/acp118/handler.go b/network/p2p/acp118/handler.go index 058c5af9dbfb..8fb39b8a5513 100644 --- a/network/p2p/acp118/handler.go +++ b/network/p2p/acp118/handler.go @@ -19,7 +19,7 @@ import ( var _ p2p.Handler = (*Handler)(nil) -// 
Verifier defines whether a warp message payload should be verified +// Verifier verifies that a warp message should be signed type Verifier interface { Verify( ctx context.Context, From ddb9ba6d72b7957d3374e20f4ef7a5dcc333cc32 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 1 Oct 2024 15:11:05 -0400 Subject: [PATCH 068/155] Add deactivation owner --- vms/platformvm/state/subnet_only_validator.go | 5 +++++ .../state/subnet_only_validator_test.go | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 9b505cb22842..487471a5f2b7 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -76,6 +76,10 @@ type SubnetOnlyValidator struct { // balance of the validator after removing accrued fees. RemainingBalanceOwner []byte `serialize:"true"` + // DeactivationOwner is the owner that can manually deactivate the + // validator. + DeactivationOwner []byte `serialize:"true"` + // StartTime is the unix timestamp, in seconds, when this validator was // added to the set. StartTime uint64 `serialize:"true"` @@ -128,6 +132,7 @@ func (v SubnetOnlyValidator) validateConstants(o SubnetOnlyValidator) bool { v.NodeID == o.NodeID && bytes.Equal(v.PublicKey, o.PublicKey) && bytes.Equal(v.RemainingBalanceOwner, o.RemainingBalanceOwner) && + bytes.Equal(v.DeactivationOwner, o.DeactivationOwner) && v.StartTime == o.StartTime } diff --git a/vms/platformvm/state/subnet_only_validator_test.go b/vms/platformvm/state/subnet_only_validator_test.go index e76c4b73a242..3ebf754382d9 100644 --- a/vms/platformvm/state/subnet_only_validator_test.go +++ b/vms/platformvm/state/subnet_only_validator_test.go @@ -82,6 +82,7 @@ func TestSubnetOnlyValidator_validateConstants(t *testing.T) { NodeID: ids.GenerateTestNodeID(), PublicKey: utils.RandomBytes(bls.PublicKeyLen), RemainingBalanceOwner: utils.RandomBytes(32), + DeactivationOwner: utils.RandomBytes(32), StartTime: rand.Uint64(), // #nosec G404 } @@ -103,6 +104,7 @@ func TestSubnetOnlyValidator_validateConstants(t *testing.T) { NodeID: ids.GenerateTestNodeID(), PublicKey: utils.RandomBytes(bls.PublicKeyLen), RemainingBalanceOwner: utils.RandomBytes(32), + DeactivationOwner: utils.RandomBytes(32), StartTime: rand.Uint64(), // #nosec G404 }, expected: true, @@ -115,6 +117,7 @@ func TestSubnetOnlyValidator_validateConstants(t *testing.T) { NodeID: sov.NodeID, PublicKey: sov.PublicKey, RemainingBalanceOwner: sov.RemainingBalanceOwner, + DeactivationOwner: sov.DeactivationOwner, StartTime: sov.StartTime, }, }, @@ -126,6 +129,7 @@ func TestSubnetOnlyValidator_validateConstants(t *testing.T) { NodeID: ids.GenerateTestNodeID(), PublicKey: sov.PublicKey, RemainingBalanceOwner: sov.RemainingBalanceOwner, + DeactivationOwner: sov.DeactivationOwner, StartTime: sov.StartTime, }, }, @@ -137,6 +141,7 @@ func TestSubnetOnlyValidator_validateConstants(t *testing.T) { NodeID: sov.NodeID, PublicKey: utils.RandomBytes(bls.PublicKeyLen), RemainingBalanceOwner: sov.RemainingBalanceOwner, + DeactivationOwner: sov.DeactivationOwner, StartTime: sov.StartTime, }, }, @@ -148,6 +153,19 @@ func TestSubnetOnlyValidator_validateConstants(t *testing.T) { NodeID: sov.NodeID, PublicKey: sov.PublicKey, RemainingBalanceOwner: utils.RandomBytes(32), + DeactivationOwner: sov.DeactivationOwner, + StartTime: sov.StartTime, + }, + }, + { + name: "different deactivationOwner", + v: SubnetOnlyValidator{ + ValidationID: sov.ValidationID, + 
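Taken together, the acp118 changes above leave the handler parameterized by a Verifier that returns a *common.AppError (nil meaning the message may be signed) and a warp.Signer, with the package now living at network/p2p/acp118. A minimal sketch of wiring this up, using only the signatures shown in these diffs; the allowAllVerifier type, its unconditional approval, and the newACP118Handler helper are illustrative and are not part of these patches:

package example

import (
	"context"

	"github.com/ava-labs/avalanchego/network/p2p/acp118"
	"github.com/ava-labs/avalanchego/snow/engine/common"
	"github.com/ava-labs/avalanchego/vms/platformvm/warp"
)

// allowAllVerifier is a hypothetical Verifier that approves every request.
// A real implementation would inspect the unsigned message and justification
// and return a *common.AppError to refuse signing.
type allowAllVerifier struct{}

func (allowAllVerifier) Verify(
	context.Context,
	*warp.UnsignedMessage,
	[]byte,
) *common.AppError {
	return nil // nil lets the handler proceed to sign the message
}

// newACP118Handler sketches how a VM that already holds a warp signer might
// construct the handler after the rename and package move.
func newACP118Handler(signer warp.Signer) *acp118.Handler {
	return acp118.NewHandler(allowAllVerifier{}, signer)
}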
SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: sov.PublicKey, + RemainingBalanceOwner: sov.RemainingBalanceOwner, + DeactivationOwner: utils.RandomBytes(32), StartTime: sov.StartTime, }, }, @@ -159,6 +177,7 @@ func TestSubnetOnlyValidator_validateConstants(t *testing.T) { NodeID: sov.NodeID, PublicKey: sov.PublicKey, RemainingBalanceOwner: sov.RemainingBalanceOwner, + DeactivationOwner: sov.DeactivationOwner, StartTime: rand.Uint64(), // #nosec G404 }, }, From 556f1ebf3ffb704b03ad241b5b06189bb80ed508 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 1 Oct 2024 15:28:04 -0400 Subject: [PATCH 069/155] fix tests --- vms/platformvm/state/state_test.go | 258 +++++++++--------- .../state/subnet_only_validator_test.go | 16 +- 2 files changed, 135 insertions(+), 139 deletions(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index f093b90a587c..d0db7bd6d11c 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1424,13 +1424,12 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "initially active not modified", initial: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active }, }, }, @@ -1438,13 +1437,12 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "initially inactive not modified", initial: []SubnetOnlyValidator{ { - ValidationID: ids.GenerateTestID(), - SubnetID: ids.GenerateTestID(), - NodeID: ids.GenerateTestNodeID(), - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 0, // Inactive + ValidationID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive }, }, }, @@ -1452,23 +1450,21 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "initially active removed", initial: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active }, }, sovs: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 0, // Removed + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 0, // Removed }, }, }, @@ -1476,23 +1472,21 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "initially inactive removed", initial: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 0, // Inactive + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive }, }, sovs: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: 
sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 0, // Removed + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 0, // Removed }, }, }, @@ -1500,24 +1494,22 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "increase active weight", initial: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active }, }, sovs: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 2, // Increased - EndAccumulatedFee: 1, // Active + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 2, // Increased + EndAccumulatedFee: 1, // Active }, }, }, @@ -1525,24 +1517,22 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "deactivate", initial: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active }, }, sovs: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 0, // Inactive + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive }, }, }, @@ -1550,24 +1540,22 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "reactivate", initial: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 0, // Inactive + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive }, }, sovs: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active }, }, }, @@ -1575,33 +1563,30 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "update multiple times", initial: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active }, }, sovs: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, 
- SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 2, // Not removed - EndAccumulatedFee: 1, // Inactive + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 2, // Not removed + EndAccumulatedFee: 1, // Inactive }, { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 3, // Not removed - EndAccumulatedFee: 1, // Inactive + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 3, // Not removed + EndAccumulatedFee: 1, // Inactive }, }, }, @@ -1609,32 +1594,29 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "change validationID", initial: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active }, }, sovs: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 0, // Removed + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 0, // Removed }, { - ValidationID: ids.GenerateTestID(), - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: otherPKBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Inactive + ValidationID: ids.GenerateTestID(), + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: otherPKBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Inactive }, }, }, @@ -1642,21 +1624,19 @@ func TestSubnetOnlyValidators(t *testing.T) { name: "added and removed", sovs: []SubnetOnlyValidator{ { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 1, // Not removed - EndAccumulatedFee: 1, // Active + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 1, // Active }, { - ValidationID: sov.ValidationID, - SubnetID: sov.SubnetID, - NodeID: sov.NodeID, - PublicKey: pkBytes, - RemainingBalanceOwner: []byte{}, - Weight: 0, // Removed + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 0, // Removed }, }, }, @@ -1674,6 +1654,9 @@ func TestSubnetOnlyValidators(t *testing.T) { subnetIDs set.Set[ids.ID] ) for _, sov := range test.initial { + sov.RemainingBalanceOwner = []byte{} + sov.DeactivationOwner = []byte{} + require.NoError(state.PutSubnetOnlyValidator(sov)) initialSOVs[sov.ValidationID] = sov subnetIDs.Add(sov.SubnetID) @@ -1687,6 +1670,9 @@ func TestSubnetOnlyValidators(t *testing.T) { expectedSOVs := maps.Clone(initialSOVs) for _, sov := range test.sovs { + sov.RemainingBalanceOwner = []byte{} + sov.DeactivationOwner = []byte{} + require.NoError(d.PutSubnetOnlyValidator(sov)) expectedSOVs[sov.ValidationID] = sov subnetIDs.Add(sov.SubnetID) diff --git a/vms/platformvm/state/subnet_only_validator_test.go b/vms/platformvm/state/subnet_only_validator_test.go index 3ebf754382d9..c7194c7932cf 
100644 --- a/vms/platformvm/state/subnet_only_validator_test.go +++ b/vms/platformvm/state/subnet_only_validator_test.go @@ -209,13 +209,22 @@ func TestSubnetOnlyValidator_DatabaseHelpers(t *testing.T) { pk := bls.PublicFromSecretKey(sk) pkBytes := bls.PublicKeyToUncompressedBytes(pk) - var owner fx.Owner = &secp256k1fx.OutputOwners{ + var remainingBalanceOwner fx.Owner = &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ ids.GenerateTestShortID(), }, } - ownerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &owner) + remainingBalanceOwnerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &remainingBalanceOwner) + require.NoError(err) + + var deactivationOwner fx.Owner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + } + deactivationOwnerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &deactivationOwner) require.NoError(err) vdr := SubnetOnlyValidator{ @@ -223,7 +232,8 @@ func TestSubnetOnlyValidator_DatabaseHelpers(t *testing.T) { SubnetID: ids.GenerateTestID(), NodeID: ids.GenerateTestNodeID(), PublicKey: pkBytes, - RemainingBalanceOwner: ownerBytes, + RemainingBalanceOwner: remainingBalanceOwnerBytes, + DeactivationOwner: deactivationOwnerBytes, StartTime: rand.Uint64(), // #nosec G404 Weight: rand.Uint64(), // #nosec G404 MinNonce: rand.Uint64(), // #nosec G404 From 891bca4232d3690c64248e376e1efd961fba94bc Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 2 Oct 2024 15:25:01 -0400 Subject: [PATCH 070/155] Store conversionID --- vms/platformvm/service.go | 5 +- vms/platformvm/state/diff.go | 49 +- vms/platformvm/state/diff_test.go | 59 ++- vms/platformvm/state/mock_chain.go | 31 +- vms/platformvm/state/mock_diff.go | 31 +- vms/platformvm/state/mock_state.go | 45 +- vms/platformvm/state/state.go | 68 +-- vms/platformvm/state/state_test.go | 55 +- .../txs/executor/create_chain_test.go | 7 +- .../txs/executor/staker_tx_verification.go | 2 +- .../txs/executor/standard_tx_executor.go | 3 +- .../txs/executor/standard_tx_executor_test.go | 21 +- .../txs/executor/subnet_tx_verification.go | 2 +- x/merkledb/mock_db.go | 491 ++++++++++++++++++ 14 files changed, 706 insertions(+), 163 deletions(-) create mode 100644 x/merkledb/mock_db.go diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 91b7810d30df..c4779e85789e 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -440,6 +440,7 @@ type GetSubnetResponse struct { // subnet transformation tx ID for an elastic subnet SubnetTransformationTxID ids.ID `json:"subnetTransformationTxID"` // subnet manager information for a permissionless L1 + ConversionID ids.ID `json:"conversionID"` ManagerChainID ids.ID `json:"managerChainID"` ManagerAddress types.JSONByteSlice `json:"managerAddress"` } @@ -490,12 +491,14 @@ func (s *Service) GetSubnet(_ *http.Request, args *GetSubnetArgs, response *GetS return err } - switch chainID, addr, err := s.vm.state.GetSubnetManager(args.SubnetID); err { + switch conversionID, chainID, addr, err := s.vm.state.GetSubnetConversion(args.SubnetID); err { case nil: response.IsPermissioned = false + response.ConversionID = conversionID response.ManagerChainID = chainID response.ManagerAddress = addr case database.ErrNotFound: + response.ConversionID = ids.Empty response.ManagerChainID = ids.Empty response.ManagerAddress = []byte(nil) default: diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 5110aa1ea6e0..470d1ea1ff69 100644 --- 
a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -55,8 +55,8 @@ type diff struct { addedSubnetIDs []ids.ID // Subnet ID --> Owner of the subnet subnetOwners map[ids.ID]fx.Owner - // Subnet ID --> Manager of the subnet - subnetManagers map[ids.ID]chainIDAndAddr + // Subnet ID --> Conversion of the subnet + subnetConversions map[ids.ID]subnetConversion // Subnet ID --> Tx that transforms the subnet transformedSubnets map[ids.ID]*txs.Tx @@ -79,17 +79,17 @@ func NewDiff( return nil, fmt.Errorf("%w: %s", ErrMissingParentState, parentID) } return &diff{ - parentID: parentID, - stateVersions: stateVersions, - timestamp: parentState.GetTimestamp(), - feeState: parentState.GetFeeState(), - sovExcess: parentState.GetSoVExcess(), - accruedFees: parentState.GetAccruedFees(), - parentActiveSOVs: parentState.NumActiveSubnetOnlyValidators(), - expiryDiff: newExpiryDiff(), - sovDiff: newSubnetOnlyValidatorsDiff(), - subnetOwners: make(map[ids.ID]fx.Owner), - subnetManagers: make(map[ids.ID]chainIDAndAddr), + parentID: parentID, + stateVersions: stateVersions, + timestamp: parentState.GetTimestamp(), + feeState: parentState.GetFeeState(), + sovExcess: parentState.GetSoVExcess(), + accruedFees: parentState.GetAccruedFees(), + parentActiveSOVs: parentState.NumActiveSubnetOnlyValidators(), + expiryDiff: newExpiryDiff(), + sovDiff: newSubnetOnlyValidatorsDiff(), + subnetOwners: make(map[ids.ID]fx.Owner), + subnetConversions: make(map[ids.ID]subnetConversion), }, nil } @@ -435,23 +435,24 @@ func (d *diff) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { d.subnetOwners[subnetID] = owner } -func (d *diff) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { - if manager, exists := d.subnetManagers[subnetID]; exists { - return manager.ChainID, manager.Addr, nil +func (d *diff) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { + if conversion, exists := d.subnetConversions[subnetID]; exists { + return conversion.ConversionID, conversion.ChainID, conversion.Addr, nil } // If the subnet manager was not assigned in this diff, ask the parent state. 
parentState, ok := d.stateVersions.GetState(d.parentID) if !ok { - return ids.Empty, nil, ErrMissingParentState + return ids.Empty, ids.Empty, nil, ErrMissingParentState } - return parentState.GetSubnetManager(subnetID) + return parentState.GetSubnetConversion(subnetID) } -func (d *diff) SetSubnetManager(subnetID ids.ID, chainID ids.ID, addr []byte) { - d.subnetManagers[subnetID] = chainIDAndAddr{ - ChainID: chainID, - Addr: addr, +func (d *diff) SetSubnetConversion(subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) { + d.subnetConversions[subnetID] = subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, } } @@ -675,8 +676,8 @@ func (d *diff) Apply(baseState Chain) error { for subnetID, owner := range d.subnetOwners { baseState.SetSubnetOwner(subnetID, owner) } - for subnetID, manager := range d.subnetManagers { - baseState.SetSubnetManager(subnetID, manager.ChainID, manager.Addr) + for subnetID, conversion := range d.subnetConversions { + baseState.SetSubnetConversion(subnetID, conversion.ConversionID, conversion.ChainID, conversion.Addr) } return nil } diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 235b8ad4e0a0..cbbf3c12a4a5 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -897,44 +897,63 @@ func TestDiffSubnetOwner(t *testing.T) { func TestDiffSubnetManager(t *testing.T) { var ( - require = require.New(t) - state = newTestState(t, memdb.New()) - newManager = chainIDAndAddr{ids.GenerateTestID(), []byte{1, 2, 3, 4}} - subnetID = ids.GenerateTestID() + require = require.New(t) + state = newTestState(t, memdb.New()) + subnetID = ids.GenerateTestID() + expectedConversion = subnetConversion{ + ConversionID: ids.GenerateTestID(), + ChainID: ids.GenerateTestID(), + Addr: []byte{1, 2, 3, 4}, + } ) - chainID, addr, err := state.GetSubnetManager(subnetID) + conversionID, chainID, addr, err := state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Equal(ids.Empty, chainID) - require.Nil(addr) + require.Zero(conversionID) + require.Zero(chainID) + require.Zero(addr) d, err := NewDiffOn(state) require.NoError(err) - chainID, addr, err = d.GetSubnetManager(subnetID) + conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Equal(ids.Empty, chainID) - require.Nil(addr) + require.Zero(conversionID) + require.Zero(chainID) + require.Zero(addr) // Setting a subnet manager should be reflected on diff not state - d.SetSubnetManager(subnetID, newManager.ChainID, newManager.Addr) - chainID, addr, err = d.GetSubnetManager(subnetID) + d.SetSubnetConversion(subnetID, expectedConversion.ConversionID, expectedConversion.ChainID, expectedConversion.Addr) + conversionID, chainID, addr, err = d.GetSubnetConversion(subnetID) require.NoError(err) - require.Equal(newManager.ChainID, chainID) - require.Equal(newManager.Addr, addr) + require.Equal( + expectedConversion, + subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }, + ) - chainID, addr, err = state.GetSubnetManager(subnetID) + conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Equal(ids.Empty, chainID) - require.Nil(addr) + require.Zero(conversionID) + require.Zero(chainID) + require.Zero(addr) // State should reflect new subnet manager after diff is applied require.NoError(d.Apply(state)) - chainID, addr, err = 
state.GetSubnetManager(subnetID) + conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) require.NoError(err) - require.Equal(newManager.ChainID, chainID) - require.Equal(newManager.Addr, addr) + require.Equal( + expectedConversion, + subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }, + ) } func TestDiffStacking(t *testing.T) { diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index 4d34407ee3c2..a290089d464e 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -382,20 +382,21 @@ func (mr *MockChainMockRecorder) GetSoVExcess() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSoVExcess", reflect.TypeOf((*MockChain)(nil).GetSoVExcess)) } -// GetSubnetManager mocks base method. -func (m *MockChain) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { +// GetSubnetConversion mocks base method. +func (m *MockChain) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetManager", subnetID) + ret := m.ctrl.Call(m, "GetSubnetConversion", subnetID) ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].([]byte) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret1, _ := ret[1].(ids.ID) + ret2, _ := ret[2].([]byte) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 } -// GetSubnetManager indicates an expected call of GetSubnetManager. -func (mr *MockChainMockRecorder) GetSubnetManager(subnetID any) *gomock.Call { +// GetSubnetConversion indicates an expected call of GetSubnetConversion. +func (mr *MockChainMockRecorder) GetSubnetConversion(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetManager", reflect.TypeOf((*MockChain)(nil).GetSubnetManager), subnetID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetConversion", reflect.TypeOf((*MockChain)(nil).GetSubnetConversion), subnetID) } // GetSubnetOnlyValidator mocks base method. @@ -672,16 +673,16 @@ func (mr *MockChainMockRecorder) SetSoVExcess(e any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSoVExcess", reflect.TypeOf((*MockChain)(nil).SetSoVExcess), e) } -// SetSubnetManager mocks base method. -func (m *MockChain) SetSubnetManager(subnetID, chainID ids.ID, addr []byte) { +// SetSubnetConversion mocks base method. +func (m *MockChain) SetSubnetConversion(subnetID, conversionID, chainID ids.ID, addr []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetManager", subnetID, chainID, addr) + m.ctrl.Call(m, "SetSubnetConversion", subnetID, conversionID, chainID, addr) } -// SetSubnetManager indicates an expected call of SetSubnetManager. -func (mr *MockChainMockRecorder) SetSubnetManager(subnetID, chainID, addr any) *gomock.Call { +// SetSubnetConversion indicates an expected call of SetSubnetConversion. +func (mr *MockChainMockRecorder) SetSubnetConversion(subnetID, conversionID, chainID, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetManager", reflect.TypeOf((*MockChain)(nil).SetSubnetManager), subnetID, chainID, addr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockChain)(nil).SetSubnetConversion), subnetID, conversionID, chainID, addr) } // SetSubnetOwner mocks base method. 
diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index 95be0ff1fb5e..1d150e3310b9 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -396,20 +396,21 @@ func (mr *MockDiffMockRecorder) GetSoVExcess() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSoVExcess", reflect.TypeOf((*MockDiff)(nil).GetSoVExcess)) } -// GetSubnetManager mocks base method. -func (m *MockDiff) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { +// GetSubnetConversion mocks base method. +func (m *MockDiff) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetManager", subnetID) + ret := m.ctrl.Call(m, "GetSubnetConversion", subnetID) ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].([]byte) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret1, _ := ret[1].(ids.ID) + ret2, _ := ret[2].([]byte) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 } -// GetSubnetManager indicates an expected call of GetSubnetManager. -func (mr *MockDiffMockRecorder) GetSubnetManager(subnetID any) *gomock.Call { +// GetSubnetConversion indicates an expected call of GetSubnetConversion. +func (mr *MockDiffMockRecorder) GetSubnetConversion(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetManager", reflect.TypeOf((*MockDiff)(nil).GetSubnetManager), subnetID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetConversion", reflect.TypeOf((*MockDiff)(nil).GetSubnetConversion), subnetID) } // GetSubnetOnlyValidator mocks base method. @@ -686,16 +687,16 @@ func (mr *MockDiffMockRecorder) SetSoVExcess(e any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSoVExcess", reflect.TypeOf((*MockDiff)(nil).SetSoVExcess), e) } -// SetSubnetManager mocks base method. -func (m *MockDiff) SetSubnetManager(subnetID, chainID ids.ID, addr []byte) { +// SetSubnetConversion mocks base method. +func (m *MockDiff) SetSubnetConversion(subnetID, conversionID, chainID ids.ID, addr []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetManager", subnetID, chainID, addr) + m.ctrl.Call(m, "SetSubnetConversion", subnetID, conversionID, chainID, addr) } -// SetSubnetManager indicates an expected call of SetSubnetManager. -func (mr *MockDiffMockRecorder) SetSubnetManager(subnetID, chainID, addr any) *gomock.Call { +// SetSubnetConversion indicates an expected call of SetSubnetConversion. +func (mr *MockDiffMockRecorder) SetSubnetConversion(subnetID, conversionID, chainID, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetManager", reflect.TypeOf((*MockDiff)(nil).SetSubnetManager), subnetID, chainID, addr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockDiff)(nil).SetSubnetConversion), subnetID, conversionID, chainID, addr) } // SetSubnetOwner mocks base method. diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 41d06025d3f8..1352236f7e97 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -586,6 +586,23 @@ func (mr *MockStateMockRecorder) GetStatelessBlock(blockID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockState)(nil).GetStatelessBlock), blockID) } +// GetSubnetConversion mocks base method. 
+func (m *MockState) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetConversion", subnetID) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(ids.ID) + ret2, _ := ret[2].([]byte) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// GetSubnetConversion indicates an expected call of GetSubnetConversion. +func (mr *MockStateMockRecorder) GetSubnetConversion(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetConversion", reflect.TypeOf((*MockState)(nil).GetSubnetConversion), subnetID) +} + // GetSubnetIDs mocks base method. func (m *MockState) GetSubnetIDs() ([]ids.ID, error) { m.ctrl.T.Helper() @@ -601,22 +618,6 @@ func (mr *MockStateMockRecorder) GetSubnetIDs() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetIDs", reflect.TypeOf((*MockState)(nil).GetSubnetIDs)) } -// GetSubnetManager mocks base method. -func (m *MockState) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetManager", subnetID) - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].([]byte) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetSubnetManager indicates an expected call of GetSubnetManager. -func (mr *MockStateMockRecorder) GetSubnetManager(subnetID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetManager", reflect.TypeOf((*MockState)(nil).GetSubnetManager), subnetID) -} - // GetSubnetOnlyValidator mocks base method. func (m *MockState) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { m.ctrl.T.Helper() @@ -945,16 +946,16 @@ func (mr *MockStateMockRecorder) SetSoVExcess(e any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSoVExcess", reflect.TypeOf((*MockState)(nil).SetSoVExcess), e) } -// SetSubnetManager mocks base method. -func (m *MockState) SetSubnetManager(subnetID, chainID ids.ID, addr []byte) { +// SetSubnetConversion mocks base method. +func (m *MockState) SetSubnetConversion(subnetID, conversionID, chainID ids.ID, addr []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetManager", subnetID, chainID, addr) + m.ctrl.Call(m, "SetSubnetConversion", subnetID, conversionID, chainID, addr) } -// SetSubnetManager indicates an expected call of SetSubnetManager. -func (mr *MockStateMockRecorder) SetSubnetManager(subnetID, chainID, addr any) *gomock.Call { +// SetSubnetConversion indicates an expected call of SetSubnetConversion. +func (mr *MockStateMockRecorder) SetSubnetConversion(subnetID, conversionID, chainID, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetManager", reflect.TypeOf((*MockState)(nil).SetSubnetManager), subnetID, chainID, addr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockState)(nil).SetSubnetConversion), subnetID, conversionID, chainID, addr) } // SetSubnetOwner mocks base method. 
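The conversion record is now keyed by subnetID and carries a conversionID alongside the manager's chainID and address. A short sketch of the caller-side lookup pattern used by the service and verification code in this patch, where database.ErrNotFound distinguishes a still-permissioned subnet from a converted one; the isConverted helper is illustrative only and is assumed to sit in the state package so it can use the Chain interface directly:

package state

import (
	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/ids"
)

// isConverted reports whether subnetID has a recorded conversion, returning
// the stored conversionID, manager chainID, and manager address when it does.
// Any Chain implementation (state, diff, or mock) can be passed in.
func isConverted(c Chain, subnetID ids.ID) (bool, ids.ID, ids.ID, []byte, error) {
	conversionID, chainID, addr, err := c.GetSubnetConversion(subnetID)
	switch err {
	case nil:
		return true, conversionID, chainID, addr, nil
	case database.ErrNotFound:
		// No conversion has been stored; the subnet is still permissioned.
		return false, ids.Empty, ids.Empty, nil, nil
	default:
		return false, ids.Empty, ids.Empty, nil, err
	}
}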
diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 12405f4ab60e..77698f07449e 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -135,8 +135,8 @@ type Chain interface { GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) - GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) - SetSubnetManager(subnetID ids.ID, chainID ids.ID, addr []byte) + GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) + SetSubnetConversion(subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) AddSubnetTransformation(transformSubnetTx *txs.Tx) @@ -387,9 +387,9 @@ type state struct { subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner; if the entry is nil, it is not in the database subnetOwnerDB database.Database - subnetManagers map[ids.ID]chainIDAndAddr // map of subnetID -> manager of the subnet - subnetManagerCache cache.Cacher[ids.ID, chainIDAndAddr] // cache of subnetID -> manager - subnetManagerDB database.Database + subnetConversions map[ids.ID]subnetConversion // map of subnetID -> manager of the subnet + subnetConversionCache cache.Cacher[ids.ID, subnetConversion] // cache of subnetID -> manager + subnetConversionDB database.Database transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx; if the entry is nil, it is not in the database @@ -463,9 +463,10 @@ type fxOwnerAndSize struct { size int } -type chainIDAndAddr struct { - ChainID ids.ID `serialize:"true"` - Addr []byte `serialize:"true"` +type subnetConversion struct { + ConversionID ids.ID `serialize:"true"` + ChainID ids.ID `serialize:"true"` + Addr []byte `serialize:"true"` } func txSize(_ ids.ID, tx *txs.Tx) int { @@ -579,10 +580,10 @@ func New( } subnetManagerDB := prefixdb.New(SubnetManagerPrefix, baseDB) - subnetManagerCache, err := metercacher.New[ids.ID, chainIDAndAddr]( + subnetManagerCache, err := metercacher.New[ids.ID, subnetConversion]( "subnet_manager_cache", metricsReg, - cache.NewSizedLRU[ids.ID, chainIDAndAddr](execCfg.SubnetManagerCacheSize, func(_ ids.ID, f chainIDAndAddr) int { + cache.NewSizedLRU[ids.ID, subnetConversion](execCfg.SubnetManagerCacheSize, func(_ ids.ID, f subnetConversion) int { return 2*ids.IDLen + len(f.Addr) }), ) @@ -701,9 +702,9 @@ func New( subnetOwnerDB: subnetOwnerDB, subnetOwnerCache: subnetOwnerCache, - subnetManagers: make(map[ids.ID]chainIDAndAddr), - subnetManagerDB: subnetManagerDB, - subnetManagerCache: subnetManagerCache, + subnetConversions: make(map[ids.ID]subnetConversion), + subnetConversionDB: subnetManagerDB, + subnetConversionCache: subnetManagerCache, transformedSubnets: make(map[ids.ID]*txs.Tx), transformedSubnetCache: transformedSubnetCache, @@ -950,32 +951,33 @@ func (s *state) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { s.subnetOwners[subnetID] = owner } -func (s *state) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { - if chainIDAndAddr, exists := s.subnetManagers[subnetID]; exists { - return chainIDAndAddr.ChainID, chainIDAndAddr.Addr, nil +func (s *state) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { + if conversion, exists := s.subnetConversions[subnetID]; exists { + return conversion.ConversionID, conversion.ChainID, conversion.Addr, nil } - if chainIDAndAddr, cached := 
s.subnetManagerCache.Get(subnetID); cached { - return chainIDAndAddr.ChainID, chainIDAndAddr.Addr, nil + if conversion, cached := s.subnetConversionCache.Get(subnetID); cached { + return conversion.ConversionID, conversion.ChainID, conversion.Addr, nil } - chainIDAndAddrBytes, err := s.subnetManagerDB.Get(subnetID[:]) + conversionBytes, err := s.subnetConversionDB.Get(subnetID[:]) if err != nil { - return ids.Empty, nil, err + return ids.Empty, ids.Empty, nil, err } - var manager chainIDAndAddr - if _, err := block.GenesisCodec.Unmarshal(chainIDAndAddrBytes, &manager); err != nil { - return ids.Empty, nil, err + var conversion subnetConversion + if _, err := block.GenesisCodec.Unmarshal(conversionBytes, &conversion); err != nil { + return ids.Empty, ids.Empty, nil, err } - s.subnetManagerCache.Put(subnetID, manager) - return manager.ChainID, manager.Addr, nil + s.subnetConversionCache.Put(subnetID, conversion) + return conversion.ConversionID, conversion.ChainID, conversion.Addr, nil } -func (s *state) SetSubnetManager(subnetID ids.ID, chainID ids.ID, addr []byte) { - s.subnetManagers[subnetID] = chainIDAndAddr{ - ChainID: chainID, - Addr: addr, +func (s *state) SetSubnetConversion(subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) { + s.subnetConversions[subnetID] = subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, } } @@ -2916,19 +2918,19 @@ func (s *state) writeSubnetOwners() error { } func (s *state) writeSubnetManagers() error { - for subnetID, manager := range s.subnetManagers { + for subnetID, manager := range s.subnetConversions { subnetID := subnetID manager := manager - delete(s.subnetManagers, subnetID) + delete(s.subnetConversions, subnetID) managerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &manager) if err != nil { return fmt.Errorf("failed to marshal subnet manager: %w", err) } - s.subnetManagerCache.Put(subnetID, manager) + s.subnetConversionCache.Put(subnetID, manager) - if err := s.subnetManagerDB.Put(subnetID[:], managerBytes); err != nil { + if err := s.subnetConversionDB.Put(subnetID[:], managerBytes); err != nil { return fmt.Errorf("failed to write subnet manager: %w", err) } } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index d0db7bd6d11c..294b3e943e9b 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1106,23 +1106,24 @@ func TestStateSubnetOwner(t *testing.T) { func TestStateSubnetManager(t *testing.T) { tests := []struct { name string - setup func(t *testing.T, s State, subnetID ids.ID, chainID ids.ID, addr []byte) + setup func(t *testing.T, s State, subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) }{ { name: "in-memory", - setup: func(_ *testing.T, s State, subnetID ids.ID, chainID ids.ID, addr []byte) { - s.SetSubnetManager(subnetID, chainID, addr) + setup: func(_ *testing.T, s State, subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) { + s.SetSubnetConversion(subnetID, conversionID, chainID, addr) }, }, { name: "cache", - setup: func(t *testing.T, s State, subnetID ids.ID, chainID ids.ID, addr []byte) { - subnetManagerCache := s.(*state).subnetManagerCache + setup: func(t *testing.T, s State, subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) { + subnetManagerCache := s.(*state).subnetConversionCache require.Zero(t, subnetManagerCache.Len()) - subnetManagerCache.Put(subnetID, chainIDAndAddr{ - ChainID: chainID, - Addr: addr, + subnetManagerCache.Put(subnetID, 
subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, }) require.Equal(t, 1, subnetManagerCache.Len()) }, @@ -1130,25 +1131,35 @@ func TestStateSubnetManager(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - initializedState := newTestState(t, memdb.New()) + var ( + require = require.New(t) + state = newTestState(t, memdb.New()) + subnetID = ids.GenerateTestID() + expectedConversion = subnetConversion{ + ConversionID: ids.GenerateTestID(), + ChainID: ids.GenerateTestID(), + Addr: []byte{'a', 'd', 'd', 'r'}, + } + ) - subnetID := ids.GenerateTestID() - chainID, addr, err := initializedState.GetSubnetManager(subnetID) + conversionID, chainID, addr, err := state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Equal(ids.Empty, chainID) - require.Nil(addr) - - expectedChainID := ids.GenerateTestID() - expectedAddr := []byte{'a', 'd', 'd', 'r'} + require.Zero(conversionID) + require.Zero(chainID) + require.Zero(addr) - test.setup(t, initializedState, subnetID, expectedChainID, expectedAddr) + test.setup(t, state, subnetID, expectedConversion.ConversionID, expectedConversion.ChainID, expectedConversion.Addr) - chainID, addr, err = initializedState.GetSubnetManager(subnetID) + conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) require.NoError(err) - require.Equal(expectedChainID, chainID) - require.Equal(expectedAddr, addr) + require.Equal( + expectedConversion, + subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }, + ) }) } } diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index 61fead2677a7..cf3e0e8d2cf2 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -286,7 +286,12 @@ func TestEtnaCreateChainTxInvalidWithManagedSubnet(t *testing.T) { builderDiff, err := state.NewDiffOn(stateDiff) require.NoError(err) - stateDiff.SetSubnetManager(subnetID, ids.GenerateTestID(), []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}) + stateDiff.SetSubnetConversion( + subnetID, + ids.GenerateTestID(), + ids.GenerateTestID(), + []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, + ) feeCalculator := state.PickFeeCalculator(env.config, builderDiff) executor := StandardTxExecutor{ diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index d458c01ab259..69b8cd567586 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -308,7 +308,7 @@ func verifyRemoveSubnetValidatorTx( } if backend.Config.UpgradeConfig.IsEtnaActivated(currentTimestamp) { - _, _, err := chainState.GetSubnetManager(tx.Subnet) + _, _, _, err := chainState.GetSubnetConversion(tx.Subnet) if err == nil { return nil, false, fmt.Errorf("%w: %q", ErrRemoveValidatorManagedSubnet, tx.Subnet) } diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index d0a37f5ba82c..14069b1e4e43 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -542,7 +542,8 @@ func (e *StandardTxExecutor) ConvertSubnetTx(tx *txs.ConvertSubnetTx) error { // Produce the UTXOS avax.Produce(e.State, txID, tx.Outs) // Set the new Subnet manager in the database - e.State.SetSubnetManager(tx.Subnet, tx.ChainID, tx.Address) 
+ // TODO: Populate the conversionID + e.State.SetSubnetConversion(tx.Subnet, ids.Empty, tx.ChainID, tx.Address) return nil } diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index e11ad73ac5d9..4dbd740f9a7b 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -913,7 +913,12 @@ func TestEtnaStandardTxExecutorAddSubnetValidator(t *testing.T) { onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - onAcceptState.SetSubnetManager(subnetID, ids.GenerateTestID(), []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}) + onAcceptState.SetSubnetConversion( + subnetID, + ids.GenerateTestID(), + ids.GenerateTestID(), + []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, + ) executor := StandardTxExecutor{ Backend: &env.backend, @@ -1994,7 +1999,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { name: "attempted to remove subnet validator after subnet manager is set", newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) - env.state.EXPECT().GetSubnetManager(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, nil).AnyTimes() + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), ids.GenerateTestID(), []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, nil).AnyTimes() env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() cfg := &config.Config{ @@ -2245,7 +2250,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { subnetOwner := fxmock.NewOwner(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) - env.state.EXPECT().GetSubnetManager(env.unsignedTx.Subnet).Return(ids.Empty, nil, database.ErrNotFound).Times(1) + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.Empty, ids.Empty, nil, database.ErrNotFound).Times(1) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil) env.flowChecker.EXPECT().VerifySpend( @@ -2284,7 +2289,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { subnetOwner := fxmock.NewOwner(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) - env.state.EXPECT().GetSubnetManager(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), make([]byte, 20), nil) + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), ids.GenerateTestID(), make([]byte, 20), nil) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) @@ -2319,7 +2324,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { subnetOwner := fxmock.NewOwner(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) - 
env.state.EXPECT().GetSubnetManager(env.unsignedTx.Subnet).Return(ids.Empty, nil, database.ErrNotFound).Times(1) + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.Empty, ids.Empty, nil, database.ErrNotFound).Times(1) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) env.flowChecker.EXPECT().VerifySpend( @@ -2478,7 +2483,7 @@ func TestStandardExecutorConvertSubnetTx(t *testing.T) { { name: "invalid if subnet is converted", updateExecutor: func(e *StandardTxExecutor) { - e.State.SetSubnetManager(subnetID, ids.GenerateTestID(), nil) + e.State.SetSubnetConversion(subnetID, ids.GenerateTestID(), ids.GenerateTestID(), nil) }, expectedErr: errIsImmutable, }, @@ -2566,8 +2571,10 @@ func TestStandardExecutorConvertSubnetTx(t *testing.T) { require.Equal(expectedUTXO, utxo) } - stateChainID, stateAddress, err := diff.GetSubnetManager(subnetID) + // TODO: Populate the conversionID + stateConversionID, stateChainID, stateAddress, err := diff.GetSubnetConversion(subnetID) require.NoError(err) + require.Zero(stateConversionID) require.Equal(chainID, stateChainID) require.Equal(address, stateAddress) }) diff --git a/vms/platformvm/txs/executor/subnet_tx_verification.go b/vms/platformvm/txs/executor/subnet_tx_verification.go index 6e5ecb9a34f5..7466fd78227e 100644 --- a/vms/platformvm/txs/executor/subnet_tx_verification.go +++ b/vms/platformvm/txs/executor/subnet_tx_verification.go @@ -43,7 +43,7 @@ func verifyPoASubnetAuthorization( return nil, err } - _, _, err = chainState.GetSubnetManager(subnetID) + _, _, _, err = chainState.GetSubnetConversion(subnetID) if err == nil { return nil, fmt.Errorf("%q %w", subnetID, errIsImmutable) } diff --git a/x/merkledb/mock_db.go b/x/merkledb/mock_db.go new file mode 100644 index 000000000000..c3bf69cf22f6 --- /dev/null +++ b/x/merkledb/mock_db.go @@ -0,0 +1,491 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: x/merkledb/db.go +// +// Generated by this command: +// +// mockgen -source=x/merkledb/db.go -destination=x/merkledb/mock_db.go -package=merkledb -exclude_interfaces=ChangeProofer,RangeProofer,Clearer,Prefetcher -mock_names=MockMerkleDB=MockMerkleDB +// + +// Package merkledb is a generated GoMock package. +package merkledb + +import ( + context "context" + reflect "reflect" + + database "github.com/ava-labs/avalanchego/database" + ids "github.com/ava-labs/avalanchego/ids" + maybe "github.com/ava-labs/avalanchego/utils/maybe" + gomock "go.uber.org/mock/gomock" +) + +// MockMerkleDB is a mock of MerkleDB interface. +type MockMerkleDB struct { + ctrl *gomock.Controller + recorder *MockMerkleDBMockRecorder +} + +// MockMerkleDBMockRecorder is the mock recorder for MockMerkleDB. +type MockMerkleDBMockRecorder struct { + mock *MockMerkleDB +} + +// NewMockMerkleDB creates a new mock instance. +func NewMockMerkleDB(ctrl *gomock.Controller) *MockMerkleDB { + mock := &MockMerkleDB{ctrl: ctrl} + mock.recorder = &MockMerkleDBMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMerkleDB) EXPECT() *MockMerkleDBMockRecorder { + return m.recorder +} + +// Clear mocks base method. +func (m *MockMerkleDB) Clear() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Clear") + ret0, _ := ret[0].(error) + return ret0 +} + +// Clear indicates an expected call of Clear. 
+func (mr *MockMerkleDBMockRecorder) Clear() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockMerkleDB)(nil).Clear)) +} + +// Close mocks base method. +func (m *MockMerkleDB) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockMerkleDBMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMerkleDB)(nil).Close)) +} + +// CommitChangeProof mocks base method. +func (m *MockMerkleDB) CommitChangeProof(ctx context.Context, proof *ChangeProof) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitChangeProof", ctx, proof) + ret0, _ := ret[0].(error) + return ret0 +} + +// CommitChangeProof indicates an expected call of CommitChangeProof. +func (mr *MockMerkleDBMockRecorder) CommitChangeProof(ctx, proof any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitChangeProof), ctx, proof) +} + +// CommitRangeProof mocks base method. +func (m *MockMerkleDB) CommitRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], proof *RangeProof) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitRangeProof", ctx, start, end, proof) + ret0, _ := ret[0].(error) + return ret0 +} + +// CommitRangeProof indicates an expected call of CommitRangeProof. +func (mr *MockMerkleDBMockRecorder) CommitRangeProof(ctx, start, end, proof any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitRangeProof), ctx, start, end, proof) +} + +// Compact mocks base method. +func (m *MockMerkleDB) Compact(start, limit []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Compact", start, limit) + ret0, _ := ret[0].(error) + return ret0 +} + +// Compact indicates an expected call of Compact. +func (mr *MockMerkleDBMockRecorder) Compact(start, limit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compact", reflect.TypeOf((*MockMerkleDB)(nil).Compact), start, limit) +} + +// Delete mocks base method. +func (m *MockMerkleDB) Delete(key []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockMerkleDBMockRecorder) Delete(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockMerkleDB)(nil).Delete), key) +} + +// Get mocks base method. +func (m *MockMerkleDB) Get(key []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", key) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockMerkleDBMockRecorder) Get(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMerkleDB)(nil).Get), key) +} + +// GetChangeProof mocks base method. 
+func (m *MockMerkleDB) GetChangeProof(ctx context.Context, startRootID, endRootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*ChangeProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChangeProof", ctx, startRootID, endRootID, start, end, maxLength) + ret0, _ := ret[0].(*ChangeProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChangeProof indicates an expected call of GetChangeProof. +func (mr *MockMerkleDBMockRecorder) GetChangeProof(ctx, startRootID, endRootID, start, end, maxLength any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetChangeProof), ctx, startRootID, endRootID, start, end, maxLength) +} + +// GetMerkleRoot mocks base method. +func (m *MockMerkleDB) GetMerkleRoot(ctx context.Context) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMerkleRoot", ctx) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMerkleRoot indicates an expected call of GetMerkleRoot. +func (mr *MockMerkleDBMockRecorder) GetMerkleRoot(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMerkleRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetMerkleRoot), ctx) +} + +// GetProof mocks base method. +func (m *MockMerkleDB) GetProof(ctx context.Context, keyBytes []byte) (*Proof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProof", ctx, keyBytes) + ret0, _ := ret[0].(*Proof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProof indicates an expected call of GetProof. +func (mr *MockMerkleDBMockRecorder) GetProof(ctx, keyBytes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockMerkleDB)(nil).GetProof), ctx, keyBytes) +} + +// GetRangeProof mocks base method. +func (m *MockMerkleDB) GetRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRangeProof", ctx, start, end, maxLength) + ret0, _ := ret[0].(*RangeProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRangeProof indicates an expected call of GetRangeProof. +func (mr *MockMerkleDBMockRecorder) GetRangeProof(ctx, start, end, maxLength any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProof), ctx, start, end, maxLength) +} + +// GetRangeProofAtRoot mocks base method. +func (m *MockMerkleDB) GetRangeProofAtRoot(ctx context.Context, rootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRangeProofAtRoot", ctx, rootID, start, end, maxLength) + ret0, _ := ret[0].(*RangeProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRangeProofAtRoot indicates an expected call of GetRangeProofAtRoot. +func (mr *MockMerkleDBMockRecorder) GetRangeProofAtRoot(ctx, rootID, start, end, maxLength any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProofAtRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProofAtRoot), ctx, rootID, start, end, maxLength) +} + +// GetValue mocks base method. 
+func (m *MockMerkleDB) GetValue(ctx context.Context, key []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValue", ctx, key) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetValue indicates an expected call of GetValue. +func (mr *MockMerkleDBMockRecorder) GetValue(ctx, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValue", reflect.TypeOf((*MockMerkleDB)(nil).GetValue), ctx, key) +} + +// GetValues mocks base method. +func (m *MockMerkleDB) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValues", ctx, keys) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].([]error) + return ret0, ret1 +} + +// GetValues indicates an expected call of GetValues. +func (mr *MockMerkleDBMockRecorder) GetValues(ctx, keys any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValues", reflect.TypeOf((*MockMerkleDB)(nil).GetValues), ctx, keys) +} + +// Has mocks base method. +func (m *MockMerkleDB) Has(key []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Has", key) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Has indicates an expected call of Has. +func (mr *MockMerkleDBMockRecorder) Has(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMerkleDB)(nil).Has), key) +} + +// HealthCheck mocks base method. +func (m *MockMerkleDB) HealthCheck(arg0 context.Context) (any, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HealthCheck", arg0) + ret0, _ := ret[0].(any) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HealthCheck indicates an expected call of HealthCheck. +func (mr *MockMerkleDBMockRecorder) HealthCheck(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockMerkleDB)(nil).HealthCheck), arg0) +} + +// NewBatch mocks base method. +func (m *MockMerkleDB) NewBatch() database.Batch { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBatch") + ret0, _ := ret[0].(database.Batch) + return ret0 +} + +// NewBatch indicates an expected call of NewBatch. +func (mr *MockMerkleDBMockRecorder) NewBatch() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockMerkleDB)(nil).NewBatch)) +} + +// NewIterator mocks base method. +func (m *MockMerkleDB) NewIterator() database.Iterator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewIterator") + ret0, _ := ret[0].(database.Iterator) + return ret0 +} + +// NewIterator indicates an expected call of NewIterator. +func (mr *MockMerkleDBMockRecorder) NewIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIterator", reflect.TypeOf((*MockMerkleDB)(nil).NewIterator)) +} + +// NewIteratorWithPrefix mocks base method. +func (m *MockMerkleDB) NewIteratorWithPrefix(prefix []byte) database.Iterator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewIteratorWithPrefix", prefix) + ret0, _ := ret[0].(database.Iterator) + return ret0 +} + +// NewIteratorWithPrefix indicates an expected call of NewIteratorWithPrefix. 
+func (mr *MockMerkleDBMockRecorder) NewIteratorWithPrefix(prefix any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithPrefix), prefix) +} + +// NewIteratorWithStart mocks base method. +func (m *MockMerkleDB) NewIteratorWithStart(start []byte) database.Iterator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewIteratorWithStart", start) + ret0, _ := ret[0].(database.Iterator) + return ret0 +} + +// NewIteratorWithStart indicates an expected call of NewIteratorWithStart. +func (mr *MockMerkleDBMockRecorder) NewIteratorWithStart(start any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStart", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStart), start) +} + +// NewIteratorWithStartAndPrefix mocks base method. +func (m *MockMerkleDB) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewIteratorWithStartAndPrefix", start, prefix) + ret0, _ := ret[0].(database.Iterator) + return ret0 +} + +// NewIteratorWithStartAndPrefix indicates an expected call of NewIteratorWithStartAndPrefix. +func (mr *MockMerkleDBMockRecorder) NewIteratorWithStartAndPrefix(start, prefix any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStartAndPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStartAndPrefix), start, prefix) +} + +// NewView mocks base method. +func (m *MockMerkleDB) NewView(ctx context.Context, changes ViewChanges) (View, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewView", ctx, changes) + ret0, _ := ret[0].(View) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewView indicates an expected call of NewView. +func (mr *MockMerkleDBMockRecorder) NewView(ctx, changes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewView", reflect.TypeOf((*MockMerkleDB)(nil).NewView), ctx, changes) +} + +// PrefetchPath mocks base method. +func (m *MockMerkleDB) PrefetchPath(key []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrefetchPath", key) + ret0, _ := ret[0].(error) + return ret0 +} + +// PrefetchPath indicates an expected call of PrefetchPath. +func (mr *MockMerkleDBMockRecorder) PrefetchPath(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPath", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPath), key) +} + +// PrefetchPaths mocks base method. +func (m *MockMerkleDB) PrefetchPaths(keys [][]byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrefetchPaths", keys) + ret0, _ := ret[0].(error) + return ret0 +} + +// PrefetchPaths indicates an expected call of PrefetchPaths. +func (mr *MockMerkleDBMockRecorder) PrefetchPaths(keys any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPaths", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPaths), keys) +} + +// Put mocks base method. +func (m *MockMerkleDB) Put(key, value []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. 
+func (mr *MockMerkleDBMockRecorder) Put(key, value any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockMerkleDB)(nil).Put), key, value) +} + +// VerifyChangeProof mocks base method. +func (m *MockMerkleDB) VerifyChangeProof(ctx context.Context, proof *ChangeProof, start, end maybe.Maybe[[]byte], expectedEndRootID ids.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyChangeProof", ctx, proof, start, end, expectedEndRootID) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyChangeProof indicates an expected call of VerifyChangeProof. +func (mr *MockMerkleDBMockRecorder) VerifyChangeProof(ctx, proof, start, end, expectedEndRootID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).VerifyChangeProof), ctx, proof, start, end, expectedEndRootID) +} + +// getEditableNode mocks base method. +func (m *MockMerkleDB) getEditableNode(key Key, hasValue bool) (*node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getEditableNode", key, hasValue) + ret0, _ := ret[0].(*node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getEditableNode indicates an expected call of getEditableNode. +func (mr *MockMerkleDBMockRecorder) getEditableNode(key, hasValue any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getEditableNode", reflect.TypeOf((*MockMerkleDB)(nil).getEditableNode), key, hasValue) +} + +// getNode mocks base method. +func (m *MockMerkleDB) getNode(key Key, hasValue bool) (*node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getNode", key, hasValue) + ret0, _ := ret[0].(*node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getNode indicates an expected call of getNode. +func (mr *MockMerkleDBMockRecorder) getNode(key, hasValue any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getNode", reflect.TypeOf((*MockMerkleDB)(nil).getNode), key, hasValue) +} + +// getRoot mocks base method. +func (m *MockMerkleDB) getRoot() maybe.Maybe[*node] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getRoot") + ret0, _ := ret[0].(maybe.Maybe[*node]) + return ret0 +} + +// getRoot indicates an expected call of getRoot. +func (mr *MockMerkleDBMockRecorder) getRoot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getRoot", reflect.TypeOf((*MockMerkleDB)(nil).getRoot)) +} + +// getTokenSize mocks base method. +func (m *MockMerkleDB) getTokenSize() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getTokenSize") + ret0, _ := ret[0].(int) + return ret0 +} + +// getTokenSize indicates an expected call of getTokenSize. +func (mr *MockMerkleDBMockRecorder) getTokenSize() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getTokenSize", reflect.TypeOf((*MockMerkleDB)(nil).getTokenSize)) +} + +// getValue mocks base method. +func (m *MockMerkleDB) getValue(key Key) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getValue", key) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getValue indicates an expected call of getValue. 
+func (mr *MockMerkleDBMockRecorder) getValue(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getValue", reflect.TypeOf((*MockMerkleDB)(nil).getValue), key) +} From a33e0dfc18152081552384432fa768987cbd4f3e Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 2 Oct 2024 15:27:29 -0400 Subject: [PATCH 071/155] Include validationID --- vms/platformvm/state/state.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 77698f07449e..129d210b120c 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2373,10 +2373,12 @@ func (s *state) writeSubnetOnlyValidators(updateValidators bool, height uint64) // Perform additions: for validationID, sov := range sovChanges { + validationID := validationID + subnetIDNodeIDKey := make([]byte, len(sov.SubnetID)+len(sov.NodeID)) copy(subnetIDNodeIDKey, sov.SubnetID[:]) copy(subnetIDNodeIDKey[len(sov.SubnetID):], sov.NodeID[:]) - if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, nil); err != nil { + if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]); err != nil { return err } From 01c9072aafc84e22efeb0ba0ff7f7da9d8b280cd Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 3 Oct 2024 16:17:00 -0400 Subject: [PATCH 072/155] remove unexpected file --- x/merkledb/mock_db.go | 491 ------------------------------------------ 1 file changed, 491 deletions(-) delete mode 100644 x/merkledb/mock_db.go diff --git a/x/merkledb/mock_db.go b/x/merkledb/mock_db.go deleted file mode 100644 index c3bf69cf22f6..000000000000 --- a/x/merkledb/mock_db.go +++ /dev/null @@ -1,491 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: x/merkledb/db.go -// -// Generated by this command: -// -// mockgen -source=x/merkledb/db.go -destination=x/merkledb/mock_db.go -package=merkledb -exclude_interfaces=ChangeProofer,RangeProofer,Clearer,Prefetcher -mock_names=MockMerkleDB=MockMerkleDB -// - -// Package merkledb is a generated GoMock package. -package merkledb - -import ( - context "context" - reflect "reflect" - - database "github.com/ava-labs/avalanchego/database" - ids "github.com/ava-labs/avalanchego/ids" - maybe "github.com/ava-labs/avalanchego/utils/maybe" - gomock "go.uber.org/mock/gomock" -) - -// MockMerkleDB is a mock of MerkleDB interface. -type MockMerkleDB struct { - ctrl *gomock.Controller - recorder *MockMerkleDBMockRecorder -} - -// MockMerkleDBMockRecorder is the mock recorder for MockMerkleDB. -type MockMerkleDBMockRecorder struct { - mock *MockMerkleDB -} - -// NewMockMerkleDB creates a new mock instance. -func NewMockMerkleDB(ctrl *gomock.Controller) *MockMerkleDB { - mock := &MockMerkleDB{ctrl: ctrl} - mock.recorder = &MockMerkleDBMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockMerkleDB) EXPECT() *MockMerkleDBMockRecorder { - return m.recorder -} - -// Clear mocks base method. -func (m *MockMerkleDB) Clear() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Clear") - ret0, _ := ret[0].(error) - return ret0 -} - -// Clear indicates an expected call of Clear. -func (mr *MockMerkleDBMockRecorder) Clear() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockMerkleDB)(nil).Clear)) -} - -// Close mocks base method. 
-func (m *MockMerkleDB) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockMerkleDBMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMerkleDB)(nil).Close)) -} - -// CommitChangeProof mocks base method. -func (m *MockMerkleDB) CommitChangeProof(ctx context.Context, proof *ChangeProof) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitChangeProof", ctx, proof) - ret0, _ := ret[0].(error) - return ret0 -} - -// CommitChangeProof indicates an expected call of CommitChangeProof. -func (mr *MockMerkleDBMockRecorder) CommitChangeProof(ctx, proof any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitChangeProof), ctx, proof) -} - -// CommitRangeProof mocks base method. -func (m *MockMerkleDB) CommitRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], proof *RangeProof) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitRangeProof", ctx, start, end, proof) - ret0, _ := ret[0].(error) - return ret0 -} - -// CommitRangeProof indicates an expected call of CommitRangeProof. -func (mr *MockMerkleDBMockRecorder) CommitRangeProof(ctx, start, end, proof any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitRangeProof), ctx, start, end, proof) -} - -// Compact mocks base method. -func (m *MockMerkleDB) Compact(start, limit []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Compact", start, limit) - ret0, _ := ret[0].(error) - return ret0 -} - -// Compact indicates an expected call of Compact. -func (mr *MockMerkleDBMockRecorder) Compact(start, limit any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compact", reflect.TypeOf((*MockMerkleDB)(nil).Compact), start, limit) -} - -// Delete mocks base method. -func (m *MockMerkleDB) Delete(key []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", key) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockMerkleDBMockRecorder) Delete(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockMerkleDB)(nil).Delete), key) -} - -// Get mocks base method. -func (m *MockMerkleDB) Get(key []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", key) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockMerkleDBMockRecorder) Get(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMerkleDB)(nil).Get), key) -} - -// GetChangeProof mocks base method. -func (m *MockMerkleDB) GetChangeProof(ctx context.Context, startRootID, endRootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*ChangeProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChangeProof", ctx, startRootID, endRootID, start, end, maxLength) - ret0, _ := ret[0].(*ChangeProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChangeProof indicates an expected call of GetChangeProof. 
-func (mr *MockMerkleDBMockRecorder) GetChangeProof(ctx, startRootID, endRootID, start, end, maxLength any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetChangeProof), ctx, startRootID, endRootID, start, end, maxLength) -} - -// GetMerkleRoot mocks base method. -func (m *MockMerkleDB) GetMerkleRoot(ctx context.Context) (ids.ID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMerkleRoot", ctx) - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetMerkleRoot indicates an expected call of GetMerkleRoot. -func (mr *MockMerkleDBMockRecorder) GetMerkleRoot(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMerkleRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetMerkleRoot), ctx) -} - -// GetProof mocks base method. -func (m *MockMerkleDB) GetProof(ctx context.Context, keyBytes []byte) (*Proof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProof", ctx, keyBytes) - ret0, _ := ret[0].(*Proof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetProof indicates an expected call of GetProof. -func (mr *MockMerkleDBMockRecorder) GetProof(ctx, keyBytes any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockMerkleDB)(nil).GetProof), ctx, keyBytes) -} - -// GetRangeProof mocks base method. -func (m *MockMerkleDB) GetRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRangeProof", ctx, start, end, maxLength) - ret0, _ := ret[0].(*RangeProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRangeProof indicates an expected call of GetRangeProof. -func (mr *MockMerkleDBMockRecorder) GetRangeProof(ctx, start, end, maxLength any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProof), ctx, start, end, maxLength) -} - -// GetRangeProofAtRoot mocks base method. -func (m *MockMerkleDB) GetRangeProofAtRoot(ctx context.Context, rootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRangeProofAtRoot", ctx, rootID, start, end, maxLength) - ret0, _ := ret[0].(*RangeProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRangeProofAtRoot indicates an expected call of GetRangeProofAtRoot. -func (mr *MockMerkleDBMockRecorder) GetRangeProofAtRoot(ctx, rootID, start, end, maxLength any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProofAtRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProofAtRoot), ctx, rootID, start, end, maxLength) -} - -// GetValue mocks base method. -func (m *MockMerkleDB) GetValue(ctx context.Context, key []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValue", ctx, key) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetValue indicates an expected call of GetValue. -func (mr *MockMerkleDBMockRecorder) GetValue(ctx, key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValue", reflect.TypeOf((*MockMerkleDB)(nil).GetValue), ctx, key) -} - -// GetValues mocks base method. 
-func (m *MockMerkleDB) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValues", ctx, keys) - ret0, _ := ret[0].([][]byte) - ret1, _ := ret[1].([]error) - return ret0, ret1 -} - -// GetValues indicates an expected call of GetValues. -func (mr *MockMerkleDBMockRecorder) GetValues(ctx, keys any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValues", reflect.TypeOf((*MockMerkleDB)(nil).GetValues), ctx, keys) -} - -// Has mocks base method. -func (m *MockMerkleDB) Has(key []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", key) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Has indicates an expected call of Has. -func (mr *MockMerkleDBMockRecorder) Has(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMerkleDB)(nil).Has), key) -} - -// HealthCheck mocks base method. -func (m *MockMerkleDB) HealthCheck(arg0 context.Context) (any, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(any) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HealthCheck indicates an expected call of HealthCheck. -func (mr *MockMerkleDBMockRecorder) HealthCheck(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockMerkleDB)(nil).HealthCheck), arg0) -} - -// NewBatch mocks base method. -func (m *MockMerkleDB) NewBatch() database.Batch { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewBatch") - ret0, _ := ret[0].(database.Batch) - return ret0 -} - -// NewBatch indicates an expected call of NewBatch. -func (mr *MockMerkleDBMockRecorder) NewBatch() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockMerkleDB)(nil).NewBatch)) -} - -// NewIterator mocks base method. -func (m *MockMerkleDB) NewIterator() database.Iterator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIterator") - ret0, _ := ret[0].(database.Iterator) - return ret0 -} - -// NewIterator indicates an expected call of NewIterator. -func (mr *MockMerkleDBMockRecorder) NewIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIterator", reflect.TypeOf((*MockMerkleDB)(nil).NewIterator)) -} - -// NewIteratorWithPrefix mocks base method. -func (m *MockMerkleDB) NewIteratorWithPrefix(prefix []byte) database.Iterator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIteratorWithPrefix", prefix) - ret0, _ := ret[0].(database.Iterator) - return ret0 -} - -// NewIteratorWithPrefix indicates an expected call of NewIteratorWithPrefix. -func (mr *MockMerkleDBMockRecorder) NewIteratorWithPrefix(prefix any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithPrefix), prefix) -} - -// NewIteratorWithStart mocks base method. -func (m *MockMerkleDB) NewIteratorWithStart(start []byte) database.Iterator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIteratorWithStart", start) - ret0, _ := ret[0].(database.Iterator) - return ret0 -} - -// NewIteratorWithStart indicates an expected call of NewIteratorWithStart. 
-func (mr *MockMerkleDBMockRecorder) NewIteratorWithStart(start any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStart", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStart), start) -} - -// NewIteratorWithStartAndPrefix mocks base method. -func (m *MockMerkleDB) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewIteratorWithStartAndPrefix", start, prefix) - ret0, _ := ret[0].(database.Iterator) - return ret0 -} - -// NewIteratorWithStartAndPrefix indicates an expected call of NewIteratorWithStartAndPrefix. -func (mr *MockMerkleDBMockRecorder) NewIteratorWithStartAndPrefix(start, prefix any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStartAndPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStartAndPrefix), start, prefix) -} - -// NewView mocks base method. -func (m *MockMerkleDB) NewView(ctx context.Context, changes ViewChanges) (View, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewView", ctx, changes) - ret0, _ := ret[0].(View) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewView indicates an expected call of NewView. -func (mr *MockMerkleDBMockRecorder) NewView(ctx, changes any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewView", reflect.TypeOf((*MockMerkleDB)(nil).NewView), ctx, changes) -} - -// PrefetchPath mocks base method. -func (m *MockMerkleDB) PrefetchPath(key []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrefetchPath", key) - ret0, _ := ret[0].(error) - return ret0 -} - -// PrefetchPath indicates an expected call of PrefetchPath. -func (mr *MockMerkleDBMockRecorder) PrefetchPath(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPath", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPath), key) -} - -// PrefetchPaths mocks base method. -func (m *MockMerkleDB) PrefetchPaths(keys [][]byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrefetchPaths", keys) - ret0, _ := ret[0].(error) - return ret0 -} - -// PrefetchPaths indicates an expected call of PrefetchPaths. -func (mr *MockMerkleDBMockRecorder) PrefetchPaths(keys any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPaths", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPaths), keys) -} - -// Put mocks base method. -func (m *MockMerkleDB) Put(key, value []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Put", key, value) - ret0, _ := ret[0].(error) - return ret0 -} - -// Put indicates an expected call of Put. -func (mr *MockMerkleDBMockRecorder) Put(key, value any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockMerkleDB)(nil).Put), key, value) -} - -// VerifyChangeProof mocks base method. -func (m *MockMerkleDB) VerifyChangeProof(ctx context.Context, proof *ChangeProof, start, end maybe.Maybe[[]byte], expectedEndRootID ids.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyChangeProof", ctx, proof, start, end, expectedEndRootID) - ret0, _ := ret[0].(error) - return ret0 -} - -// VerifyChangeProof indicates an expected call of VerifyChangeProof. 
-func (mr *MockMerkleDBMockRecorder) VerifyChangeProof(ctx, proof, start, end, expectedEndRootID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).VerifyChangeProof), ctx, proof, start, end, expectedEndRootID) -} - -// getEditableNode mocks base method. -func (m *MockMerkleDB) getEditableNode(key Key, hasValue bool) (*node, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getEditableNode", key, hasValue) - ret0, _ := ret[0].(*node) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// getEditableNode indicates an expected call of getEditableNode. -func (mr *MockMerkleDBMockRecorder) getEditableNode(key, hasValue any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getEditableNode", reflect.TypeOf((*MockMerkleDB)(nil).getEditableNode), key, hasValue) -} - -// getNode mocks base method. -func (m *MockMerkleDB) getNode(key Key, hasValue bool) (*node, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getNode", key, hasValue) - ret0, _ := ret[0].(*node) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// getNode indicates an expected call of getNode. -func (mr *MockMerkleDBMockRecorder) getNode(key, hasValue any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getNode", reflect.TypeOf((*MockMerkleDB)(nil).getNode), key, hasValue) -} - -// getRoot mocks base method. -func (m *MockMerkleDB) getRoot() maybe.Maybe[*node] { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getRoot") - ret0, _ := ret[0].(maybe.Maybe[*node]) - return ret0 -} - -// getRoot indicates an expected call of getRoot. -func (mr *MockMerkleDBMockRecorder) getRoot() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getRoot", reflect.TypeOf((*MockMerkleDB)(nil).getRoot)) -} - -// getTokenSize mocks base method. -func (m *MockMerkleDB) getTokenSize() int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getTokenSize") - ret0, _ := ret[0].(int) - return ret0 -} - -// getTokenSize indicates an expected call of getTokenSize. -func (mr *MockMerkleDBMockRecorder) getTokenSize() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getTokenSize", reflect.TypeOf((*MockMerkleDB)(nil).getTokenSize)) -} - -// getValue mocks base method. -func (m *MockMerkleDB) getValue(key Key) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getValue", key) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// getValue indicates an expected call of getValue. 
-func (mr *MockMerkleDBMockRecorder) getValue(key any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getValue", reflect.TypeOf((*MockMerkleDB)(nil).getValue), key) -} From 237f590866ae225f306482bf004126bc24560f38 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 7 Oct 2024 16:29:41 -0400 Subject: [PATCH 073/155] Allow xsvm to sign arbitrary warp messages --- vms/example/xsvm/vm.go | 44 ++++++++++++++++++++++++++++------------ vms/example/xsvm/warp.go | 18 ++++++++++++++++ 2 files changed, 49 insertions(+), 13 deletions(-) create mode 100644 vms/example/xsvm/warp.go diff --git a/vms/example/xsvm/vm.go b/vms/example/xsvm/vm.go index 526fc47c499d..967ee7dd6a03 100644 --- a/vms/example/xsvm/vm.go +++ b/vms/example/xsvm/vm.go @@ -9,17 +9,19 @@ import ( "net/http" "github.com/gorilla/rpc/v2" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/example/xsvm/api" "github.com/ava-labs/avalanchego/vms/example/xsvm/builder" "github.com/ava-labs/avalanchego/vms/example/xsvm/chain" @@ -37,7 +39,7 @@ var ( ) type VM struct { - common.AppHandler + *p2p.Network chainContext *snow.Context db database.Database @@ -57,14 +59,38 @@ func (vm *VM) Initialize( _ []byte, engineChan chan<- common.Message, _ []*common.Fx, - _ common.AppSender, + appSender common.AppSender, ) error { - vm.AppHandler = common.NewNoOpAppHandler(chainContext.Log) - chainContext.Log.Info("initializing xsvm", zap.Stringer("version", Version), ) + metrics := prometheus.NewRegistry() + err := chainContext.Metrics.Register("p2p", metrics) + if err != nil { + return err + } + + vm.Network, err = p2p.NewNetwork( + chainContext.Log, + appSender, + metrics, + "", + ) + if err != nil { + return err + } + + // Allow signing of all warp messages. This is not typically safe, but is + // allowed for this example. + acp118Handler := acp118.NewHandler( + acp118Verifier{}, + chainContext.WarpSigner, + ) + if err := vm.Network.AddHandler(p2p.SignatureRequestHandlerID, acp118Handler); err != nil { + return err + } + vm.chainContext = chainContext vm.db = db g, err := genesis.Parse(genesisBytes) @@ -132,14 +158,6 @@ func (*VM) HealthCheck(context.Context) (interface{}, error) { return http.StatusOK, nil } -func (*VM) Connected(context.Context, ids.NodeID, *version.Application) error { - return nil -} - -func (*VM) Disconnected(context.Context, ids.NodeID) error { - return nil -} - func (vm *VM) GetBlock(_ context.Context, blkID ids.ID) (snowman.Block, error) { return vm.chain.GetBlock(blkID) } diff --git a/vms/example/xsvm/warp.go b/vms/example/xsvm/warp.go new file mode 100644 index 000000000000..224945abd88a --- /dev/null +++ b/vms/example/xsvm/warp.go @@ -0,0 +1,18 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package xsvm + +import ( + "context" + + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +// acp118Verifier allows signing all warp messages +type acp118Verifier struct{} + +func (acp118Verifier) Verify(context.Context, *warp.UnsignedMessage, []byte) *common.AppError { + return nil +} From 198680418af3d43b87e76bbad05a2ae2413842f8 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 8 Oct 2024 20:44:31 -0400 Subject: [PATCH 074/155] nit cleanup --- vms/platformvm/state/state.go | 16 +++++++++----- vms/platformvm/state/subnet_only_validator.go | 22 +++++++++++++++++++ 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 129d210b120c..bede6daaea80 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2261,9 +2261,11 @@ func (s *state) writeSubnetOnlyValidators(updateValidators bool, height uint64) return err } - subnetIDNodeIDKey := make([]byte, len(sov.SubnetID)+len(sov.NodeID)) - copy(subnetIDNodeIDKey, sov.SubnetID[:]) - copy(subnetIDNodeIDKey[len(sov.SubnetID):], sov.NodeID[:]) + subnetIDNodeID := subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + subnetIDNodeIDKey := subnetIDNodeID.Marshal() if err := s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey); err != nil { return err } @@ -2375,9 +2377,11 @@ func (s *state) writeSubnetOnlyValidators(updateValidators bool, height uint64) for validationID, sov := range sovChanges { validationID := validationID - subnetIDNodeIDKey := make([]byte, len(sov.SubnetID)+len(sov.NodeID)) - copy(subnetIDNodeIDKey, sov.SubnetID[:]) - copy(subnetIDNodeIDKey[len(sov.SubnetID):], sov.NodeID[:]) + subnetIDNodeID := subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + subnetIDNodeIDKey := subnetIDNodeID.Marshal() if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]); err != nil { return err } diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 487471a5f2b7..180528bbb9e9 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -18,11 +18,16 @@ import ( safemath "github.com/ava-labs/avalanchego/utils/math" ) +// subnetIDNodeID = [subnetID] + [nodeID] +const subnetIDNodeIDEntryLength = ids.IDLen + ids.NodeIDLen + var ( _ btree.LessFunc[SubnetOnlyValidator] = SubnetOnlyValidator.Less ErrMutatedSubnetOnlyValidator = errors.New("subnet only validator contains mutated constant fields") ErrDuplicateSubnetOnlyValidator = errors.New("subnet only validator contains conflicting subnetID + nodeID pair") + + errUnexpectedSubnetIDNodeIDLength = fmt.Errorf("expected subnetID+nodeID entry length %d", subnetIDNodeIDEntryLength) ) type SubnetOnlyValidators interface { @@ -172,6 +177,23 @@ type subnetIDNodeID struct { nodeID ids.NodeID } +func (s *subnetIDNodeID) Marshal() []byte { + data := make([]byte, subnetIDNodeIDEntryLength) + copy(data, s.subnetID[:]) + copy(data[ids.IDLen:], s.nodeID[:]) + return data +} + +func (s *subnetIDNodeID) Unmarshal(data []byte) error { + if len(data) != subnetIDNodeIDEntryLength { + return errUnexpectedSubnetIDNodeIDLength + } + + copy(s.subnetID[:], data) + copy(s.nodeID[:], data[ids.IDLen:]) + return nil +} + type subnetOnlyValidatorsDiff struct { numAddedActive int // May be negative modifiedTotalWeight map[ids.ID]uint64 // subnetID -> totalWeight From 827997222c6fd4b947d219ec7588b22c95f86be9 Mon Sep 17 
00:00:00 2001 From: Stephen Buttolph Date: Wed, 9 Oct 2024 18:38:02 -0400 Subject: [PATCH 075/155] Remove unexpected block unwrapping --- vms/platformvm/block/executor/verifier.go | 56 ++++++++++++++----- .../block/executor/verifier_test.go | 5 +- 2 files changed, 45 insertions(+), 16 deletions(-) diff --git a/vms/platformvm/block/executor/verifier.go b/vms/platformvm/block/executor/verifier.go index abcbc566a303..38320050af21 100644 --- a/vms/platformvm/block/executor/verifier.go +++ b/vms/platformvm/block/executor/verifier.go @@ -90,7 +90,8 @@ func (v *verifier) BanffProposalBlock(b *block.BanffProposalBlock) error { } return v.proposalBlock( - &b.ApricotProposalBlock, + b, + b.Tx, onDecisionState, onCommitState, onAbortState, @@ -129,7 +130,12 @@ func (v *verifier) BanffStandardBlock(b *block.BanffStandardBlock) error { } feeCalculator := state.PickFeeCalculator(v.txExecutorBackend.Config, onAcceptState) - return v.standardBlock(&b.ApricotStandardBlock, feeCalculator, onAcceptState) + return v.standardBlock( + b, + b.Transactions, + feeCalculator, + onAcceptState, + ) } func (v *verifier) ApricotAbortBlock(b *block.ApricotAbortBlock) error { @@ -165,7 +171,17 @@ func (v *verifier) ApricotProposalBlock(b *block.ApricotProposalBlock) error { timestamp = onCommitState.GetTimestamp() // Equal to parent timestamp feeCalculator = state.NewStaticFeeCalculator(v.txExecutorBackend.Config, timestamp) ) - return v.proposalBlock(b, nil, onCommitState, onAbortState, feeCalculator, nil, nil, nil) + return v.proposalBlock( + b, + b.Tx, + nil, + onCommitState, + onAbortState, + feeCalculator, + nil, + nil, + nil, + ) } func (v *verifier) ApricotStandardBlock(b *block.ApricotStandardBlock) error { @@ -183,7 +199,12 @@ func (v *verifier) ApricotStandardBlock(b *block.ApricotStandardBlock) error { timestamp = onAcceptState.GetTimestamp() // Equal to parent timestamp feeCalculator = state.NewStaticFeeCalculator(v.txExecutorBackend.Config, timestamp) ) - return v.standardBlock(b, feeCalculator, onAcceptState) + return v.standardBlock( + b, + b.Transactions, + feeCalculator, + onAcceptState, + ) } func (v *verifier) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { @@ -360,7 +381,8 @@ func (v *verifier) commitBlock(b block.Block) error { // proposalBlock populates the state of this block if [nil] is returned func (v *verifier) proposalBlock( - b *block.ApricotProposalBlock, + b block.Block, + tx *txs.Tx, onDecisionState state.Diff, onCommitState state.Diff, onAbortState state.Diff, @@ -374,19 +396,19 @@ func (v *verifier) proposalBlock( OnAbortState: onAbortState, Backend: v.txExecutorBackend, FeeCalculator: feeCalculator, - Tx: b.Tx, + Tx: tx, } - if err := b.Tx.Unsigned.Visit(&txExecutor); err != nil { - txID := b.Tx.ID() + if err := tx.Unsigned.Visit(&txExecutor); err != nil { + txID := tx.ID() v.MarkDropped(txID, err) // cache tx as dropped return err } - onCommitState.AddTx(b.Tx, status.Committed) - onAbortState.AddTx(b.Tx, status.Aborted) + onCommitState.AddTx(tx, status.Committed) + onAbortState.AddTx(tx, status.Aborted) - v.Mempool.Remove(b.Tx) + v.Mempool.Remove(tx) blkID := b.ID() v.blkIDToState[blkID] = &blockState{ @@ -413,16 +435,22 @@ func (v *verifier) proposalBlock( // standardBlock populates the state of this block if [nil] is returned func (v *verifier) standardBlock( - b *block.ApricotStandardBlock, + b block.Block, + txs []*txs.Tx, feeCalculator fee.Calculator, onAcceptState state.Diff, ) error { - inputs, atomicRequests, onAcceptFunc, err := v.processStandardTxs(b.Transactions, 
feeCalculator, onAcceptState, b.Parent()) + inputs, atomicRequests, onAcceptFunc, err := v.processStandardTxs( + txs, + feeCalculator, + onAcceptState, + b.Parent(), + ) if err != nil { return err } - v.Mempool.Remove(b.Transactions...) + v.Mempool.Remove(txs...) blkID := b.ID() v.blkIDToState[blkID] = &blockState{ diff --git a/vms/platformvm/block/executor/verifier_test.go b/vms/platformvm/block/executor/verifier_test.go index 5b786b0e33d8..f57b8fb4ed58 100644 --- a/vms/platformvm/block/executor/verifier_test.go +++ b/vms/platformvm/block/executor/verifier_test.go @@ -1184,8 +1184,9 @@ func TestBlockExecutionWithComplexity(t *testing.T) { } require.Contains(verifier.blkIDToState, blkID) - onAcceptState := verifier.blkIDToState[blkID].onAcceptState - require.Equal(test.expectedFeeState, onAcceptState.GetFeeState()) + blockState := verifier.blkIDToState[blkID] + require.Equal(blk, blockState.statelessBlock) + require.Equal(test.expectedFeeState, blockState.onAcceptState.GetFeeState()) }) } } From 702639ae1ccb19ad3dbefd88e50dadfb2a2f36cf Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 13:38:28 -0400 Subject: [PATCH 076/155] ACP-77: Add ConversionID to state --- vms/platformvm/client.go | 4 +- vms/platformvm/config/execution_config.go | 4 +- .../config/execution_config_test.go | 2 +- vms/platformvm/service.go | 6 +- vms/platformvm/state/diff.go | 43 +++++----- vms/platformvm/state/diff_test.go | 68 ++++++++++----- vms/platformvm/state/mock_chain.go | 31 +++---- vms/platformvm/state/mock_diff.go | 31 +++---- vms/platformvm/state/mock_state.go | 45 +++++----- vms/platformvm/state/state.go | 86 ++++++++++--------- vms/platformvm/state/state_test.go | 60 +++++++------ .../txs/executor/create_chain_test.go | 7 +- .../txs/executor/staker_tx_verification.go | 2 +- .../txs/executor/standard_tx_executor.go | 3 +- .../txs/executor/standard_tx_executor_test.go | 21 +++-- .../txs/executor/subnet_tx_verification.go | 2 +- 16 files changed, 235 insertions(+), 180 deletions(-) diff --git a/vms/platformvm/client.go b/vms/platformvm/client.go index 54554f37d92b..522d359055d8 100644 --- a/vms/platformvm/client.go +++ b/vms/platformvm/client.go @@ -235,7 +235,8 @@ type GetSubnetClientResponse struct { Locktime uint64 // subnet transformation tx ID for a permissionless subnet SubnetTransformationTxID ids.ID - // subnet manager information for a permissionless L1 + // subnet conversion information for a permissionless L1 + ConversionID ids.ID ManagerChainID ids.ID ManagerAddress []byte } @@ -259,6 +260,7 @@ func (c *client) GetSubnet(ctx context.Context, subnetID ids.ID, options ...rpc. 
Threshold: uint32(res.Threshold), Locktime: uint64(res.Locktime), SubnetTransformationTxID: res.SubnetTransformationTxID, + ConversionID: res.ConversionID, ManagerChainID: res.ManagerChainID, ManagerAddress: res.ManagerAddress, }, nil diff --git a/vms/platformvm/config/execution_config.go b/vms/platformvm/config/execution_config.go index fafdaf9b99d2..e5bef1637d05 100644 --- a/vms/platformvm/config/execution_config.go +++ b/vms/platformvm/config/execution_config.go @@ -21,7 +21,7 @@ var DefaultExecutionConfig = ExecutionConfig{ ChainDBCacheSize: 2048, BlockIDCacheSize: 8192, FxOwnerCacheSize: 4 * units.MiB, - SubnetManagerCacheSize: 4 * units.MiB, + SubnetConversionCacheSize: 4 * units.MiB, ChecksumsEnabled: false, MempoolPruneFrequency: 30 * time.Minute, } @@ -37,7 +37,7 @@ type ExecutionConfig struct { ChainDBCacheSize int `json:"chain-db-cache-size"` BlockIDCacheSize int `json:"block-id-cache-size"` FxOwnerCacheSize int `json:"fx-owner-cache-size"` - SubnetManagerCacheSize int `json:"subnet-manager-cache-size"` + SubnetConversionCacheSize int `json:"subnet-conversion-cache-size"` ChecksumsEnabled bool `json:"checksums-enabled"` MempoolPruneFrequency time.Duration `json:"mempool-prune-frequency"` } diff --git a/vms/platformvm/config/execution_config_test.go b/vms/platformvm/config/execution_config_test.go index 5929a75f9063..c938c177add3 100644 --- a/vms/platformvm/config/execution_config_test.go +++ b/vms/platformvm/config/execution_config_test.go @@ -89,7 +89,7 @@ func TestExecutionConfigUnmarshal(t *testing.T) { ChainDBCacheSize: 7, BlockIDCacheSize: 8, FxOwnerCacheSize: 9, - SubnetManagerCacheSize: 10, + SubnetConversionCacheSize: 10, ChecksumsEnabled: true, MempoolPruneFrequency: time.Minute, } diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 91b7810d30df..b7111318c02a 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -439,7 +439,8 @@ type GetSubnetResponse struct { Locktime avajson.Uint64 `json:"locktime"` // subnet transformation tx ID for an elastic subnet SubnetTransformationTxID ids.ID `json:"subnetTransformationTxID"` - // subnet manager information for a permissionless L1 + // subnet conversion information for a permissionless L1 + ConversionID ids.ID `json:"conversionID"` ManagerChainID ids.ID `json:"managerChainID"` ManagerAddress types.JSONByteSlice `json:"managerAddress"` } @@ -490,9 +491,10 @@ func (s *Service) GetSubnet(_ *http.Request, args *GetSubnetArgs, response *GetS return err } - switch chainID, addr, err := s.vm.state.GetSubnetManager(args.SubnetID); err { + switch conversionID, chainID, addr, err := s.vm.state.GetSubnetConversion(args.SubnetID); err { case nil: response.IsPermissioned = false + response.ConversionID = conversionID response.ManagerChainID = chainID response.ManagerAddress = addr case database.ErrNotFound: diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 24bdabfa96da..579b10bb28db 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -52,8 +52,8 @@ type diff struct { addedSubnetIDs []ids.ID // Subnet ID --> Owner of the subnet subnetOwners map[ids.ID]fx.Owner - // Subnet ID --> Manager of the subnet - subnetManagers map[ids.ID]chainIDAndAddr + // Subnet ID --> Conversion of the subnet + subnetConversions map[ids.ID]subnetConversion // Subnet ID --> Tx that transforms the subnet transformedSubnets map[ids.ID]*txs.Tx @@ -76,14 +76,14 @@ func NewDiff( return nil, fmt.Errorf("%w: %s", ErrMissingParentState, parentID) } return &diff{ - parentID: 
parentID, - stateVersions: stateVersions, - timestamp: parentState.GetTimestamp(), - feeState: parentState.GetFeeState(), - accruedFees: parentState.GetAccruedFees(), - expiryDiff: newExpiryDiff(), - subnetOwners: make(map[ids.ID]fx.Owner), - subnetManagers: make(map[ids.ID]chainIDAndAddr), + parentID: parentID, + stateVersions: stateVersions, + timestamp: parentState.GetTimestamp(), + feeState: parentState.GetFeeState(), + accruedFees: parentState.GetAccruedFees(), + expiryDiff: newExpiryDiff(), + subnetOwners: make(map[ids.ID]fx.Owner), + subnetConversions: make(map[ids.ID]subnetConversion), }, nil } @@ -357,23 +357,24 @@ func (d *diff) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { d.subnetOwners[subnetID] = owner } -func (d *diff) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { - if manager, exists := d.subnetManagers[subnetID]; exists { - return manager.ChainID, manager.Addr, nil +func (d *diff) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { + if c, ok := d.subnetConversions[subnetID]; ok { + return c.ConversionID, c.ChainID, c.Addr, nil } // If the subnet manager was not assigned in this diff, ask the parent state. parentState, ok := d.stateVersions.GetState(d.parentID) if !ok { - return ids.Empty, nil, ErrMissingParentState + return ids.Empty, ids.Empty, nil, ErrMissingParentState } - return parentState.GetSubnetManager(subnetID) + return parentState.GetSubnetConversion(subnetID) } -func (d *diff) SetSubnetManager(subnetID ids.ID, chainID ids.ID, addr []byte) { - d.subnetManagers[subnetID] = chainIDAndAddr{ - ChainID: chainID, - Addr: addr, +func (d *diff) SetSubnetConversion(subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) { + d.subnetConversions[subnetID] = subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, } } @@ -576,8 +577,8 @@ func (d *diff) Apply(baseState Chain) error { for subnetID, owner := range d.subnetOwners { baseState.SetSubnetOwner(subnetID, owner) } - for subnetID, manager := range d.subnetManagers { - baseState.SetSubnetManager(subnetID, manager.ChainID, manager.Addr) + for subnetID, c := range d.subnetConversions { + baseState.SetSubnetConversion(subnetID, c.ConversionID, c.ChainID, c.Addr) } return nil } diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 3b091cd67a69..408ac9fe1cbf 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -774,44 +774,68 @@ func TestDiffSubnetOwner(t *testing.T) { func TestDiffSubnetManager(t *testing.T) { var ( - require = require.New(t) - state = newTestState(t, memdb.New()) - newManager = chainIDAndAddr{ids.GenerateTestID(), []byte{1, 2, 3, 4}} - subnetID = ids.GenerateTestID() + require = require.New(t) + state = newTestState(t, memdb.New()) + expectedConversion = subnetConversion{ + ConversionID: ids.GenerateTestID(), + ChainID: ids.GenerateTestID(), + Addr: []byte{1, 2, 3, 4}, + } + subnetID = ids.GenerateTestID() ) - chainID, addr, err := state.GetSubnetManager(subnetID) + conversionID, chainID, addr, err := state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Equal(ids.Empty, chainID) - require.Nil(addr) + require.Zero(subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }) d, err := NewDiffOn(state) require.NoError(err) - chainID, addr, err = d.GetSubnetManager(subnetID) + conversionID, chainID, addr, err = d.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - 
require.Equal(ids.Empty, chainID) - require.Nil(addr) + require.Zero(subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }) - // Setting a subnet manager should be reflected on diff not state - d.SetSubnetManager(subnetID, newManager.ChainID, newManager.Addr) - chainID, addr, err = d.GetSubnetManager(subnetID) + // Setting a subnet conversion should be reflected on diff not state + d.SetSubnetConversion(subnetID, expectedConversion.ConversionID, expectedConversion.ChainID, expectedConversion.Addr) + conversionID, chainID, addr, err = d.GetSubnetConversion(subnetID) require.NoError(err) - require.Equal(newManager.ChainID, chainID) - require.Equal(newManager.Addr, addr) + require.Equal( + expectedConversion, + subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }, + ) - chainID, addr, err = state.GetSubnetManager(subnetID) + conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Equal(ids.Empty, chainID) - require.Nil(addr) + require.Zero(subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }) // State should reflect new subnet manager after diff is applied require.NoError(d.Apply(state)) - - chainID, addr, err = state.GetSubnetManager(subnetID) + conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) require.NoError(err) - require.Equal(newManager.ChainID, chainID) - require.Equal(newManager.Addr, addr) + require.Equal( + expectedConversion, + subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }, + ) } func TestDiffStacking(t *testing.T) { diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index 3b380a87a8b8..f4f8dc661b90 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -353,20 +353,21 @@ func (mr *MockChainMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockChain)(nil).GetPendingValidator), subnetID, nodeID) } -// GetSubnetManager mocks base method. -func (m *MockChain) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { +// GetSubnetConversion mocks base method. +func (m *MockChain) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetManager", subnetID) + ret := m.ctrl.Call(m, "GetSubnetConversion", subnetID) ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].([]byte) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret1, _ := ret[1].(ids.ID) + ret2, _ := ret[2].([]byte) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 } -// GetSubnetManager indicates an expected call of GetSubnetManager. -func (mr *MockChainMockRecorder) GetSubnetManager(subnetID any) *gomock.Call { +// GetSubnetConversion indicates an expected call of GetSubnetConversion. +func (mr *MockChainMockRecorder) GetSubnetConversion(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetManager", reflect.TypeOf((*MockChain)(nil).GetSubnetManager), subnetID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetConversion", reflect.TypeOf((*MockChain)(nil).GetSubnetConversion), subnetID) } // GetSubnetOwner mocks base method. 
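
For orientation while reading this rename, here is a minimal sketch (not part of the patch) of how calling code consumes the accessor in the four-value form used at this point in the series; a later commit below folds the three values into a single SubnetConversion struct. The package name, helper name, and surrounding wiring are illustrative assumptions.

```go
// Illustrative sketch only; assumes the four-value GetSubnetConversion
// signature shown in this commit. Helper and package names are made up.
package example

import (
	"fmt"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/vms/platformvm/state"
)

func describeSubnet(chain state.Chain, subnetID ids.ID) error {
	conversionID, managerChainID, managerAddr, err := chain.GetSubnetConversion(subnetID)
	switch err {
	case nil:
		// The subnet has been converted into an L1: the conversion is
		// identified by conversionID and its validator set is managed by
		// managerAddr on managerChainID.
		fmt.Println(conversionID, managerChainID, managerAddr)
		return nil
	case database.ErrNotFound:
		// No conversion recorded: the subnet is still permissioned.
		return nil
	default:
		return err
	}
}
```
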
@@ -573,16 +574,16 @@ func (mr *MockChainMockRecorder) SetFeeState(f any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeState", reflect.TypeOf((*MockChain)(nil).SetFeeState), f) } -// SetSubnetManager mocks base method. -func (m *MockChain) SetSubnetManager(subnetID, chainID ids.ID, addr []byte) { +// SetSubnetConversion mocks base method. +func (m *MockChain) SetSubnetConversion(subnetID, conversionID, chainID ids.ID, addr []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetManager", subnetID, chainID, addr) + m.ctrl.Call(m, "SetSubnetConversion", subnetID, conversionID, chainID, addr) } -// SetSubnetManager indicates an expected call of SetSubnetManager. -func (mr *MockChainMockRecorder) SetSubnetManager(subnetID, chainID, addr any) *gomock.Call { +// SetSubnetConversion indicates an expected call of SetSubnetConversion. +func (mr *MockChainMockRecorder) SetSubnetConversion(subnetID, conversionID, chainID, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetManager", reflect.TypeOf((*MockChain)(nil).SetSubnetManager), subnetID, chainID, addr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockChain)(nil).SetSubnetConversion), subnetID, conversionID, chainID, addr) } // SetSubnetOwner mocks base method. diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index 77edfde92aaf..bbeed71080b1 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -367,20 +367,21 @@ func (mr *MockDiffMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockDiff)(nil).GetPendingValidator), subnetID, nodeID) } -// GetSubnetManager mocks base method. -func (m *MockDiff) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { +// GetSubnetConversion mocks base method. +func (m *MockDiff) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetManager", subnetID) + ret := m.ctrl.Call(m, "GetSubnetConversion", subnetID) ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].([]byte) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret1, _ := ret[1].(ids.ID) + ret2, _ := ret[2].([]byte) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 } -// GetSubnetManager indicates an expected call of GetSubnetManager. -func (mr *MockDiffMockRecorder) GetSubnetManager(subnetID any) *gomock.Call { +// GetSubnetConversion indicates an expected call of GetSubnetConversion. +func (mr *MockDiffMockRecorder) GetSubnetConversion(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetManager", reflect.TypeOf((*MockDiff)(nil).GetSubnetManager), subnetID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetConversion", reflect.TypeOf((*MockDiff)(nil).GetSubnetConversion), subnetID) } // GetSubnetOwner mocks base method. @@ -587,16 +588,16 @@ func (mr *MockDiffMockRecorder) SetFeeState(f any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeState", reflect.TypeOf((*MockDiff)(nil).SetFeeState), f) } -// SetSubnetManager mocks base method. -func (m *MockDiff) SetSubnetManager(subnetID, chainID ids.ID, addr []byte) { +// SetSubnetConversion mocks base method. 
+func (m *MockDiff) SetSubnetConversion(subnetID, conversionID, chainID ids.ID, addr []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetManager", subnetID, chainID, addr) + m.ctrl.Call(m, "SetSubnetConversion", subnetID, conversionID, chainID, addr) } -// SetSubnetManager indicates an expected call of SetSubnetManager. -func (mr *MockDiffMockRecorder) SetSubnetManager(subnetID, chainID, addr any) *gomock.Call { +// SetSubnetConversion indicates an expected call of SetSubnetConversion. +func (mr *MockDiffMockRecorder) SetSubnetConversion(subnetID, conversionID, chainID, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetManager", reflect.TypeOf((*MockDiff)(nil).SetSubnetManager), subnetID, chainID, addr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockDiff)(nil).SetSubnetConversion), subnetID, conversionID, chainID, addr) } // SetSubnetOwner mocks base method. diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index f602345688b1..ed44205d9b49 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -557,6 +557,23 @@ func (mr *MockStateMockRecorder) GetStatelessBlock(blockID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockState)(nil).GetStatelessBlock), blockID) } +// GetSubnetConversion mocks base method. +func (m *MockState) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetConversion", subnetID) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(ids.ID) + ret2, _ := ret[2].([]byte) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// GetSubnetConversion indicates an expected call of GetSubnetConversion. +func (mr *MockStateMockRecorder) GetSubnetConversion(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetConversion", reflect.TypeOf((*MockState)(nil).GetSubnetConversion), subnetID) +} + // GetSubnetIDs mocks base method. func (m *MockState) GetSubnetIDs() ([]ids.ID, error) { m.ctrl.T.Helper() @@ -572,22 +589,6 @@ func (mr *MockStateMockRecorder) GetSubnetIDs() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetIDs", reflect.TypeOf((*MockState)(nil).GetSubnetIDs)) } -// GetSubnetManager mocks base method. -func (m *MockState) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetManager", subnetID) - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].([]byte) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetSubnetManager indicates an expected call of GetSubnetManager. -func (mr *MockStateMockRecorder) GetSubnetManager(subnetID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetManager", reflect.TypeOf((*MockState)(nil).GetSubnetManager), subnetID) -} - // GetSubnetOwner mocks base method. func (m *MockState) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() @@ -846,16 +847,16 @@ func (mr *MockStateMockRecorder) SetLastAccepted(blkID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), blkID) } -// SetSubnetManager mocks base method. 
-func (m *MockState) SetSubnetManager(subnetID, chainID ids.ID, addr []byte) { +// SetSubnetConversion mocks base method. +func (m *MockState) SetSubnetConversion(subnetID, conversionID, chainID ids.ID, addr []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetManager", subnetID, chainID, addr) + m.ctrl.Call(m, "SetSubnetConversion", subnetID, conversionID, chainID, addr) } -// SetSubnetManager indicates an expected call of SetSubnetManager. -func (mr *MockStateMockRecorder) SetSubnetManager(subnetID, chainID, addr any) *gomock.Call { +// SetSubnetConversion indicates an expected call of SetSubnetConversion. +func (mr *MockStateMockRecorder) SetSubnetConversion(subnetID, conversionID, chainID, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetManager", reflect.TypeOf((*MockState)(nil).SetSubnetManager), subnetID, chainID, addr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockState)(nil).SetSubnetConversion), subnetID, conversionID, chainID, addr) } // SetSubnetOwner mocks base method. diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index f351bf5940b2..c839d4b7034b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -78,7 +78,7 @@ var ( UTXOPrefix = []byte("utxo") SubnetPrefix = []byte("subnet") SubnetOwnerPrefix = []byte("subnetOwner") - SubnetManagerPrefix = []byte("subnetManager") + SubnetConversionPrefix = []byte("subnetConversion") TransformedSubnetPrefix = []byte("transformedSubnet") SupplyPrefix = []byte("supply") ChainPrefix = []byte("chain") @@ -123,8 +123,8 @@ type Chain interface { GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) - GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) - SetSubnetManager(subnetID ids.ID, chainID ids.ID, addr []byte) + GetSubnetConversion(subnetID ids.ID) (conversionID ids.ID, chainID ids.ID, addr []byte, err error) + SetSubnetConversion(subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) AddSubnetTransformation(transformSubnetTx *txs.Tx) @@ -275,7 +275,9 @@ type stateBlk struct { * | '-. list * | '-- txID -> nil * |-. subnetOwners - * | '-. subnetID -> owner + * | '-- subnetID -> owner + * |-. subnetConversions + * | '-- subnetID -> conversionID + chainID + addr * |-. chains * | '-. subnetID * | '-. 
list @@ -364,9 +366,9 @@ type state struct { subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner; if the entry is nil, it is not in the database subnetOwnerDB database.Database - subnetManagers map[ids.ID]chainIDAndAddr // map of subnetID -> manager of the subnet - subnetManagerCache cache.Cacher[ids.ID, chainIDAndAddr] // cache of subnetID -> manager - subnetManagerDB database.Database + subnetConversions map[ids.ID]subnetConversion // map of subnetID -> conversion of the subnet + subnetConversionCache cache.Cacher[ids.ID, subnetConversion] // cache of subnetID -> conversion + subnetConversionDB database.Database transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx; if the entry is nil, it is not in the database @@ -439,9 +441,10 @@ type fxOwnerAndSize struct { size int } -type chainIDAndAddr struct { - ChainID ids.ID `serialize:"true"` - Addr []byte `serialize:"true"` +type subnetConversion struct { + ConversionID ids.ID `serialize:"true"` + ChainID ids.ID `serialize:"true"` + Addr []byte `serialize:"true"` } func txSize(_ ids.ID, tx *txs.Tx) int { @@ -552,12 +555,12 @@ func New( return nil, err } - subnetManagerDB := prefixdb.New(SubnetManagerPrefix, baseDB) - subnetManagerCache, err := metercacher.New[ids.ID, chainIDAndAddr]( + subnetConversionDB := prefixdb.New(SubnetConversionPrefix, baseDB) + subnetConversionCache, err := metercacher.New[ids.ID, subnetConversion]( "subnet_manager_cache", metricsReg, - cache.NewSizedLRU[ids.ID, chainIDAndAddr](execCfg.SubnetManagerCacheSize, func(_ ids.ID, f chainIDAndAddr) int { - return 2*ids.IDLen + len(f.Addr) + cache.NewSizedLRU[ids.ID, subnetConversion](execCfg.SubnetConversionCacheSize, func(_ ids.ID, c subnetConversion) int { + return 3*ids.IDLen + len(c.Addr) }), ) if err != nil { @@ -666,9 +669,9 @@ func New( subnetOwnerDB: subnetOwnerDB, subnetOwnerCache: subnetOwnerCache, - subnetManagers: make(map[ids.ID]chainIDAndAddr), - subnetManagerDB: subnetManagerDB, - subnetManagerCache: subnetManagerCache, + subnetConversions: make(map[ids.ID]subnetConversion), + subnetConversionDB: subnetConversionDB, + subnetConversionCache: subnetConversionCache, transformedSubnets: make(map[ids.ID]*txs.Tx), transformedSubnetCache: transformedSubnetCache, @@ -856,32 +859,33 @@ func (s *state) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { s.subnetOwners[subnetID] = owner } -func (s *state) GetSubnetManager(subnetID ids.ID) (ids.ID, []byte, error) { - if chainIDAndAddr, exists := s.subnetManagers[subnetID]; exists { - return chainIDAndAddr.ChainID, chainIDAndAddr.Addr, nil +func (s *state) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { + if c, ok := s.subnetConversions[subnetID]; ok { + return c.ConversionID, c.ChainID, c.Addr, nil } - if chainIDAndAddr, cached := s.subnetManagerCache.Get(subnetID); cached { - return chainIDAndAddr.ChainID, chainIDAndAddr.Addr, nil + if c, ok := s.subnetConversionCache.Get(subnetID); ok { + return c.ConversionID, c.ChainID, c.Addr, nil } - chainIDAndAddrBytes, err := s.subnetManagerDB.Get(subnetID[:]) + bytes, err := s.subnetConversionDB.Get(subnetID[:]) if err != nil { - return ids.Empty, nil, err + return ids.Empty, ids.Empty, nil, err } - var manager chainIDAndAddr - if _, err := block.GenesisCodec.Unmarshal(chainIDAndAddrBytes, &manager); err != nil { - return ids.Empty, nil, err + var c subnetConversion + if _, err := 
block.GenesisCodec.Unmarshal(bytes, &c); err != nil { + return ids.Empty, ids.Empty, nil, err } - s.subnetManagerCache.Put(subnetID, manager) - return manager.ChainID, manager.Addr, nil + s.subnetConversionCache.Put(subnetID, c) + return c.ConversionID, c.ChainID, c.Addr, nil } -func (s *state) SetSubnetManager(subnetID ids.ID, chainID ids.ID, addr []byte) { - s.subnetManagers[subnetID] = chainIDAndAddr{ - ChainID: chainID, - Addr: addr, +func (s *state) SetSubnetConversion(subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) { + s.subnetConversions[subnetID] = subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, } } @@ -1765,7 +1769,7 @@ func (s *state) write(updateValidators bool, height uint64) error { s.writeUTXOs(), s.writeSubnets(), s.writeSubnetOwners(), - s.writeSubnetManagers(), + s.writeSubnetConversions(), s.writeTransformedSubnets(), s.writeSubnetSupplies(), s.writeChains(), @@ -2364,20 +2368,20 @@ func (s *state) writeSubnetOwners() error { return nil } -func (s *state) writeSubnetManagers() error { - for subnetID, manager := range s.subnetManagers { +func (s *state) writeSubnetConversions() error { + for subnetID, c := range s.subnetConversions { subnetID := subnetID - manager := manager - delete(s.subnetManagers, subnetID) + c := c + delete(s.subnetConversions, subnetID) - managerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &manager) + bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &c) if err != nil { return fmt.Errorf("failed to marshal subnet manager: %w", err) } - s.subnetManagerCache.Put(subnetID, manager) + s.subnetConversionCache.Put(subnetID, c) - if err := s.subnetManagerDB.Put(subnetID[:], managerBytes); err != nil { + if err := s.subnetConversionDB.Put(subnetID[:], bytes); err != nil { return fmt.Errorf("failed to write subnet manager: %w", err) } } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 0f23a3ebc833..ea13a2dd30ac 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1323,52 +1323,58 @@ func TestStateSubnetOwner(t *testing.T) { require.Equal(owner2, owner) } -func TestStateSubnetManager(t *testing.T) { +func TestStateSubnetConversion(t *testing.T) { tests := []struct { name string - setup func(t *testing.T, s State, subnetID ids.ID, chainID ids.ID, addr []byte) + setup func(s *state, subnetID ids.ID, c subnetConversion) }{ { name: "in-memory", - setup: func(_ *testing.T, s State, subnetID ids.ID, chainID ids.ID, addr []byte) { - s.SetSubnetManager(subnetID, chainID, addr) + setup: func(s *state, subnetID ids.ID, c subnetConversion) { + s.SetSubnetConversion(subnetID, c.ConversionID, c.ChainID, c.Addr) }, }, { name: "cache", - setup: func(t *testing.T, s State, subnetID ids.ID, chainID ids.ID, addr []byte) { - subnetManagerCache := s.(*state).subnetManagerCache - - require.Zero(t, subnetManagerCache.Len()) - subnetManagerCache.Put(subnetID, chainIDAndAddr{ - ChainID: chainID, - Addr: addr, - }) - require.Equal(t, 1, subnetManagerCache.Len()) + setup: func(s *state, subnetID ids.ID, c subnetConversion) { + s.subnetConversionCache.Flush() + s.subnetConversionCache.Put(subnetID, c) }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - initializedState := newTestState(t, memdb.New()) + var ( + require = require.New(t) + initializedState = newTestState(t, memdb.New()) + subnetID = ids.GenerateTestID() + expectedConversion = subnetConversion{ + ConversionID: 
ids.GenerateTestID(), + ChainID: ids.GenerateTestID(), + Addr: []byte{'a', 'd', 'd', 'r'}, + } + ) - subnetID := ids.GenerateTestID() - chainID, addr, err := initializedState.GetSubnetManager(subnetID) + conversionID, chainID, addr, err := initializedState.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Equal(ids.Empty, chainID) - require.Nil(addr) - - expectedChainID := ids.GenerateTestID() - expectedAddr := []byte{'a', 'd', 'd', 'r'} + require.Zero(subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }) - test.setup(t, initializedState, subnetID, expectedChainID, expectedAddr) + test.setup(initializedState, subnetID, expectedConversion) - chainID, addr, err = initializedState.GetSubnetManager(subnetID) + conversionID, chainID, addr, err = initializedState.GetSubnetConversion(subnetID) require.NoError(err) - require.Equal(expectedChainID, chainID) - require.Equal(expectedAddr, addr) + require.Equal( + expectedConversion, + subnetConversion{ + ConversionID: conversionID, + ChainID: chainID, + Addr: addr, + }, + ) }) } } diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index 61fead2677a7..cf3e0e8d2cf2 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -286,7 +286,12 @@ func TestEtnaCreateChainTxInvalidWithManagedSubnet(t *testing.T) { builderDiff, err := state.NewDiffOn(stateDiff) require.NoError(err) - stateDiff.SetSubnetManager(subnetID, ids.GenerateTestID(), []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}) + stateDiff.SetSubnetConversion( + subnetID, + ids.GenerateTestID(), + ids.GenerateTestID(), + []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, + ) feeCalculator := state.PickFeeCalculator(env.config, builderDiff) executor := StandardTxExecutor{ diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index d458c01ab259..69b8cd567586 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -308,7 +308,7 @@ func verifyRemoveSubnetValidatorTx( } if backend.Config.UpgradeConfig.IsEtnaActivated(currentTimestamp) { - _, _, err := chainState.GetSubnetManager(tx.Subnet) + _, _, _, err := chainState.GetSubnetConversion(tx.Subnet) if err == nil { return nil, false, fmt.Errorf("%w: %q", ErrRemoveValidatorManagedSubnet, tx.Subnet) } diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index d0a37f5ba82c..14069b1e4e43 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -542,7 +542,8 @@ func (e *StandardTxExecutor) ConvertSubnetTx(tx *txs.ConvertSubnetTx) error { // Produce the UTXOS avax.Produce(e.State, txID, tx.Outs) // Set the new Subnet manager in the database - e.State.SetSubnetManager(tx.Subnet, tx.ChainID, tx.Address) + // TODO: Populate the conversionID + e.State.SetSubnetConversion(tx.Subnet, ids.Empty, tx.ChainID, tx.Address) return nil } diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index e11ad73ac5d9..837af0ba73a5 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -913,7 +913,12 @@ func TestEtnaStandardTxExecutorAddSubnetValidator(t *testing.T) { onAcceptState, 
err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - onAcceptState.SetSubnetManager(subnetID, ids.GenerateTestID(), []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}) + onAcceptState.SetSubnetConversion( + subnetID, + ids.GenerateTestID(), + ids.GenerateTestID(), + []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, + ) executor := StandardTxExecutor{ Backend: &env.backend, @@ -1994,7 +1999,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { name: "attempted to remove subnet validator after subnet manager is set", newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) - env.state.EXPECT().GetSubnetManager(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, nil).AnyTimes() + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), ids.GenerateTestID(), []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, nil).AnyTimes() env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() cfg := &config.Config{ @@ -2245,7 +2250,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { subnetOwner := fxmock.NewOwner(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) - env.state.EXPECT().GetSubnetManager(env.unsignedTx.Subnet).Return(ids.Empty, nil, database.ErrNotFound).Times(1) + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.Empty, ids.Empty, nil, database.ErrNotFound).Times(1) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil) env.flowChecker.EXPECT().VerifySpend( @@ -2284,7 +2289,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { subnetOwner := fxmock.NewOwner(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) - env.state.EXPECT().GetSubnetManager(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), make([]byte, 20), nil) + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), ids.GenerateTestID(), make([]byte, 20), nil) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) @@ -2319,7 +2324,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { subnetOwner := fxmock.NewOwner(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) - env.state.EXPECT().GetSubnetManager(env.unsignedTx.Subnet).Return(ids.Empty, nil, database.ErrNotFound).Times(1) + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.Empty, ids.Empty, nil, database.ErrNotFound).Times(1) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) env.flowChecker.EXPECT().VerifySpend( @@ -2478,7 +2483,7 @@ func 
TestStandardExecutorConvertSubnetTx(t *testing.T) { { name: "invalid if subnet is converted", updateExecutor: func(e *StandardTxExecutor) { - e.State.SetSubnetManager(subnetID, ids.GenerateTestID(), nil) + e.State.SetSubnetConversion(subnetID, ids.GenerateTestID(), ids.GenerateTestID(), nil) }, expectedErr: errIsImmutable, }, @@ -2566,8 +2571,10 @@ func TestStandardExecutorConvertSubnetTx(t *testing.T) { require.Equal(expectedUTXO, utxo) } - stateChainID, stateAddress, err := diff.GetSubnetManager(subnetID) + stateConversionID, stateChainID, stateAddress, err := diff.GetSubnetConversion(subnetID) require.NoError(err) + // TODO: Update this test when we populate the correct conversionID + require.Zero(stateConversionID) require.Equal(chainID, stateChainID) require.Equal(address, stateAddress) }) diff --git a/vms/platformvm/txs/executor/subnet_tx_verification.go b/vms/platformvm/txs/executor/subnet_tx_verification.go index 6e5ecb9a34f5..7466fd78227e 100644 --- a/vms/platformvm/txs/executor/subnet_tx_verification.go +++ b/vms/platformvm/txs/executor/subnet_tx_verification.go @@ -43,7 +43,7 @@ func verifyPoASubnetAuthorization( return nil, err } - _, _, err = chainState.GetSubnetManager(subnetID) + _, _, _, err = chainState.GetSubnetConversion(subnetID) if err == nil { return nil, fmt.Errorf("%q %w", subnetID, errIsImmutable) } From 4b562f05e10ecd9c881f01db8799bb82ec92e1c0 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 13:45:02 -0400 Subject: [PATCH 077/155] nit --- vms/platformvm/state/diff_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 408ac9fe1cbf..3bb156c5096b 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -772,7 +772,7 @@ func TestDiffSubnetOwner(t *testing.T) { require.Equal(owner2, owner) } -func TestDiffSubnetManager(t *testing.T) { +func TestDiffSubnetConversion(t *testing.T) { var ( require = require.New(t) state = newTestState(t, memdb.New()) From 7f3dd5e39afe01672de659518671e36f1c2039bf Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 13:49:13 -0400 Subject: [PATCH 078/155] nit --- vms/platformvm/state/diff.go | 2 +- vms/platformvm/state/diff_test.go | 2 +- vms/platformvm/state/state.go | 4 ++-- vms/platformvm/txs/executor/standard_tx_executor.go | 2 +- vms/platformvm/txs/executor/standard_tx_executor_test.go | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 579b10bb28db..e2cfd648296d 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -362,7 +362,7 @@ func (d *diff) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, err return c.ConversionID, c.ChainID, c.Addr, nil } - // If the subnet manager was not assigned in this diff, ask the parent state. + // If the subnet conversion was not assigned in this diff, ask the parent state. 
parentState, ok := d.stateVersions.GetState(d.parentID) if !ok { return ids.Empty, ids.Empty, nil, ErrMissingParentState diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 3bb156c5096b..c62333a383a9 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -824,7 +824,7 @@ func TestDiffSubnetConversion(t *testing.T) { Addr: addr, }) - // State should reflect new subnet manager after diff is applied + // State should reflect new subnet conversion after diff is applied require.NoError(d.Apply(state)) conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) require.NoError(err) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index c839d4b7034b..d626639bf0f7 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2376,13 +2376,13 @@ func (s *state) writeSubnetConversions() error { bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &c) if err != nil { - return fmt.Errorf("failed to marshal subnet manager: %w", err) + return fmt.Errorf("failed to marshal subnet conversion: %w", err) } s.subnetConversionCache.Put(subnetID, c) if err := s.subnetConversionDB.Put(subnetID[:], bytes); err != nil { - return fmt.Errorf("failed to write subnet manager: %w", err) + return fmt.Errorf("failed to write subnet conversion: %w", err) } } return nil diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index 14069b1e4e43..9cc9eddf723b 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -541,7 +541,7 @@ func (e *StandardTxExecutor) ConvertSubnetTx(tx *txs.ConvertSubnetTx) error { avax.Consume(e.State, tx.Ins) // Produce the UTXOS avax.Produce(e.State, txID, tx.Outs) - // Set the new Subnet manager in the database + // Track the subnet conversion in the database // TODO: Populate the conversionID e.State.SetSubnetConversion(tx.Subnet, ids.Empty, tx.ChainID, tx.Address) return nil diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 837af0ba73a5..64686a1c744e 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -1996,7 +1996,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { expectedErr: ErrFlowCheckFailed, }, { - name: "attempted to remove subnet validator after subnet manager is set", + name: "attempted to remove subnet validator after subnet conversion has occurred", newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), ids.GenerateTestID(), []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, nil).AnyTimes() @@ -2281,7 +2281,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { err: ErrFlowCheckFailed, }, { - name: "invalid if subnet manager is set", + name: "invalid after subnet conversion", newExecutor: func(ctrl *gomock.Controller) (*txs.TransformSubnetTx, *StandardTxExecutor) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) From f40c1b3ff8bc3877ec4e5edde410ec62b6fc27b0 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 14:15:41 -0400 Subject: [PATCH 079/155] nit --- vms/platformvm/state/diff_test.go | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index c62333a383a9..7441786ac1bf 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -776,12 +776,12 @@ func TestDiffSubnetConversion(t *testing.T) { var ( require = require.New(t) state = newTestState(t, memdb.New()) + subnetID = ids.GenerateTestID() expectedConversion = subnetConversion{ ConversionID: ids.GenerateTestID(), ChainID: ids.GenerateTestID(), Addr: []byte{1, 2, 3, 4}, } - subnetID = ids.GenerateTestID() ) conversionID, chainID, addr, err := state.GetSubnetConversion(subnetID) From 51f009b9e972be459794aadba2fd67a0ad3417f8 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 14:23:53 -0400 Subject: [PATCH 080/155] nit --- vms/platformvm/state/state.go | 2 +- vms/platformvm/state/state_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index d626639bf0f7..04ed6b619b4d 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -557,7 +557,7 @@ func New( subnetConversionDB := prefixdb.New(SubnetConversionPrefix, baseDB) subnetConversionCache, err := metercacher.New[ids.ID, subnetConversion]( - "subnet_manager_cache", + "subnet_conversion_cache", metricsReg, cache.NewSizedLRU[ids.ID, subnetConversion](execCfg.SubnetConversionCacheSize, func(_ ids.ID, c subnetConversion) int { return 3*ids.IDLen + len(c.Addr) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index ea13a2dd30ac..f2e0483a25c6 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1346,7 +1346,7 @@ func TestStateSubnetConversion(t *testing.T) { t.Run(test.name, func(t *testing.T) { var ( require = require.New(t) - initializedState = newTestState(t, memdb.New()) + state = newTestState(t, memdb.New()) subnetID = ids.GenerateTestID() expectedConversion = subnetConversion{ ConversionID: ids.GenerateTestID(), @@ -1355,7 +1355,7 @@ func TestStateSubnetConversion(t *testing.T) { } ) - conversionID, chainID, addr, err := initializedState.GetSubnetConversion(subnetID) + conversionID, chainID, addr, err := state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) require.Zero(subnetConversion{ ConversionID: conversionID, @@ -1363,9 +1363,9 @@ func TestStateSubnetConversion(t *testing.T) { Addr: addr, }) - test.setup(initializedState, subnetID, expectedConversion) + test.setup(state, subnetID, expectedConversion) - conversionID, chainID, addr, err = initializedState.GetSubnetConversion(subnetID) + conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) require.NoError(err) require.Equal( expectedConversion, From 05a7b136901e4dfed52d1eed9e6cf53be00ad391 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 14:26:51 -0400 Subject: [PATCH 081/155] nit --- vms/platformvm/service.go | 1 + 1 file changed, 1 insertion(+) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index b7111318c02a..7c3bbc4596be 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -498,6 +498,7 @@ func (s *Service) GetSubnet(_ *http.Request, args *GetSubnetArgs, response *GetS response.ManagerChainID = chainID response.ManagerAddress = addr case database.ErrNotFound: + response.ConversionID = ids.Empty response.ManagerChainID = ids.Empty response.ManagerAddress = []byte(nil) default: From 
a1db89e32a8265019b13bc5207e6d9b1a859b4f7 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 14:31:26 -0400 Subject: [PATCH 082/155] nit --- vms/platformvm/state/state.go | 1 + 1 file changed, 1 insertion(+) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 04ed6b619b4d..0c92fde71ec5 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1795,6 +1795,7 @@ func (s *state) Close() error { s.rewardUTXODB.Close(), s.utxoDB.Close(), s.subnetBaseDB.Close(), + s.subnetConversionDB.Close(), s.transformedSubnetDB.Close(), s.supplyDB.Close(), s.chainDB.Close(), From 575b36ac37b279b5cdef3fdf0d31e4f8c3cc23fb Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 16:19:38 -0400 Subject: [PATCH 083/155] remove unneeded assignment --- vms/platformvm/state/state.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 0c92fde71ec5..a059323f6874 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2371,8 +2371,6 @@ func (s *state) writeSubnetOwners() error { func (s *state) writeSubnetConversions() error { for subnetID, c := range s.subnetConversions { - subnetID := subnetID - c := c delete(s.subnetConversions, subnetID) bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &c) From d2d0f69976f54fd3827c840260fb3d9ec667345d Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 16:26:46 -0400 Subject: [PATCH 084/155] update comment --- vms/platformvm/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/client.go b/vms/platformvm/client.go index 522d359055d8..505c545bdded 100644 --- a/vms/platformvm/client.go +++ b/vms/platformvm/client.go @@ -235,7 +235,7 @@ type GetSubnetClientResponse struct { Locktime uint64 // subnet transformation tx ID for a permissionless subnet SubnetTransformationTxID ids.ID - // subnet conversion information for a permissionless L1 + // subnet conversion information for an L1 ConversionID ids.ID ManagerChainID ids.ID ManagerAddress []byte From b6b6515ffa8458fc5df186f990bbeca3a64b5caa Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 16:51:41 -0400 Subject: [PATCH 085/155] update comment --- vms/platformvm/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 7c3bbc4596be..72be28e40f4d 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -439,7 +439,7 @@ type GetSubnetResponse struct { Locktime avajson.Uint64 `json:"locktime"` // subnet transformation tx ID for an elastic subnet SubnetTransformationTxID ids.ID `json:"subnetTransformationTxID"` - // subnet conversion information for a permissionless L1 + // subnet conversion information for an L1 ConversionID ids.ID `json:"conversionID"` ManagerChainID ids.ID `json:"managerChainID"` ManagerAddress types.JSONByteSlice `json:"managerAddress"` From 6ffcd1b30910226ef2dc8bf9c133598eb4bc49d6 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 19 Oct 2024 17:27:33 -0400 Subject: [PATCH 086/155] update doc --- vms/platformvm/service.md | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/service.md b/vms/platformvm/service.md index f4cd28cd35f1..81c929b0ecda 100644 --- a/vms/platformvm/service.md +++ b/vms/platformvm/service.md @@ -1204,7 +1204,7 @@ Testnet: U8iRqJoiJm8xZHAacmvYyZVwqQx6uDNtQeP3CQ6fcgQk3JqnK ### 
`platform.getSubnet` -Get owners and elastic info about the Subnet. +Get owners and elastic info about the Subnet or L1. **Signature:** @@ -1217,7 +1217,10 @@ platform.getSubnet({ controlKeys: []string, threshold: string, locktime: string, - subnetTransformationTxID: string + subnetTransformationTxID: string, + conversionID: string, + managerChainID: string, + managerAddress: string } ``` @@ -1226,8 +1229,10 @@ platform.getSubnet({ a permissioned subnet. If the Subnet is a PoS Subnet, then `threshold` will be `0` and `controlKeys` will be empty. - changes can not be made into the subnet until `locktime` is in the past. -- `subnetTransformationTxID` is the ID of the transaction that changed the subnet into a elastic one, - for when this change was performed. +- `subnetTransformationTxID` is the ID of the transaction that changed the subnet into an elastic one, if it exists. +- `conversionID` is the ID of the conversion from a permissioned Subnet into an L1, if it exists. +- `managerChainID` is the ChainID that has the ability to modify this L1s validator set, if it exists. +- `managerAddress` is the address that has the ability to modify this L1s validator set, if it exists. **Example Call:** @@ -1250,7 +1255,10 @@ curl -X POST --data '{ "controlKeys": ["P-fuji1ztvstx6naeg6aarfd047fzppdt8v4gsah88e0c","P-fuji193kvt4grqewv6ce2x59wnhydr88xwdgfcedyr3"], "threshold": "1", "locktime": "0", - "subnetTransformationTxID": "11111111111111111111111111111111LpoYY" + "subnetTransformationTxID": "11111111111111111111111111111111LpoYY", + "conversionID": "11111111111111111111111111111111LpoYY", + "managerChainID": "11111111111111111111111111111111LpoYY", + "managerAddress": null }, "id": 1 } From ad001806ca9f191294126842e766b8043450b5bb Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 20 Oct 2024 13:15:23 -0400 Subject: [PATCH 087/155] Address comments --- vms/platformvm/service.go | 8 +-- vms/platformvm/service.md | 2 +- vms/platformvm/state/diff.go | 20 +++----- vms/platformvm/state/diff_test.go | 50 +++++-------------- vms/platformvm/state/mock_chain.go | 18 +++---- vms/platformvm/state/mock_diff.go | 18 +++---- vms/platformvm/state/mock_state.go | 18 +++---- vms/platformvm/state/state.go | 38 +++++++------- vms/platformvm/state/state_test.go | 30 ++++------- .../txs/executor/staker_tx_verification.go | 2 +- .../txs/executor/standard_tx_executor.go | 11 +++- .../txs/executor/subnet_tx_verification.go | 2 +- 12 files changed, 86 insertions(+), 131 deletions(-) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 72be28e40f4d..22153a97ece6 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -491,12 +491,12 @@ func (s *Service) GetSubnet(_ *http.Request, args *GetSubnetArgs, response *GetS return err } - switch conversionID, chainID, addr, err := s.vm.state.GetSubnetConversion(args.SubnetID); err { + switch c, err := s.vm.state.GetSubnetConversion(args.SubnetID); err { case nil: response.IsPermissioned = false - response.ConversionID = conversionID - response.ManagerChainID = chainID - response.ManagerAddress = addr + response.ConversionID = c.ConversionID + response.ManagerChainID = c.ChainID + response.ManagerAddress = c.Addr case database.ErrNotFound: response.ConversionID = ids.Empty response.ManagerChainID = ids.Empty diff --git a/vms/platformvm/service.md b/vms/platformvm/service.md index 81c929b0ecda..dbb12907694b 100644 --- a/vms/platformvm/service.md +++ b/vms/platformvm/service.md @@ -1204,7 +1204,7 @@ Testnet: 
U8iRqJoiJm8xZHAacmvYyZVwqQx6uDNtQeP3CQ6fcgQk3JqnK ### `platform.getSubnet` -Get owners and elastic info about the Subnet or L1. +Get owners and info about the Subnet or L1. **Signature:** diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index e2cfd648296d..9fe6a62363c0 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -53,7 +53,7 @@ type diff struct { // Subnet ID --> Owner of the subnet subnetOwners map[ids.ID]fx.Owner // Subnet ID --> Conversion of the subnet - subnetConversions map[ids.ID]subnetConversion + subnetConversions map[ids.ID]SubnetConversion // Subnet ID --> Tx that transforms the subnet transformedSubnets map[ids.ID]*txs.Tx @@ -83,7 +83,7 @@ func NewDiff( accruedFees: parentState.GetAccruedFees(), expiryDiff: newExpiryDiff(), subnetOwners: make(map[ids.ID]fx.Owner), - subnetConversions: make(map[ids.ID]subnetConversion), + subnetConversions: make(map[ids.ID]SubnetConversion), }, nil } @@ -357,25 +357,21 @@ func (d *diff) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { d.subnetOwners[subnetID] = owner } -func (d *diff) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { +func (d *diff) GetSubnetConversion(subnetID ids.ID) (SubnetConversion, error) { if c, ok := d.subnetConversions[subnetID]; ok { - return c.ConversionID, c.ChainID, c.Addr, nil + return c, nil } // If the subnet conversion was not assigned in this diff, ask the parent state. parentState, ok := d.stateVersions.GetState(d.parentID) if !ok { - return ids.Empty, ids.Empty, nil, ErrMissingParentState + return SubnetConversion{}, ErrMissingParentState } return parentState.GetSubnetConversion(subnetID) } -func (d *diff) SetSubnetConversion(subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) { - d.subnetConversions[subnetID] = subnetConversion{ - ConversionID: conversionID, - ChainID: chainID, - Addr: addr, - } +func (d *diff) SetSubnetConversion(subnetID ids.ID, c SubnetConversion) { + d.subnetConversions[subnetID] = c } func (d *diff) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { @@ -578,7 +574,7 @@ func (d *diff) Apply(baseState Chain) error { baseState.SetSubnetOwner(subnetID, owner) } for subnetID, c := range d.subnetConversions { - baseState.SetSubnetConversion(subnetID, c.ConversionID, c.ChainID, c.Addr) + baseState.SetSubnetConversion(subnetID, c) } return nil } diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 7441786ac1bf..013a918b60fa 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -777,65 +777,39 @@ func TestDiffSubnetConversion(t *testing.T) { require = require.New(t) state = newTestState(t, memdb.New()) subnetID = ids.GenerateTestID() - expectedConversion = subnetConversion{ + expectedConversion = SubnetConversion{ ConversionID: ids.GenerateTestID(), ChainID: ids.GenerateTestID(), Addr: []byte{1, 2, 3, 4}, } ) - conversionID, chainID, addr, err := state.GetSubnetConversion(subnetID) + actualConversion, err := state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Zero(subnetConversion{ - ConversionID: conversionID, - ChainID: chainID, - Addr: addr, - }) + require.Zero(actualConversion) d, err := NewDiffOn(state) require.NoError(err) - conversionID, chainID, addr, err = d.GetSubnetConversion(subnetID) + actualConversion, err = d.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Zero(subnetConversion{ - ConversionID: conversionID, - ChainID: chainID, - Addr: 
addr, - }) + require.Zero(actualConversion) // Setting a subnet conversion should be reflected on diff not state - d.SetSubnetConversion(subnetID, expectedConversion.ConversionID, expectedConversion.ChainID, expectedConversion.Addr) - conversionID, chainID, addr, err = d.GetSubnetConversion(subnetID) + d.SetSubnetConversion(subnetID, expectedConversion) + actualConversion, err = d.GetSubnetConversion(subnetID) require.NoError(err) - require.Equal( - expectedConversion, - subnetConversion{ - ConversionID: conversionID, - ChainID: chainID, - Addr: addr, - }, - ) + require.Equal(expectedConversion, actualConversion) - conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) + actualConversion, err = state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Zero(subnetConversion{ - ConversionID: conversionID, - ChainID: chainID, - Addr: addr, - }) + require.Zero(actualConversion) // State should reflect new subnet conversion after diff is applied require.NoError(d.Apply(state)) - conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) + actualConversion, err = state.GetSubnetConversion(subnetID) require.NoError(err) - require.Equal( - expectedConversion, - subnetConversion{ - ConversionID: conversionID, - ChainID: chainID, - Addr: addr, - }, - ) + require.Equal(expectedConversion, actualConversion) } func TestDiffStacking(t *testing.T) { diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index f4f8dc661b90..27daeae3a101 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -354,14 +354,12 @@ func (mr *MockChainMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomo } // GetSubnetConversion mocks base method. -func (m *MockChain) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { +func (m *MockChain) GetSubnetConversion(subnetID ids.ID) (SubnetConversion, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSubnetConversion", subnetID) - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].(ids.ID) - ret2, _ := ret[2].([]byte) - ret3, _ := ret[3].(error) - return ret0, ret1, ret2, ret3 + ret0, _ := ret[0].(SubnetConversion) + ret1, _ := ret[1].(error) + return ret0, ret1 } // GetSubnetConversion indicates an expected call of GetSubnetConversion. @@ -575,15 +573,15 @@ func (mr *MockChainMockRecorder) SetFeeState(f any) *gomock.Call { } // SetSubnetConversion mocks base method. -func (m *MockChain) SetSubnetConversion(subnetID, conversionID, chainID ids.ID, addr []byte) { +func (m *MockChain) SetSubnetConversion(subnetID ids.ID, c SubnetConversion) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetConversion", subnetID, conversionID, chainID, addr) + m.ctrl.Call(m, "SetSubnetConversion", subnetID, c) } // SetSubnetConversion indicates an expected call of SetSubnetConversion. -func (mr *MockChainMockRecorder) SetSubnetConversion(subnetID, conversionID, chainID, addr any) *gomock.Call { +func (mr *MockChainMockRecorder) SetSubnetConversion(subnetID, c any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockChain)(nil).SetSubnetConversion), subnetID, conversionID, chainID, addr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockChain)(nil).SetSubnetConversion), subnetID, c) } // SetSubnetOwner mocks base method. 
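
As a companion to the mock updates above, a minimal sketch (not part of the patch) of the struct-based API this commit moves to, where the conversion is passed around as one state.SubnetConversion value rather than three separate arguments and returns; the helper name and test-style values are illustrative assumptions.

```go
// Illustrative sketch only; assumes the SubnetConversion-struct signatures
// introduced in this commit. Helper and package names are made up.
package example

import (
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/vms/platformvm/state"
)

func recordConversion(d state.Diff, subnetID ids.ID) (state.SubnetConversion, error) {
	conversion := state.SubnetConversion{
		ConversionID: ids.GenerateTestID(),
		ChainID:      ids.GenerateTestID(),
		Addr:         []byte("address"),
	}
	// Stage the conversion on the diff; it only reaches the base state
	// once the diff is applied.
	d.SetSubnetConversion(subnetID, conversion)
	return d.GetSubnetConversion(subnetID)
}
```
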
diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index bbeed71080b1..8732fc49b406 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -368,14 +368,12 @@ func (mr *MockDiffMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomoc } // GetSubnetConversion mocks base method. -func (m *MockDiff) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { +func (m *MockDiff) GetSubnetConversion(subnetID ids.ID) (SubnetConversion, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSubnetConversion", subnetID) - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].(ids.ID) - ret2, _ := ret[2].([]byte) - ret3, _ := ret[3].(error) - return ret0, ret1, ret2, ret3 + ret0, _ := ret[0].(SubnetConversion) + ret1, _ := ret[1].(error) + return ret0, ret1 } // GetSubnetConversion indicates an expected call of GetSubnetConversion. @@ -589,15 +587,15 @@ func (mr *MockDiffMockRecorder) SetFeeState(f any) *gomock.Call { } // SetSubnetConversion mocks base method. -func (m *MockDiff) SetSubnetConversion(subnetID, conversionID, chainID ids.ID, addr []byte) { +func (m *MockDiff) SetSubnetConversion(subnetID ids.ID, c SubnetConversion) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetConversion", subnetID, conversionID, chainID, addr) + m.ctrl.Call(m, "SetSubnetConversion", subnetID, c) } // SetSubnetConversion indicates an expected call of SetSubnetConversion. -func (mr *MockDiffMockRecorder) SetSubnetConversion(subnetID, conversionID, chainID, addr any) *gomock.Call { +func (mr *MockDiffMockRecorder) SetSubnetConversion(subnetID, c any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockDiff)(nil).SetSubnetConversion), subnetID, conversionID, chainID, addr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockDiff)(nil).SetSubnetConversion), subnetID, c) } // SetSubnetOwner mocks base method. diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index ed44205d9b49..a17593982572 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -558,14 +558,12 @@ func (mr *MockStateMockRecorder) GetStatelessBlock(blockID any) *gomock.Call { } // GetSubnetConversion mocks base method. -func (m *MockState) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { +func (m *MockState) GetSubnetConversion(subnetID ids.ID) (SubnetConversion, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSubnetConversion", subnetID) - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].(ids.ID) - ret2, _ := ret[2].([]byte) - ret3, _ := ret[3].(error) - return ret0, ret1, ret2, ret3 + ret0, _ := ret[0].(SubnetConversion) + ret1, _ := ret[1].(error) + return ret0, ret1 } // GetSubnetConversion indicates an expected call of GetSubnetConversion. @@ -848,15 +846,15 @@ func (mr *MockStateMockRecorder) SetLastAccepted(blkID any) *gomock.Call { } // SetSubnetConversion mocks base method. -func (m *MockState) SetSubnetConversion(subnetID, conversionID, chainID ids.ID, addr []byte) { +func (m *MockState) SetSubnetConversion(subnetID ids.ID, c SubnetConversion) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSubnetConversion", subnetID, conversionID, chainID, addr) + m.ctrl.Call(m, "SetSubnetConversion", subnetID, c) } // SetSubnetConversion indicates an expected call of SetSubnetConversion. 
-func (mr *MockStateMockRecorder) SetSubnetConversion(subnetID, conversionID, chainID, addr any) *gomock.Call { +func (mr *MockStateMockRecorder) SetSubnetConversion(subnetID, c any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockState)(nil).SetSubnetConversion), subnetID, conversionID, chainID, addr) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetConversion", reflect.TypeOf((*MockState)(nil).SetSubnetConversion), subnetID, c) } // SetSubnetOwner mocks base method. diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index a059323f6874..58c2056570bd 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -123,8 +123,8 @@ type Chain interface { GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) - GetSubnetConversion(subnetID ids.ID) (conversionID ids.ID, chainID ids.ID, addr []byte, err error) - SetSubnetConversion(subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) + GetSubnetConversion(subnetID ids.ID) (SubnetConversion, error) + SetSubnetConversion(subnetID ids.ID, c SubnetConversion) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) AddSubnetTransformation(transformSubnetTx *txs.Tx) @@ -366,8 +366,8 @@ type state struct { subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner; if the entry is nil, it is not in the database subnetOwnerDB database.Database - subnetConversions map[ids.ID]subnetConversion // map of subnetID -> conversion of the subnet - subnetConversionCache cache.Cacher[ids.ID, subnetConversion] // cache of subnetID -> conversion + subnetConversions map[ids.ID]SubnetConversion // map of subnetID -> conversion of the subnet + subnetConversionCache cache.Cacher[ids.ID, SubnetConversion] // cache of subnetID -> conversion subnetConversionDB database.Database transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx @@ -441,7 +441,7 @@ type fxOwnerAndSize struct { size int } -type subnetConversion struct { +type SubnetConversion struct { ConversionID ids.ID `serialize:"true"` ChainID ids.ID `serialize:"true"` Addr []byte `serialize:"true"` @@ -556,10 +556,10 @@ func New( } subnetConversionDB := prefixdb.New(SubnetConversionPrefix, baseDB) - subnetConversionCache, err := metercacher.New[ids.ID, subnetConversion]( + subnetConversionCache, err := metercacher.New[ids.ID, SubnetConversion]( "subnet_conversion_cache", metricsReg, - cache.NewSizedLRU[ids.ID, subnetConversion](execCfg.SubnetConversionCacheSize, func(_ ids.ID, c subnetConversion) int { + cache.NewSizedLRU[ids.ID, SubnetConversion](execCfg.SubnetConversionCacheSize, func(_ ids.ID, c SubnetConversion) int { return 3*ids.IDLen + len(c.Addr) }), ) @@ -669,7 +669,7 @@ func New( subnetOwnerDB: subnetOwnerDB, subnetOwnerCache: subnetOwnerCache, - subnetConversions: make(map[ids.ID]subnetConversion), + subnetConversions: make(map[ids.ID]SubnetConversion), subnetConversionDB: subnetConversionDB, subnetConversionCache: subnetConversionCache, @@ -859,34 +859,30 @@ func (s *state) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { s.subnetOwners[subnetID] = owner } -func (s *state) GetSubnetConversion(subnetID ids.ID) (ids.ID, ids.ID, []byte, error) { +func (s *state) GetSubnetConversion(subnetID ids.ID) (SubnetConversion, error) { if c, ok := s.subnetConversions[subnetID]; ok { - return c.ConversionID, c.ChainID, c.Addr, nil + return c, nil } if c, ok := 
s.subnetConversionCache.Get(subnetID); ok { - return c.ConversionID, c.ChainID, c.Addr, nil + return c, nil } bytes, err := s.subnetConversionDB.Get(subnetID[:]) if err != nil { - return ids.Empty, ids.Empty, nil, err + return SubnetConversion{}, err } - var c subnetConversion + var c SubnetConversion if _, err := block.GenesisCodec.Unmarshal(bytes, &c); err != nil { - return ids.Empty, ids.Empty, nil, err + return SubnetConversion{}, err } s.subnetConversionCache.Put(subnetID, c) - return c.ConversionID, c.ChainID, c.Addr, nil + return c, nil } -func (s *state) SetSubnetConversion(subnetID ids.ID, conversionID ids.ID, chainID ids.ID, addr []byte) { - s.subnetConversions[subnetID] = subnetConversion{ - ConversionID: conversionID, - ChainID: chainID, - Addr: addr, - } +func (s *state) SetSubnetConversion(subnetID ids.ID, c SubnetConversion) { + s.subnetConversions[subnetID] = c } func (s *state) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index f2e0483a25c6..d29129500435 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1326,18 +1326,17 @@ func TestStateSubnetOwner(t *testing.T) { func TestStateSubnetConversion(t *testing.T) { tests := []struct { name string - setup func(s *state, subnetID ids.ID, c subnetConversion) + setup func(s *state, subnetID ids.ID, c SubnetConversion) }{ { name: "in-memory", - setup: func(s *state, subnetID ids.ID, c subnetConversion) { - s.SetSubnetConversion(subnetID, c.ConversionID, c.ChainID, c.Addr) + setup: func(s *state, subnetID ids.ID, c SubnetConversion) { + s.SetSubnetConversion(subnetID, c) }, }, { name: "cache", - setup: func(s *state, subnetID ids.ID, c subnetConversion) { - s.subnetConversionCache.Flush() + setup: func(s *state, subnetID ids.ID, c SubnetConversion) { s.subnetConversionCache.Put(subnetID, c) }, }, @@ -1348,33 +1347,22 @@ func TestStateSubnetConversion(t *testing.T) { require = require.New(t) state = newTestState(t, memdb.New()) subnetID = ids.GenerateTestID() - expectedConversion = subnetConversion{ + expectedConversion = SubnetConversion{ ConversionID: ids.GenerateTestID(), ChainID: ids.GenerateTestID(), Addr: []byte{'a', 'd', 'd', 'r'}, } ) - conversionID, chainID, addr, err := state.GetSubnetConversion(subnetID) + actualConversion, err := state.GetSubnetConversion(subnetID) require.ErrorIs(err, database.ErrNotFound) - require.Zero(subnetConversion{ - ConversionID: conversionID, - ChainID: chainID, - Addr: addr, - }) + require.Zero(actualConversion) test.setup(state, subnetID, expectedConversion) - conversionID, chainID, addr, err = state.GetSubnetConversion(subnetID) + actualConversion, err = state.GetSubnetConversion(subnetID) require.NoError(err) - require.Equal( - expectedConversion, - subnetConversion{ - ConversionID: conversionID, - ChainID: chainID, - Addr: addr, - }, - ) + require.Equal(expectedConversion, actualConversion) }) } } diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index 69b8cd567586..72ef8561ad43 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -308,7 +308,7 @@ func verifyRemoveSubnetValidatorTx( } if backend.Config.UpgradeConfig.IsEtnaActivated(currentTimestamp) { - _, _, _, err := chainState.GetSubnetConversion(tx.Subnet) + _, err := chainState.GetSubnetConversion(tx.Subnet) if err == nil { return nil, false, 
fmt.Errorf("%w: %q", ErrRemoveValidatorManagedSubnet, tx.Subnet) } diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index 9cc9eddf723b..1c08880a94e0 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -542,8 +542,15 @@ func (e *StandardTxExecutor) ConvertSubnetTx(tx *txs.ConvertSubnetTx) error { // Produce the UTXOS avax.Produce(e.State, txID, tx.Outs) // Track the subnet conversion in the database - // TODO: Populate the conversionID - e.State.SetSubnetConversion(tx.Subnet, ids.Empty, tx.ChainID, tx.Address) + e.State.SetSubnetConversion( + tx.Subnet, + state.SubnetConversion{ + // TODO: Populate the conversionID + ConversionID: ids.Empty, + ChainID: tx.ChainID, + Addr: tx.Address, + }, + ) return nil } diff --git a/vms/platformvm/txs/executor/subnet_tx_verification.go b/vms/platformvm/txs/executor/subnet_tx_verification.go index 7466fd78227e..59acbe650491 100644 --- a/vms/platformvm/txs/executor/subnet_tx_verification.go +++ b/vms/platformvm/txs/executor/subnet_tx_verification.go @@ -43,7 +43,7 @@ func verifyPoASubnetAuthorization( return nil, err } - _, _, _, err = chainState.GetSubnetConversion(subnetID) + _, err = chainState.GetSubnetConversion(subnetID) if err == nil { return nil, fmt.Errorf("%q %w", subnetID, errIsImmutable) } From f1b07d294a98b6035cdd5e8804ec602826fb857b Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 20 Oct 2024 13:30:34 -0400 Subject: [PATCH 088/155] nit --- .../txs/executor/create_chain_test.go | 8 ++-- .../txs/executor/standard_tx_executor_test.go | 41 ++++++++++++++----- 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index cf3e0e8d2cf2..7f9919e4a8f5 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -288,9 +288,11 @@ func TestEtnaCreateChainTxInvalidWithManagedSubnet(t *testing.T) { stateDiff.SetSubnetConversion( subnetID, - ids.GenerateTestID(), - ids.GenerateTestID(), - []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, + state.SubnetConversion{ + ConversionID: ids.GenerateTestID(), + ChainID: ids.GenerateTestID(), + Addr: []byte("address"), + }, ) feeCalculator := state.PickFeeCalculator(env.config, builderDiff) diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 64686a1c744e..e3f6a67b21b3 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -915,9 +915,11 @@ func TestEtnaStandardTxExecutorAddSubnetValidator(t *testing.T) { onAcceptState.SetSubnetConversion( subnetID, - ids.GenerateTestID(), - ids.GenerateTestID(), - []byte{'a', 'd', 'd', 'r', 'e', 's', 's'}, + state.SubnetConversion{ + ConversionID: ids.GenerateTestID(), + ChainID: ids.GenerateTestID(), + Addr: []byte("address"), + }, ) executor := StandardTxExecutor{ @@ -1999,7 +2001,14 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { name: "attempted to remove subnet validator after subnet conversion has occurred", newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) - env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), ids.GenerateTestID(), []byte{'a', 'd', 
'd', 'r', 'e', 's', 's'}, nil).AnyTimes() + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return( + state.SubnetConversion{ + ConversionID: ids.GenerateTestID(), + ChainID: ids.GenerateTestID(), + Addr: []byte("address"), + }, + nil, + ).AnyTimes() env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() cfg := &config.Config{ @@ -2483,7 +2492,14 @@ func TestStandardExecutorConvertSubnetTx(t *testing.T) { { name: "invalid if subnet is converted", updateExecutor: func(e *StandardTxExecutor) { - e.State.SetSubnetConversion(subnetID, ids.GenerateTestID(), ids.GenerateTestID(), nil) + e.State.SetSubnetConversion( + subnetID, + state.SubnetConversion{ + ConversionID: ids.GenerateTestID(), + ChainID: ids.GenerateTestID(), + Addr: utils.RandomBytes(32), + }, + ) }, expectedErr: errIsImmutable, }, @@ -2571,12 +2587,17 @@ func TestStandardExecutorConvertSubnetTx(t *testing.T) { require.Equal(expectedUTXO, utxo) } - stateConversionID, stateChainID, stateAddress, err := diff.GetSubnetConversion(subnetID) + stateConversion, err := diff.GetSubnetConversion(subnetID) require.NoError(err) - // TODO: Update this test when we populate the correct conversionID - require.Zero(stateConversionID) - require.Equal(chainID, stateChainID) - require.Equal(address, stateAddress) + require.Equal( + state.SubnetConversion{ + // TODO: Specify the correct conversionID + ConversionID: ids.Empty, + ChainID: chainID, + Addr: address, + }, + stateConversion, + ) }) } } From 7fb99c068b63d5937d098e088033cb009e394c1d Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 20 Oct 2024 15:21:42 -0400 Subject: [PATCH 089/155] fix unit tests --- .../txs/executor/standard_tx_executor_test.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index e3f6a67b21b3..0301748101bc 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -2259,7 +2259,10 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { subnetOwner := fxmock.NewOwner(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) - env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.Empty, ids.Empty, nil, database.ErrNotFound).Times(1) + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return( + state.SubnetConversion{}, + database.ErrNotFound, + ).Times(1) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil) env.flowChecker.EXPECT().VerifySpend( @@ -2298,7 +2301,14 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { subnetOwner := fxmock.NewOwner(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) - env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.GenerateTestID(), ids.GenerateTestID(), make([]byte, 20), nil) + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return( + state.SubnetConversion{ + ConversionID: ids.GenerateTestID(), + ChainID: ids.GenerateTestID(), + Addr: make([]byte, 20), + }, + nil, + ) 
env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) @@ -2333,7 +2343,10 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { subnetOwner := fxmock.NewOwner(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) - env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return(ids.Empty, ids.Empty, nil, database.ErrNotFound).Times(1) + env.state.EXPECT().GetSubnetConversion(env.unsignedTx.Subnet).Return( + state.SubnetConversion{}, + database.ErrNotFound, + ).Times(1) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) env.flowChecker.EXPECT().VerifySpend( From f31cfc670423a7d02b31f5b3c2eb19ac98c6e523 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 20 Oct 2024 16:36:50 -0400 Subject: [PATCH 090/155] Add SoV Excess to P-chain state --- .../block/executor/proposal_block_test.go | 2 ++ .../block/executor/standard_block_test.go | 2 ++ .../block/executor/verifier_test.go | 5 ++++ vms/platformvm/state/diff.go | 11 ++++++++ vms/platformvm/state/diff_test.go | 25 +++++++++++++++++ vms/platformvm/state/mock_chain.go | 26 ++++++++++++++++++ vms/platformvm/state/mock_diff.go | 26 ++++++++++++++++++ vms/platformvm/state/mock_state.go | 26 ++++++++++++++++++ vms/platformvm/state/state.go | 27 +++++++++++++++++++ vms/platformvm/state/state_test.go | 16 +++++++++++ 10 files changed, 166 insertions(+) diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 4a4154b02e50..c0a597d77335 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -91,6 +91,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { // setup state to validate proposal block transaction onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() + onParentAccept.EXPECT().GetSoVExcess().Return(gas.Gas(0)).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() onParentAccept.EXPECT().GetCurrentStakerIterator().Return( @@ -162,6 +163,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { onParentAccept := state.NewMockDiff(ctrl) onParentAccept.EXPECT().GetTimestamp().Return(parentTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() + onParentAccept.EXPECT().GetSoVExcess().Return(gas.Gas(0)).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() onParentAccept.EXPECT().GetCurrentSupply(constants.PrimaryNetworkID).Return(uint64(1000), nil).AnyTimes() diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index 8e62937c9239..d9ad860d3d3b 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -59,6 +59,7 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { chainTime := env.clk.Time().Truncate(time.Second) 
onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() + onParentAccept.EXPECT().GetSoVExcess().Return(gas.Gas(0)).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() // wrong height @@ -134,6 +135,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() onParentAccept.EXPECT().GetFeeState().Return(gas.State{}).AnyTimes() + onParentAccept.EXPECT().GetSoVExcess().Return(gas.Gas(0)).AnyTimes() onParentAccept.EXPECT().GetAccruedFees().Return(uint64(0)).AnyTimes() txID := ids.GenerateTestID() diff --git a/vms/platformvm/block/executor/verifier_test.go b/vms/platformvm/block/executor/verifier_test.go index f57b8fb4ed58..a076616701f2 100644 --- a/vms/platformvm/block/executor/verifier_test.go +++ b/vms/platformvm/block/executor/verifier_test.go @@ -103,6 +103,7 @@ func TestVerifierVisitProposalBlock(t *testing.T) { // One call for each of onCommitState and onAbortState. parentOnAcceptState.EXPECT().GetTimestamp().Return(timestamp).Times(2) parentOnAcceptState.EXPECT().GetFeeState().Return(gas.State{}).Times(2) + parentOnAcceptState.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(2) parentOnAcceptState.EXPECT().GetAccruedFees().Return(uint64(0)).Times(2) backend := &backend{ @@ -335,6 +336,7 @@ func TestVerifierVisitStandardBlock(t *testing.T) { timestamp := time.Now() parentState.EXPECT().GetTimestamp().Return(timestamp).Times(1) parentState.EXPECT().GetFeeState().Return(gas.State{}).Times(1) + parentState.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) parentState.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) parentStatelessBlk.EXPECT().Height().Return(uint64(1)).Times(1) mempool.EXPECT().Remove(apricotBlk.Txs()).Times(1) @@ -597,6 +599,7 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { s.EXPECT().GetLastAccepted().Return(parentID).Times(3) s.EXPECT().GetTimestamp().Return(parentTime).Times(3) s.EXPECT().GetFeeState().Return(gas.State{}).Times(3) + s.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(3) s.EXPECT().GetAccruedFees().Return(uint64(0)).Times(3) onDecisionState, err := state.NewDiff(parentID, backend) @@ -695,6 +698,7 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { s.EXPECT().GetLastAccepted().Return(parentID).Times(3) s.EXPECT().GetTimestamp().Return(parentTime).Times(3) s.EXPECT().GetFeeState().Return(gas.State{}).Times(3) + s.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(3) s.EXPECT().GetAccruedFees().Return(uint64(0)).Times(3) onDecisionState, err := state.NewDiff(parentID, backend) @@ -811,6 +815,7 @@ func TestVerifierVisitStandardBlockWithDuplicateInputs(t *testing.T) { parentStatelessBlk.EXPECT().Height().Return(uint64(1)).Times(1) parentState.EXPECT().GetTimestamp().Return(timestamp).Times(1) parentState.EXPECT().GetFeeState().Return(gas.State{}).Times(1) + parentState.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) parentState.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) parentStatelessBlk.EXPECT().Parent().Return(grandParentID).Times(1) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 9fe6a62363c0..da73854346ea 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -37,6 +37,7 @@ type diff struct { timestamp time.Time feeState gas.State + sovExcess gas.Gas accruedFees uint64 // Subnet ID --> supply of native asset of the subnet @@ -80,6 +81,7 @@ func NewDiff( stateVersions: stateVersions, 
timestamp: parentState.GetTimestamp(), feeState: parentState.GetFeeState(), + sovExcess: parentState.GetSoVExcess(), accruedFees: parentState.GetAccruedFees(), expiryDiff: newExpiryDiff(), subnetOwners: make(map[ids.ID]fx.Owner), @@ -117,6 +119,14 @@ func (d *diff) SetFeeState(feeState gas.State) { d.feeState = feeState } +func (d *diff) GetSoVExcess() gas.Gas { + return d.sovExcess +} + +func (d *diff) SetSoVExcess(excess gas.Gas) { + d.sovExcess = excess +} + func (d *diff) GetAccruedFees() uint64 { return d.accruedFees } @@ -482,6 +492,7 @@ func (d *diff) DeleteUTXO(utxoID ids.ID) { func (d *diff) Apply(baseState Chain) error { baseState.SetTimestamp(d.timestamp) baseState.SetFeeState(d.feeState) + baseState.SetSoVExcess(d.sovExcess) baseState.SetAccruedFees(d.accruedFees) for subnetID, supply := range d.currentSupply { baseState.SetCurrentSupply(subnetID, supply) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 013a918b60fa..e12a60808542 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -68,6 +68,24 @@ func TestDiffFeeState(t *testing.T) { assertChainsEqual(t, state, d) } +func TestDiffSoVExcess(t *testing.T) { + require := require.New(t) + + state := newTestState(t, memdb.New()) + + d, err := NewDiffOn(state) + require.NoError(err) + + initialExcess := state.GetSoVExcess() + newExcess := initialExcess + 1 + d.SetSoVExcess(newExcess) + require.Equal(newExcess, d.GetSoVExcess()) + require.Equal(initialExcess, state.GetSoVExcess()) + + require.NoError(d.Apply(state)) + assertChainsEqual(t, state, d) +} + func TestDiffAccruedFees(t *testing.T) { require := require.New(t) @@ -270,6 +288,7 @@ func TestDiffCurrentValidator(t *testing.T) { // Called in NewDiffOn state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) d, err := NewDiffOn(state) @@ -305,6 +324,7 @@ func TestDiffPendingValidator(t *testing.T) { // Called in NewDiffOn state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) d, err := NewDiffOn(state) @@ -346,6 +366,7 @@ func TestDiffCurrentDelegator(t *testing.T) { // Called in NewDiffOn state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) d, err := NewDiffOn(state) @@ -390,6 +411,7 @@ func TestDiffPendingDelegator(t *testing.T) { // Called in NewDiffOn state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) d, err := NewDiffOn(state) @@ -528,6 +550,7 @@ func TestDiffTx(t *testing.T) { // Called in NewDiffOn state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) d, err := NewDiffOn(state) @@ -626,6 +649,7 @@ func TestDiffUTXO(t *testing.T) { // Called in NewDiffOn state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) 
state.EXPECT().GetFeeState().Return(gas.State{}).Times(1) + state.EXPECT().GetSoVExcess().Return(gas.Gas(0)).Times(1) state.EXPECT().GetAccruedFees().Return(uint64(0)).Times(1) d, err := NewDiffOn(state) @@ -703,6 +727,7 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { require.Equal(expected.GetTimestamp(), actual.GetTimestamp()) require.Equal(expected.GetFeeState(), actual.GetFeeState()) + require.Equal(expected.GetSoVExcess(), actual.GetSoVExcess()) require.Equal(expected.GetAccruedFees(), actual.GetAccruedFees()) expectedCurrentSupply, err := expected.GetCurrentSupply(constants.PrimaryNetworkID) diff --git a/vms/platformvm/state/mock_chain.go b/vms/platformvm/state/mock_chain.go index 27daeae3a101..56c495924511 100644 --- a/vms/platformvm/state/mock_chain.go +++ b/vms/platformvm/state/mock_chain.go @@ -353,6 +353,20 @@ func (mr *MockChainMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockChain)(nil).GetPendingValidator), subnetID, nodeID) } +// GetSoVExcess mocks base method. +func (m *MockChain) GetSoVExcess() gas.Gas { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSoVExcess") + ret0, _ := ret[0].(gas.Gas) + return ret0 +} + +// GetSoVExcess indicates an expected call of GetSoVExcess. +func (mr *MockChainMockRecorder) GetSoVExcess() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSoVExcess", reflect.TypeOf((*MockChain)(nil).GetSoVExcess)) +} + // GetSubnetConversion mocks base method. func (m *MockChain) GetSubnetConversion(subnetID ids.ID) (SubnetConversion, error) { m.ctrl.T.Helper() @@ -572,6 +586,18 @@ func (mr *MockChainMockRecorder) SetFeeState(f any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeState", reflect.TypeOf((*MockChain)(nil).SetFeeState), f) } +// SetSoVExcess mocks base method. +func (m *MockChain) SetSoVExcess(e gas.Gas) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSoVExcess", e) +} + +// SetSoVExcess indicates an expected call of SetSoVExcess. +func (mr *MockChainMockRecorder) SetSoVExcess(e any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSoVExcess", reflect.TypeOf((*MockChain)(nil).SetSoVExcess), e) +} + // SetSubnetConversion mocks base method. func (m *MockChain) SetSubnetConversion(subnetID ids.ID, c SubnetConversion) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_diff.go b/vms/platformvm/state/mock_diff.go index 8732fc49b406..b8362386af96 100644 --- a/vms/platformvm/state/mock_diff.go +++ b/vms/platformvm/state/mock_diff.go @@ -367,6 +367,20 @@ func (mr *MockDiffMockRecorder) GetPendingValidator(subnetID, nodeID any) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockDiff)(nil).GetPendingValidator), subnetID, nodeID) } +// GetSoVExcess mocks base method. +func (m *MockDiff) GetSoVExcess() gas.Gas { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSoVExcess") + ret0, _ := ret[0].(gas.Gas) + return ret0 +} + +// GetSoVExcess indicates an expected call of GetSoVExcess. +func (mr *MockDiffMockRecorder) GetSoVExcess() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSoVExcess", reflect.TypeOf((*MockDiff)(nil).GetSoVExcess)) +} + // GetSubnetConversion mocks base method. 
func (m *MockDiff) GetSubnetConversion(subnetID ids.ID) (SubnetConversion, error) { m.ctrl.T.Helper() @@ -586,6 +600,18 @@ func (mr *MockDiffMockRecorder) SetFeeState(f any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFeeState", reflect.TypeOf((*MockDiff)(nil).SetFeeState), f) } +// SetSoVExcess mocks base method. +func (m *MockDiff) SetSoVExcess(e gas.Gas) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSoVExcess", e) +} + +// SetSoVExcess indicates an expected call of SetSoVExcess. +func (mr *MockDiffMockRecorder) SetSoVExcess(e any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSoVExcess", reflect.TypeOf((*MockDiff)(nil).SetSoVExcess), e) +} + // SetSubnetConversion mocks base method. func (m *MockDiff) SetSubnetConversion(subnetID ids.ID, c SubnetConversion) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index a17593982572..cb05f54fc6f7 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -527,6 +527,20 @@ func (mr *MockStateMockRecorder) GetRewardUTXOs(txID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockState)(nil).GetRewardUTXOs), txID) } +// GetSoVExcess mocks base method. +func (m *MockState) GetSoVExcess() gas.Gas { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSoVExcess") + ret0, _ := ret[0].(gas.Gas) + return ret0 +} + +// GetSoVExcess indicates an expected call of GetSoVExcess. +func (mr *MockStateMockRecorder) GetSoVExcess() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSoVExcess", reflect.TypeOf((*MockState)(nil).GetSoVExcess)) +} + // GetStartTime mocks base method. func (m *MockState) GetStartTime(nodeID ids.NodeID) (time.Time, error) { m.ctrl.T.Helper() @@ -845,6 +859,18 @@ func (mr *MockStateMockRecorder) SetLastAccepted(blkID any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), blkID) } +// SetSoVExcess mocks base method. +func (m *MockState) SetSoVExcess(e gas.Gas) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSoVExcess", e) +} + +// SetSoVExcess indicates an expected call of SetSoVExcess. +func (mr *MockStateMockRecorder) SetSoVExcess(e any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSoVExcess", reflect.TypeOf((*MockState)(nil).SetSoVExcess), e) +} + // SetSubnetConversion mocks base method. 
func (m *MockState) SetSubnetConversion(subnetID ids.ID, c SubnetConversion) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 58c2056570bd..53b109c23249 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -87,6 +87,7 @@ var ( TimestampKey = []byte("timestamp") FeeStateKey = []byte("fee state") + SoVExcessKey = []byte("sov excess") AccruedFeesKey = []byte("accrued fees") CurrentSupplyKey = []byte("current supply") LastAcceptedKey = []byte("last accepted") @@ -110,6 +111,9 @@ type Chain interface { GetFeeState() gas.State SetFeeState(f gas.State) + GetSoVExcess() gas.Gas + SetSoVExcess(e gas.Gas) + GetAccruedFees() uint64 SetAccruedFees(f uint64) @@ -289,6 +293,7 @@ type stateBlk struct { * |-- blocksReindexedKey -> nil * |-- timestampKey -> timestamp * |-- feeStateKey -> feeState + * |-- sovExcessKey -> sovExcess * |-- accruedFeesKey -> accruedFees * |-- currentSupplyKey -> currentSupply * |-- lastAcceptedKey -> lastAccepted @@ -386,6 +391,7 @@ type state struct { // The persisted fields represent the current database value timestamp, persistedTimestamp time.Time feeState, persistedFeeState gas.State + sovExcess, persistedSOVExcess gas.Gas accruedFees, persistedAccruedFees uint64 currentSupply, persistedCurrentSupply uint64 // [lastAccepted] is the most recently accepted block. @@ -1091,6 +1097,14 @@ func (s *state) SetFeeState(feeState gas.State) { s.feeState = feeState } +func (s *state) GetSoVExcess() gas.Gas { + return s.sovExcess +} + +func (s *state) SetSoVExcess(e gas.Gas) { + s.sovExcess = e +} + func (s *state) GetAccruedFees() uint64 { return s.accruedFees } @@ -1391,6 +1405,13 @@ func (s *state) loadMetadata() error { s.persistedFeeState = feeState s.SetFeeState(feeState) + sovExcess, err := database.WithDefault(database.GetUInt64, s.singletonDB, SoVExcessKey, 0) + if err != nil { + return err + } + s.persistedSOVExcess = gas.Gas(sovExcess) + s.SetSoVExcess(gas.Gas(sovExcess)) + accruedFees, err := database.WithDefault(database.GetUInt64, s.singletonDB, AccruedFeesKey, 0) if err != nil { return err @@ -2439,6 +2460,12 @@ func (s *state) writeMetadata() error { } s.persistedFeeState = s.feeState } + if s.sovExcess != s.persistedSOVExcess { + if err := database.PutUInt64(s.singletonDB, SoVExcessKey, uint64(s.sovExcess)); err != nil { + return fmt.Errorf("failed to write sov excess: %w", err) + } + s.persistedSOVExcess = s.sovExcess + } if s.accruedFees != s.persistedAccruedFees { if err := database.PutUInt64(s.singletonDB, AccruedFeesKey, s.accruedFees); err != nil { return fmt.Errorf("failed to write accrued fees: %w", err) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index d29129500435..6204540bd615 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1474,6 +1474,22 @@ func TestStateFeeStateCommitAndLoad(t *testing.T) { require.Equal(expectedFeeState, s.GetFeeState()) } +// Verify that committing the state writes the sov excess to the database and +// that loading the state fetches the sov excess from the database. 
+func TestStateSoVExcessCommitAndLoad(t *testing.T) { + require := require.New(t) + + db := memdb.New() + s := newTestState(t, db) + + const expectedSoVExcess gas.Gas = 10 + s.SetSoVExcess(expectedSoVExcess) + require.NoError(s.Commit()) + + s = newTestState(t, db) + require.Equal(expectedSoVExcess, s.GetSoVExcess()) +} + // Verify that committing the state writes the accrued fees to the database and // that loading the state fetches the accrued fees from the database. func TestStateAccruedFeesCommitAndLoad(t *testing.T) { From 58d5b8ddf85fe3a4a9a9b0739e6d3844e6d1ea9f Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 21 Oct 2024 18:42:17 -0400 Subject: [PATCH 091/155] Populate BLS key diffs for subnet validators --- vms/platformvm/state/state.go | 317 ++++++++++++++++++++-------------- 1 file changed, 186 insertions(+), 131 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 58c2056570bd..a34b587a6732 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -59,8 +59,9 @@ const ( var ( _ State = (*state)(nil) - errValidatorSetAlreadyPopulated = errors.New("validator set already populated") - errIsNotSubnet = errors.New("is not a subnet") + errValidatorSetAlreadyPopulated = errors.New("validator set already populated") + errIsNotSubnet = errors.New("is not a subnet") + errMissingPrimaryNetworkValidator = errors.New("missing primary network validator") BlockIDPrefix = []byte("blockID") BlockPrefix = []byte("block") @@ -2007,164 +2008,218 @@ func (s *state) writeExpiry() error { func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { + // We must write the primary network stakers last because writing subnet + // validator diffs may depend on the primary network validator diffs to + // inherit the public keys. + if subnetID == constants.PrimaryNetworkID { + continue + } + delete(s.currentStakers.validatorDiffs, subnetID) - // Select db to write to - validatorDB := s.currentSubnetValidatorList - delegatorDB := s.currentSubnetDelegatorList - if subnetID == constants.PrimaryNetworkID { - validatorDB = s.currentValidatorList - delegatorDB = s.currentDelegatorList + err := s.writeCurrentStakersSubnetDiff( + subnetID, + validatorDiffs, + updateValidators, + height, + codecVersion, + ) + if err != nil { + return err } + } - // Record the change in weight and/or public key for each validator. - for nodeID, validatorDiff := range validatorDiffs { - // Copy [nodeID] so it doesn't get overwritten next iteration. - nodeID := nodeID + if validatorDiffs, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID]; ok { + delete(s.currentStakers.validatorDiffs, constants.PrimaryNetworkID) - weightDiff := &ValidatorWeightDiff{ - Decrease: validatorDiff.validatorStatus == deleted, - } - switch validatorDiff.validatorStatus { - case added: - staker := validatorDiff.validator - weightDiff.Amount = staker.Weight - - // Invariant: Only the Primary Network contains non-nil public - // keys. - if staker.PublicKey != nil { - // Record that the public key for the validator is being - // added. This means the prior value for the public key was - // nil. 
- err := s.validatorPublicKeyDiffsDB.Put( - marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), - nil, - ) - if err != nil { - return err - } - } + err := s.writeCurrentStakersSubnetDiff( + constants.PrimaryNetworkID, + validatorDiffs, + updateValidators, + height, + codecVersion, + ) + if err != nil { + return err + } + } - // The validator is being added. - // - // Invariant: It's impossible for a delegator to have been - // rewarded in the same block that the validator was added. - startTime := uint64(staker.StartTime.Unix()) - metadata := &validatorMetadata{ - txID: staker.TxID, - lastUpdated: staker.StartTime, - - UpDuration: 0, - LastUpdated: startTime, - StakerStartTime: startTime, - PotentialReward: staker.PotentialReward, - PotentialDelegateeReward: 0, - } + // TODO: Move validator set management out of the state package + // + // Attempt to update the stake metrics + if !updateValidators { + return nil + } - metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) - if err != nil { - return fmt.Errorf("failed to serialize current validator: %w", err) - } + totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + return fmt.Errorf("failed to get total weight of primary network: %w", err) + } - if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil { - return fmt.Errorf("failed to write current validator to list: %w", err) - } + s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + s.metrics.SetTotalStake(totalWeight) + return nil +} - s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) - case deleted: - staker := validatorDiff.validator - weightDiff.Amount = staker.Weight - - // Invariant: Only the Primary Network contains non-nil public - // keys. - if staker.PublicKey != nil { - // Record that the public key for the validator is being - // removed. This means we must record the prior value of the - // public key. - // - // Note: We store the uncompressed public key here as it is - // significantly more efficient to parse when applying - // diffs. - err := s.validatorPublicKeyDiffsDB.Put( - marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), - bls.PublicKeyToUncompressedBytes(staker.PublicKey), - ) - if err != nil { - return err - } - } +func (s *state) writeCurrentStakersSubnetDiff( + subnetID ids.ID, + validatorDiffs map[ids.NodeID]*diffValidator, + updateValidators bool, + height uint64, + codecVersion uint16, +) error { + // Select db to write to + validatorDB := s.currentSubnetValidatorList + delegatorDB := s.currentSubnetDelegatorList + if subnetID == constants.PrimaryNetworkID { + validatorDB = s.currentValidatorList + delegatorDB = s.currentDelegatorList + } - if err := validatorDB.Delete(staker.TxID[:]); err != nil { - return fmt.Errorf("failed to delete current staker: %w", err) + // Record the change in weight and/or public key for each validator. + for nodeID, validatorDiff := range validatorDiffs { + var ( + staker *Staker + pk *bls.PublicKey + weightDiff = &ValidatorWeightDiff{ + Decrease: validatorDiff.validatorStatus == deleted, + } + ) + if validatorDiff.validatorStatus != unmodified { + staker = validatorDiff.validator + + pk = staker.PublicKey + // For non-primary network validators, the public key is inherited + // from the primary network. 
+ if subnetID != constants.PrimaryNetworkID { + if vdr, ok := s.currentStakers.validators[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { + // The primary network validator is still present after + // writing. + pk = vdr.validator.PublicKey + } else if vdr, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { + // The primary network validator is being removed during + // writing. + pk = vdr.validator.PublicKey + } else { + // This should never happen. + return ErrMissingPrimaryNetworkValidator } - - s.validatorState.DeleteValidatorMetadata(nodeID, subnetID) } - err := writeCurrentDelegatorDiff( - delegatorDB, - weightDiff, - validatorDiff, - codecVersion, - ) - if err != nil { - return err + weightDiff.Amount = staker.Weight + } + + switch validatorDiff.validatorStatus { + case added: + if pk != nil { + // Record that the public key for the validator is being added. + // This means the prior value for the public key was nil. + err := s.validatorPublicKeyDiffsDB.Put( + marshalDiffKey(subnetID, height, nodeID), + nil, + ) + if err != nil { + return err + } } - if weightDiff.Amount == 0 { - // No weight change to record; go to next validator. - continue + // The validator is being added. + // + // Invariant: It's impossible for a delegator to have been rewarded + // in the same block that the validator was added. + startTime := uint64(staker.StartTime.Unix()) + metadata := &validatorMetadata{ + txID: staker.TxID, + lastUpdated: staker.StartTime, + + UpDuration: 0, + LastUpdated: startTime, + StakerStartTime: startTime, + PotentialReward: staker.PotentialReward, + PotentialDelegateeReward: 0, } - err = s.validatorWeightDiffsDB.Put( - marshalDiffKey(subnetID, height, nodeID), - marshalWeightDiff(weightDiff), - ) + metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) if err != nil { - return err + return fmt.Errorf("failed to serialize current validator: %w", err) } - // TODO: Move the validator set management out of the state package - if !updateValidators { - continue + if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil { + return fmt.Errorf("failed to write current validator to list: %w", err) } - if weightDiff.Decrease { - err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) - } else { - if validatorDiff.validatorStatus == added { - staker := validatorDiff.validator - err = s.validators.AddStaker( - subnetID, - nodeID, - staker.PublicKey, - staker.TxID, - weightDiff.Amount, - ) - } else { - err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) + s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) + case deleted: + if pk != nil { + // Record that the public key for the validator is being + // removed. This means we must record the prior value of the + // public key. + // + // Note: We store the uncompressed public key here as it is + // significantly more efficient to parse when applying diffs. 
+ err := s.validatorPublicKeyDiffsDB.Put( + marshalDiffKey(subnetID, height, nodeID), + bls.PublicKeyToUncompressedBytes(pk), + ) + if err != nil { + return err } } - if err != nil { - return fmt.Errorf("failed to update validator weight: %w", err) + + if err := validatorDB.Delete(staker.TxID[:]); err != nil { + return fmt.Errorf("failed to delete current staker: %w", err) } + + s.validatorState.DeleteValidatorMetadata(nodeID, subnetID) } - } - // TODO: Move validator set management out of the state package - // - // Attempt to update the stake metrics - if !updateValidators { - return nil - } + err := writeCurrentDelegatorDiff( + delegatorDB, + weightDiff, + validatorDiff, + codecVersion, + ) + if err != nil { + return err + } - totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) - if err != nil { - return fmt.Errorf("failed to get total weight of primary network: %w", err) - } + if weightDiff.Amount == 0 { + // No weight change to record; go to next validator. + continue + } - s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) - s.metrics.SetTotalStake(totalWeight) + err = s.validatorWeightDiffsDB.Put( + marshalDiffKey(subnetID, height, nodeID), + marshalWeightDiff(weightDiff), + ) + if err != nil { + return err + } + + // TODO: Move the validator set management out of the state package + if !updateValidators { + continue + } + + if weightDiff.Decrease { + err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) + } else { + if validatorDiff.validatorStatus == added { + err = s.validators.AddStaker( + subnetID, + nodeID, + pk, + staker.TxID, + weightDiff.Amount, + ) + } else { + err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) + } + } + if err != nil { + return fmt.Errorf("failed to update validator weight: %w", err) + } + } return nil } From 273fbbecd3da929db6d00bab40511884efe5ee0c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 21 Oct 2024 18:50:47 -0400 Subject: [PATCH 092/155] Populate BLS key diffs for subnet validators --- vms/platformvm/state/state.go | 12 +- vms/platformvm/state/state_test.go | 351 +++++++++++++++++---------- vms/platformvm/validators/manager.go | 8 +- 3 files changed, 241 insertions(+), 130 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index a34b587a6732..bed7b4746fbe 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -190,6 +190,7 @@ type State interface { validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight uint64, endHeight uint64, + subnetID ids.ID, ) error SetHeight(height uint64) @@ -1244,10 +1245,11 @@ func (s *state) ApplyValidatorPublicKeyDiffs( validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight uint64, endHeight uint64, + subnetID ids.ID, ) error { diffIter := s.validatorPublicKeyDiffsDB.NewIteratorWithStartAndPrefix( - marshalStartDiffKey(constants.PrimaryNetworkID, startHeight), - constants.PrimaryNetworkID[:], + marshalStartDiffKey(subnetID, startHeight), + subnetID[:], ) defer diffIter.Release() @@ -2101,8 +2103,10 @@ func (s *state) writeCurrentStakersSubnetDiff( // writing. pk = vdr.validator.PublicKey } else { - // This should never happen. - return ErrMissingPrimaryNetworkValidator + // This should never happen as the primary network diffs are + // written last and subnet validator times must be a subset + // of the primary network validator times. 
+ return errMissingPrimaryNetworkValidator } } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index d29129500435..ba8dbd65481f 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -28,6 +28,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -996,35 +997,43 @@ func TestStateAddRemoveValidator(t *testing.T) { state := newTestState(t, memdb.New()) var ( - numNodes = 3 - subnetID = ids.GenerateTestID() - startTime = time.Now() - endTime = startTime.Add(24 * time.Hour) - stakers = make([]Staker, numNodes) + numNodes = 5 + subnetID = ids.GenerateTestID() + startTime = time.Now() + endTime = startTime.Add(24 * time.Hour) + primaryStakers = make([]Staker, numNodes) + subnetStakers = make([]Staker, numNodes) ) - for i := 0; i < numNodes; i++ { - stakers[i] = Staker{ + for i := range primaryStakers { + sk, err := bls.NewSecretKey() + require.NoError(err) + + primaryStakers[i] = Staker{ TxID: ids.GenerateTestID(), NodeID: ids.GenerateTestNodeID(), + PublicKey: bls.PublicFromSecretKey(sk), + SubnetID: constants.PrimaryNetworkID, Weight: uint64(i + 1), StartTime: startTime.Add(time.Duration(i) * time.Second), EndTime: endTime.Add(time.Duration(i) * time.Second), PotentialReward: uint64(i + 1), } - if i%2 == 0 { - stakers[i].SubnetID = subnetID - } else { - sk, err := bls.NewSecretKey() - require.NoError(err) - stakers[i].PublicKey = bls.PublicFromSecretKey(sk) - stakers[i].SubnetID = constants.PrimaryNetworkID + } + for i, primaryStaker := range primaryStakers { + subnetStakers[i] = Staker{ + TxID: ids.GenerateTestID(), + NodeID: primaryStaker.NodeID, + PublicKey: nil, // Key is inherited from the primary network + SubnetID: subnetID, + Weight: uint64(i + 1), + StartTime: primaryStaker.StartTime, + EndTime: primaryStaker.EndTime, + PotentialReward: uint64(i + 1), } } type diff struct { addedValidators []Staker - addedDelegators []Staker - removedDelegators []Staker removedValidators []Staker expectedPrimaryValidatorSet map[ids.NodeID]*validators.GetValidatorOutput @@ -1037,101 +1046,176 @@ func TestStateAddRemoveValidator(t *testing.T) { expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { - // Add a subnet validator - addedValidators: []Staker{stakers[0]}, - expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[0].NodeID: { - NodeID: stakers[0].NodeID, - Weight: stakers[0].Weight, + // Add primary validator 0 + addedValidators: []Staker{primaryStakers[0]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + primaryStakers[0].NodeID: { + NodeID: primaryStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: primaryStakers[0].Weight, }, }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { - // Remove a subnet validator - removedValidators: []Staker{stakers[0]}, - expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + // Add subnet validator 0 + addedValidators: []Staker{subnetStakers[0]}, + 
expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + primaryStakers[0].NodeID: { + NodeID: primaryStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: primaryStakers[0].Weight, + }, + }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + subnetStakers[0].NodeID: { + NodeID: subnetStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: subnetStakers[0].Weight, + }, + }, }, - { // Add a primary network validator - addedValidators: []Staker{stakers[1]}, + { + // Remove subnet validator 0 + removedValidators: []Staker{subnetStakers[0]}, expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[1].NodeID: { - NodeID: stakers[1].NodeID, - PublicKey: stakers[1].PublicKey, - Weight: stakers[1].Weight, + primaryStakers[0].NodeID: { + NodeID: primaryStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: primaryStakers[0].Weight, }, }, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { - // Do nothing + // Add primary network validator 1, and subnet validator 1 + addedValidators: []Staker{primaryStakers[1], subnetStakers[1]}, + // Remove primary network validator 0, and subnet validator 1 + removedValidators: []Staker{primaryStakers[0], subnetStakers[1]}, expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[1].NodeID: { - NodeID: stakers[1].NodeID, - PublicKey: stakers[1].PublicKey, - Weight: stakers[1].Weight, + primaryStakers[1].NodeID: { + NodeID: primaryStakers[1].NodeID, + PublicKey: primaryStakers[1].PublicKey, + Weight: primaryStakers[1].Weight, }, }, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, - { // Remove a primary network validator - removedValidators: []Staker{stakers[1]}, - expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + { + // Add primary network validator 2, and subnet validator 2 + addedValidators: []Staker{primaryStakers[2], subnetStakers[2]}, + // Remove primary network validator 1 + removedValidators: []Staker{primaryStakers[1]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + primaryStakers[2].NodeID: { + NodeID: primaryStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: primaryStakers[2].Weight, + }, + }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + subnetStakers[2].NodeID: { + NodeID: subnetStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: subnetStakers[2].Weight, + }, + }, }, { - // Add 2 subnet validators and a primary network validator - addedValidators: []Staker{stakers[0], stakers[1], stakers[2]}, + // Add primary network and subnet validators 3 & 4 + addedValidators: []Staker{primaryStakers[3], primaryStakers[4], subnetStakers[3], subnetStakers[4]}, expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[1].NodeID: { - NodeID: stakers[1].NodeID, - PublicKey: stakers[1].PublicKey, - Weight: stakers[1].Weight, + primaryStakers[2].NodeID: { + NodeID: primaryStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: primaryStakers[2].Weight, + }, + primaryStakers[3].NodeID: { + NodeID: primaryStakers[3].NodeID, + PublicKey: primaryStakers[3].PublicKey, + Weight: primaryStakers[3].Weight, + }, + primaryStakers[4].NodeID: { + NodeID: primaryStakers[4].NodeID, + PublicKey: 
primaryStakers[4].PublicKey, + Weight: primaryStakers[4].Weight, }, }, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[0].NodeID: { - NodeID: stakers[0].NodeID, - Weight: stakers[0].Weight, + subnetStakers[2].NodeID: { + NodeID: subnetStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: subnetStakers[2].Weight, + }, + subnetStakers[3].NodeID: { + NodeID: subnetStakers[3].NodeID, + PublicKey: primaryStakers[3].PublicKey, + Weight: subnetStakers[3].Weight, }, - stakers[2].NodeID: { - NodeID: stakers[2].NodeID, - Weight: stakers[2].Weight, + subnetStakers[4].NodeID: { + NodeID: subnetStakers[4].NodeID, + PublicKey: primaryStakers[4].PublicKey, + Weight: subnetStakers[4].Weight, }, }, }, { - // Remove 2 subnet validators and a primary network validator. - removedValidators: []Staker{stakers[0], stakers[1], stakers[2]}, + // Remove primary network and subnet validators 2 & 3 & 4 + removedValidators: []Staker{ + primaryStakers[2], primaryStakers[3], primaryStakers[4], + subnetStakers[2], subnetStakers[3], subnetStakers[4], + }, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + }, + { + // Do nothing expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, } for currentIndex, diff := range diffs { - for _, added := range diff.addedValidators { - added := added - require.NoError(state.PutCurrentValidator(&added)) - } - for _, added := range diff.addedDelegators { - added := added - state.PutCurrentDelegator(&added) + d, err := NewDiffOn(state) + require.NoError(err) + + type subnetIDNodeID struct { + subnetID ids.ID + nodeID ids.NodeID } - for _, removed := range diff.removedDelegators { - removed := removed - state.DeleteCurrentDelegator(&removed) + var expectedValidators set.Set[subnetIDNodeID] + for _, added := range diff.addedValidators { + require.NoError(d.PutCurrentValidator(&added)) + + expectedValidators.Add(subnetIDNodeID{ + subnetID: added.SubnetID, + nodeID: added.NodeID, + }) } for _, removed := range diff.removedValidators { - removed := removed - state.DeleteCurrentValidator(&removed) + d.DeleteCurrentValidator(&removed) + + expectedValidators.Remove(subnetIDNodeID{ + subnetID: removed.SubnetID, + nodeID: removed.NodeID, + }) } + require.NoError(d.Apply(state)) + currentHeight := uint64(currentIndex + 1) state.SetHeight(currentHeight) require.NoError(state.Commit()) for _, added := range diff.addedValidators { + subnetNodeID := subnetIDNodeID{ + subnetID: added.SubnetID, + nodeID: added.NodeID, + } + if !expectedValidators.Contains(subnetNodeID) { + continue + } + gotValidator, err := state.GetCurrentValidator(added.SubnetID, added.NodeID) require.NoError(err) require.Equal(added, *gotValidator) @@ -1142,37 +1226,84 @@ func TestStateAddRemoveValidator(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) } + primaryValidatorSet := state.validators.GetMap(constants.PrimaryNetworkID) + delete(primaryValidatorSet, defaultValidatorNodeID) // Ignore the genesis validator + require.Equal(diff.expectedPrimaryValidatorSet, primaryValidatorSet) + + require.Equal(diff.expectedSubnetValidatorSet, state.validators.GetMap(subnetID)) + for i := 0; i < currentIndex; i++ { prevDiff := diffs[i] prevHeight := uint64(i + 1) - primaryValidatorSet := copyValidatorSet(diff.expectedPrimaryValidatorSet) - 
require.NoError(state.ApplyValidatorWeightDiffs( - context.Background(), - primaryValidatorSet, - currentHeight, - prevHeight+1, - constants.PrimaryNetworkID, - )) - requireEqualWeightsValidatorSet(require, prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) - - require.NoError(state.ApplyValidatorPublicKeyDiffs( - context.Background(), - primaryValidatorSet, - currentHeight, - prevHeight+1, - )) - requireEqualPublicKeysValidatorSet(require, prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) - - subnetValidatorSet := copyValidatorSet(diff.expectedSubnetValidatorSet) - require.NoError(state.ApplyValidatorWeightDiffs( - context.Background(), - subnetValidatorSet, - currentHeight, - prevHeight+1, - subnetID, - )) - requireEqualWeightsValidatorSet(require, prevDiff.expectedSubnetValidatorSet, subnetValidatorSet) + { + primaryValidatorSet := copyValidatorSet(diff.expectedPrimaryValidatorSet) + require.NoError(state.ApplyValidatorWeightDiffs( + context.Background(), + primaryValidatorSet, + currentHeight, + prevHeight+1, + constants.PrimaryNetworkID, + )) + require.NoError(state.ApplyValidatorPublicKeyDiffs( + context.Background(), + primaryValidatorSet, + currentHeight, + prevHeight+1, + constants.PrimaryNetworkID, + )) + require.Equal(prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) + } + + { + legacySubnetValidatorSet := copyValidatorSet(diff.expectedSubnetValidatorSet) + require.NoError(state.ApplyValidatorWeightDiffs( + context.Background(), + legacySubnetValidatorSet, + currentHeight, + prevHeight+1, + subnetID, + )) + + // Update the public keys of the subnet validators with the current + // primary network validator public keys + for nodeID, vdr := range legacySubnetValidatorSet { + if primaryVdr, ok := diff.expectedPrimaryValidatorSet[nodeID]; ok { + vdr.PublicKey = primaryVdr.PublicKey + } else { + vdr.PublicKey = nil + } + } + + require.NoError(state.ApplyValidatorPublicKeyDiffs( + context.Background(), + legacySubnetValidatorSet, + currentHeight, + prevHeight+1, + constants.PrimaryNetworkID, + )) + require.Equal(prevDiff.expectedSubnetValidatorSet, legacySubnetValidatorSet) + } + + { + subnetValidatorSet := copyValidatorSet(diff.expectedSubnetValidatorSet) + require.NoError(state.ApplyValidatorWeightDiffs( + context.Background(), + subnetValidatorSet, + currentHeight, + prevHeight+1, + subnetID, + )) + + require.NoError(state.ApplyValidatorPublicKeyDiffs( + context.Background(), + subnetValidatorSet, + currentHeight, + prevHeight+1, + subnetID, + )) + require.Equal(prevDiff.expectedSubnetValidatorSet, subnetValidatorSet) + } } } } @@ -1188,36 +1319,6 @@ func copyValidatorSet( return result } -func requireEqualWeightsValidatorSet( - require *require.Assertions, - expected map[ids.NodeID]*validators.GetValidatorOutput, - actual map[ids.NodeID]*validators.GetValidatorOutput, -) { - require.Len(actual, len(expected)) - for nodeID, expectedVdr := range expected { - require.Contains(actual, nodeID) - - actualVdr := actual[nodeID] - require.Equal(expectedVdr.NodeID, actualVdr.NodeID) - require.Equal(expectedVdr.Weight, actualVdr.Weight) - } -} - -func requireEqualPublicKeysValidatorSet( - require *require.Assertions, - expected map[ids.NodeID]*validators.GetValidatorOutput, - actual map[ids.NodeID]*validators.GetValidatorOutput, -) { - require.Len(actual, len(expected)) - for nodeID, expectedVdr := range expected { - require.Contains(actual, nodeID) - - actualVdr := actual[nodeID] - require.Equal(expectedVdr.NodeID, actualVdr.NodeID) - 
require.Equal(expectedVdr.PublicKey, actualVdr.PublicKey) - } -} - func TestParsedStateBlock(t *testing.T) { var ( require = require.New(t) diff --git a/vms/platformvm/validators/manager.go b/vms/platformvm/validators/manager.go index 7f1ea5ea6407..142db3e7635c 100644 --- a/vms/platformvm/validators/manager.go +++ b/vms/platformvm/validators/manager.go @@ -85,6 +85,7 @@ type State interface { validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight uint64, endHeight uint64, + subnetID ids.ID, ) error } @@ -271,7 +272,7 @@ func (m *manager) makePrimaryNetworkValidatorSet( validatorSet, currentHeight, lastDiffHeight, - constants.PlatformChainID, + constants.PrimaryNetworkID, ) if err != nil { return nil, 0, err @@ -282,6 +283,7 @@ func (m *manager) makePrimaryNetworkValidatorSet( validatorSet, currentHeight, lastDiffHeight, + constants.PrimaryNetworkID, ) return validatorSet, currentHeight, err } @@ -348,6 +350,10 @@ func (m *manager) makeSubnetValidatorSet( subnetValidatorSet, currentHeight, lastDiffHeight, + // TODO: Etna introduces L1s whose validators specify their own public + // keys, rather than inheriting them from the primary network. + // Therefore, this will need to use the subnetID after Etna. + constants.PrimaryNetworkID, ) return subnetValidatorSet, currentHeight, err } From 290ef974191e6ebef1660b69c391a494e2ac9455 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 21 Oct 2024 20:31:16 -0400 Subject: [PATCH 093/155] Update mocks --- vms/platformvm/state/mock_state.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index a17593982572..9d2568422517 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -149,17 +149,17 @@ func (mr *MockStateMockRecorder) AddUTXO(utxo any) *gomock.Call { } // ApplyValidatorPublicKeyDiffs mocks base method. -func (m *MockState) ApplyValidatorPublicKeyDiffs(ctx context.Context, validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight, endHeight uint64) error { +func (m *MockState) ApplyValidatorPublicKeyDiffs(ctx context.Context, validators map[ids.NodeID]*validators.GetValidatorOutput, startHeight, endHeight uint64, subnetID ids.ID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ApplyValidatorPublicKeyDiffs", ctx, validators, startHeight, endHeight) + ret := m.ctrl.Call(m, "ApplyValidatorPublicKeyDiffs", ctx, validators, startHeight, endHeight, subnetID) ret0, _ := ret[0].(error) return ret0 } // ApplyValidatorPublicKeyDiffs indicates an expected call of ApplyValidatorPublicKeyDiffs. -func (mr *MockStateMockRecorder) ApplyValidatorPublicKeyDiffs(ctx, validators, startHeight, endHeight any) *gomock.Call { +func (mr *MockStateMockRecorder) ApplyValidatorPublicKeyDiffs(ctx, validators, startHeight, endHeight, subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorPublicKeyDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorPublicKeyDiffs), ctx, validators, startHeight, endHeight) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorPublicKeyDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorPublicKeyDiffs), ctx, validators, startHeight, endHeight, subnetID) } // ApplyValidatorWeightDiffs mocks base method. 
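The two preceding patches make the diff-replay path explicit: weight diffs are looked up under the subnet whose set is being rebuilt, while public key diffs are still looked up under the primary network ID until Etna activates, because subnet validators inherit their BLS keys from their primary network registration. The sketch below is illustrative only and is not part of this patch series; the helper name applySubnetValidatorDiffs and the local diffApplier interface are invented for the example, the import paths are assumed from the repository layout, and the two method signatures mirror the State interface updated above.

package diffsketch

import (
	"context"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/validators"
	"github.com/ava-labs/avalanchego/utils/constants"
)

// diffApplier is a minimal stand-in for the platformvm state methods used
// below; it is not a type from the repository.
type diffApplier interface {
	ApplyValidatorWeightDiffs(
		ctx context.Context,
		vdrs map[ids.NodeID]*validators.GetValidatorOutput,
		startHeight uint64,
		endHeight uint64,
		subnetID ids.ID,
	) error
	ApplyValidatorPublicKeyDiffs(
		ctx context.Context,
		vdrs map[ids.NodeID]*validators.GetValidatorOutput,
		startHeight uint64,
		endHeight uint64,
		subnetID ids.ID,
	) error
}

// applySubnetValidatorDiffs replays the stored weight and public key diffs
// onto vdrs. Heights follow the same convention as the State methods above
// (the test loop passes currentHeight and prevHeight+1).
func applySubnetValidatorDiffs(
	ctx context.Context,
	s diffApplier,
	vdrs map[ids.NodeID]*validators.GetValidatorOutput,
	startHeight uint64,
	endHeight uint64,
	subnetID ids.ID,
) error {
	// Weight diffs are recorded per subnet.
	if err := s.ApplyValidatorWeightDiffs(ctx, vdrs, startHeight, endHeight, subnetID); err != nil {
		return err
	}
	// Pre-Etna, subnet validators inherit their BLS public keys from the
	// primary network, so key diffs are looked up under the primary network ID.
	return s.ApplyValidatorPublicKeyDiffs(ctx, vdrs, startHeight, endHeight, constants.PrimaryNetworkID)
}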
From 9155e1fd37f436f772568640f35cc813c4405c21 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 21 Oct 2024 20:34:50 -0400 Subject: [PATCH 094/155] Fix tests --- vms/platformvm/state/state_test.go | 850 ++++++++++------------------- 1 file changed, 283 insertions(+), 567 deletions(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index ba8dbd65481f..419586f7871c 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -5,7 +5,6 @@ package state import ( "context" - "fmt" "math" "math/rand" "sync" @@ -28,6 +27,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -110,620 +110,336 @@ func TestStateSyncGenesis(t *testing.T) { ) } -// Whenever we store a staker, a whole bunch a data structures are updated +// Whenever we store a staker, a whole bunch of data structures are updated // This test is meant to capture which updates are carried out func TestPersistStakers(t *testing.T) { - tests := map[string]struct { - // Insert or delete a staker to state and store it - storeStaker func(*require.Assertions, ids.ID /*=subnetID*/, *state) *Staker - - // Check that the staker is duly stored/removed in P-chain state - checkStakerInState func(*require.Assertions, *state, *Staker) - - // Check whether validators are duly reported in the validator set, - // with the right weight and showing the BLS key - checkValidatorsSet func(*require.Assertions, *state, *Staker) + const ( + primaryValidatorDuration = 28 * 24 * time.Hour + primaryDelegatorDuration = 14 * 24 * time.Hour + subnetValidatorDuration = 21 * 24 * time.Hour + subnetDelegatorDuration = 14 * 24 * time.Hour + + primaryValidatorReward = iota + primaryDelegatorReward + ) + var ( + primaryValidatorStartTime = time.Now().Truncate(time.Second) + primaryValidatorEndTime = primaryValidatorStartTime.Add(primaryValidatorDuration) + primaryValidatorEndTimeUnix = uint64(primaryValidatorEndTime.Unix()) + + primaryDelegatorStartTime = primaryValidatorStartTime + primaryDelegatorEndTime = primaryDelegatorStartTime.Add(primaryDelegatorDuration) + primaryDelegatorEndTimeUnix = uint64(primaryDelegatorEndTime.Unix()) + + primaryValidatorData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: primaryValidatorEndTimeUnix, + Wght: 1234, + } + primaryDelegatorData = txs.Validator{ + NodeID: primaryValidatorData.NodeID, + End: primaryDelegatorEndTimeUnix, + Wght: 6789, + } + ) - // Check that node duly track stakers uptimes - checkValidatorUptimes func(*require.Assertions, *state, *Staker) + unsignedAddPrimaryNetworkValidator := createPermissionlessValidatorTx(t, constants.PrimaryNetworkID, primaryValidatorData) + addPrimaryNetworkValidator := &txs.Tx{Unsigned: unsignedAddPrimaryNetworkValidator} + require.NoError(t, addPrimaryNetworkValidator.Initialize(txs.Codec)) - // Check whether weight/bls keys diffs are duly stored - checkDiffs func(*require.Assertions, *state, *Staker, uint64) - }{ - "add current validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(endTime), - 
Wght: 1234, - } - validatorReward uint64 = 5678 - ) + primaryNetworkPendingValidatorStaker, err := NewPendingStaker( + addPrimaryNetworkValidator.ID(), + unsignedAddPrimaryNetworkValidator, + ) + require.NoError(t, err) - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) + primaryNetworkCurrentValidatorStaker, err := NewCurrentStaker( + addPrimaryNetworkValidator.ID(), + unsignedAddPrimaryNetworkValidator, + primaryValidatorStartTime, + primaryValidatorReward, + ) + require.NoError(t, err) - staker, err := NewCurrentStaker( - addPermValTx.ID(), - utx, - time.Unix(startTime, 0), - validatorReward, - ) - r.NoError(err) + unsignedAddPrimaryNetworkDelegator := createPermissionlessDelegatorTx(constants.PrimaryNetworkID, primaryDelegatorData) + addPrimaryNetworkDelegator := &txs.Tx{Unsigned: unsignedAddPrimaryNetworkDelegator} + require.NoError(t, addPrimaryNetworkDelegator.Initialize(txs.Codec)) - r.NoError(s.PutCurrentValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - return staker - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - retrievedStaker, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.Equal(staker, retrievedStaker) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.Contains(valsMap, staker.NodeID) - r.Equal( - &validators.GetValidatorOutput{ - NodeID: staker.NodeID, - PublicKey: staker.PublicKey, - Weight: staker.Weight, - }, - valsMap[staker.NodeID], - ) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - upDuration, lastUpdated, err := s.GetUptime(staker.NodeID) - if staker.SubnetID != constants.PrimaryNetworkID { - // only primary network validators have uptimes - r.ErrorIs(err, database.ErrNotFound) - } else { - r.NoError(err) - r.Equal(upDuration, time.Duration(0)) - r.Equal(lastUpdated, staker.StartTime) - } - }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: false, - Amount: staker.Weight, - }, weightDiff) - - blsDiffBytes, err := s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - if staker.SubnetID == constants.PrimaryNetworkID { - r.NoError(err) - r.Nil(blsDiffBytes) - } else { - r.ErrorIs(err, database.ErrNotFound) - } - }, - }, - "add current delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert the delegator and its validator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(valEndTime), - Wght: 1234, - } - validatorReward uint64 = 5678 - - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } - delegatorReward uint64 = 5432 - ) + 
primaryNetworkPendingDelegatorStaker, err := NewPendingStaker( + addPrimaryNetworkDelegator.ID(), + unsignedAddPrimaryNetworkDelegator, + ) + require.NoError(t, err) - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) + primaryNetworkCurrentDelegatorStaker, err := NewCurrentStaker( + addPrimaryNetworkDelegator.ID(), + unsignedAddPrimaryNetworkDelegator, + primaryDelegatorStartTime, + primaryDelegatorReward, + ) + require.NoError(t, err) - val, err := NewCurrentStaker( - addPermValTx.ID(), - utxVal, - time.Unix(valStartTime, 0), - validatorReward, - ) - r.NoError(err) + tests := map[string]struct { + initialStakers []*Staker + initialTxs []*txs.Tx - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) + // Staker to insert or remove + staker *Staker + tx *txs.Tx // If tx is nil, the staker is being removed - del, err := NewCurrentStaker( - addPermDelTx.ID(), - utxDel, - time.Unix(delStartTime, 0), - delegatorReward, - ) - r.NoError(err) + // Check that the staker is duly stored/removed in P-chain state + expectedCurrentValidator *Staker + expectedPendingValidator *Staker + expectedCurrentDelegators []*Staker + expectedPendingDelegators []*Staker - r.NoError(s.PutCurrentValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) + // Check that the validator entry has been set correctly in the + // in-memory validator set. + expectedValidatorSetOutput *validators.GetValidatorOutput - s.PutCurrentDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.True(delIt.Next()) - retrievedDelegator := delIt.Value() - r.False(delIt.Next()) - delIt.Release() - r.Equal(staker, retrievedDelegator) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - - valsMap := s.validators.GetMap(staker.SubnetID) - r.Contains(valsMap, staker.NodeID) - valOut := valsMap[staker.NodeID] - r.Equal(valOut.NodeID, staker.NodeID) - r.Equal(valOut.Weight, val.Weight+staker.Weight) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // validator's weight must increase of delegator's weight amount - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: false, - Amount: staker.Weight, - }, weightDiff) + // Check whether weight/bls keys diffs are duly stored + expectedWeightDiff *ValidatorWeightDiff + expectedPublicKeyDiff maybe.Maybe[*bls.PublicKey] + }{ + "add current primary network validator": { + staker: primaryNetworkCurrentValidatorStaker, + tx: addPrimaryNetworkValidator, + expectedCurrentValidator: primaryNetworkCurrentValidatorStaker, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: primaryNetworkCurrentValidatorStaker.NodeID, + 
PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: primaryNetworkCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: false, + Amount: primaryNetworkCurrentValidatorStaker.Weight, }, + expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](nil), }, - "add pending validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(startTime), - End: uint64(endTime), - Wght: 1234, - } - ) - - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - staker, err := NewPendingStaker( - addPermValTx.ID(), - utx, - ) - r.NoError(err) - - r.NoError(s.PutPendingValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - return staker - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - retrievedStaker, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.Equal(staker, retrievedStaker) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - // pending validators are not showed in validators set - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - // pending validators uptime is not tracked - _, _, err := s.GetUptime(staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) + "add current primary network delegator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkCurrentDelegatorStaker, + tx: addPrimaryNetworkDelegator, + expectedCurrentValidator: primaryNetworkCurrentValidatorStaker, + expectedCurrentDelegators: []*Staker{primaryNetworkCurrentDelegatorStaker}, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: primaryNetworkCurrentDelegatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: primaryNetworkCurrentDelegatorStaker.Weight + primaryNetworkCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: false, + Amount: primaryNetworkCurrentDelegatorStaker.Weight, }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // pending validators weight diff and bls diffs are not stored - _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) - - _, err = s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) + }, + "add pending primary network validator": { + staker: primaryNetworkPendingValidatorStaker, + tx: addPrimaryNetworkValidator, + expectedPendingValidator: primaryNetworkPendingValidatorStaker, + }, + "add pending primary network delegator": { + initialStakers: []*Staker{primaryNetworkPendingValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkPendingDelegatorStaker, + tx: addPrimaryNetworkDelegator, + expectedPendingValidator: primaryNetworkPendingValidatorStaker, + expectedPendingDelegators: []*Staker{primaryNetworkPendingDelegatorStaker}, + }, + 
"delete current primary network validator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkCurrentValidatorStaker, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: true, + Amount: primaryNetworkCurrentValidatorStaker.Weight, }, + expectedPublicKeyDiff: maybe.Some(primaryNetworkCurrentValidatorStaker.PublicKey), }, - "add pending delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert the delegator and its validator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(valStartTime), - End: uint64(valEndTime), - Wght: 1234, - } - - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - Start: uint64(delStartTime), - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } - ) - - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - val, err := NewPendingStaker(addPermValTx.ID(), utxVal) - r.NoError(err) - - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) - - del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) - r.NoError(err) - - r.NoError(s.PutPendingValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - s.PutPendingDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - return del + "delete current primary network delegator": { + initialStakers: []*Staker{ + primaryNetworkCurrentValidatorStaker, + primaryNetworkCurrentDelegatorStaker, + }, + initialTxs: []*txs.Tx{ + addPrimaryNetworkValidator, + addPrimaryNetworkDelegator, + }, + staker: primaryNetworkCurrentDelegatorStaker, + expectedCurrentValidator: primaryNetworkCurrentValidatorStaker, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: primaryNetworkCurrentValidatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: primaryNetworkCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: true, + Amount: primaryNetworkCurrentDelegatorStaker.Weight, }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.True(delIt.Next()) - retrievedDelegator := delIt.Value() - r.False(delIt.Next()) - delIt.Release() - r.Equal(staker, retrievedDelegator) + }, + "delete pending primary network validator": { + initialStakers: []*Staker{primaryNetworkPendingValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkPendingValidatorStaker, + }, + "delete pending primary network delegator": { + initialStakers: []*Staker{ + primaryNetworkPendingValidatorStaker, + primaryNetworkPendingDelegatorStaker, }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, 
staker.NodeID) + initialTxs: []*txs.Tx{ + addPrimaryNetworkValidator, + addPrimaryNetworkDelegator, }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, + staker: primaryNetworkPendingDelegatorStaker, + expectedPendingValidator: primaryNetworkPendingValidatorStaker, }, - "delete current validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // add them remove the validator - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(endTime), - Wght: 1234, - } - validatorReward uint64 = 5678 - ) + } - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) - staker, err := NewCurrentStaker( - addPermValTx.ID(), - utx, - time.Unix(startTime, 0), - validatorReward, - ) - r.NoError(err) + db := memdb.New() + state := newTestState(t, db) + + // create and store the initial stakers + for _, staker := range test.initialStakers { + switch { + case staker.Priority.IsCurrentValidator(): + require.NoError(state.PutCurrentValidator(staker)) + case staker.Priority.IsPendingValidator(): + require.NoError(state.PutPendingValidator(staker)) + case staker.Priority.IsCurrentDelegator(): + state.PutCurrentDelegator(staker) + case staker.Priority.IsPendingDelegator(): + state.PutPendingDelegator(staker) + } + } + for _, tx := range test.initialTxs { + state.AddTx(tx, status.Committed) + } - r.NoError(s.PutCurrentValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) + state.SetHeight(0) + require.NoError(state.Commit()) - s.DeleteCurrentValidator(staker) - r.NoError(s.Commit()) - return staker - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - _, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - // deleted validators are not showed in the validators set anymore - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - // uptimes of delete validators are dropped - _, _, err := s.GetUptime(staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: true, - Amount: staker.Weight, - }, weightDiff) - - blsDiffBytes, err := s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - if staker.SubnetID == constants.PrimaryNetworkID { - r.NoError(err) - r.Equal(bls.PublicKeyFromValidUncompressedBytes(blsDiffBytes), staker.PublicKey) + // create and store the staker under test + switch { + case test.staker.Priority.IsCurrentValidator(): + if test.tx != nil { + require.NoError(state.PutCurrentValidator(test.staker)) } else { - r.ErrorIs(err, 
database.ErrNotFound) + state.DeleteCurrentValidator(test.staker) } - }, - }, - "delete current delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert validator and delegator, then remove the delegator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(valEndTime), - Wght: 1234, - } - validatorReward uint64 = 5678 - - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } - delegatorReward uint64 = 5432 - ) - - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - val, err := NewCurrentStaker( - addPermValTx.ID(), - utxVal, - time.Unix(valStartTime, 0), - validatorReward, - ) - r.NoError(err) - - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) + case test.staker.Priority.IsPendingValidator(): + if test.tx != nil { + require.NoError(state.PutPendingValidator(test.staker)) + } else { + state.DeletePendingValidator(test.staker) + } + case test.staker.Priority.IsCurrentDelegator(): + if test.tx != nil { + state.PutCurrentDelegator(test.staker) + } else { + state.DeleteCurrentDelegator(test.staker) + } + case test.staker.Priority.IsPendingDelegator(): + if test.tx != nil { + state.PutPendingDelegator(test.staker) + } else { + state.DeletePendingDelegator(test.staker) + } + } + if test.tx != nil { + state.AddTx(test.tx, status.Committed) + } - del, err := NewCurrentStaker( - addPermDelTx.ID(), - utxDel, - time.Unix(delStartTime, 0), - delegatorReward, - ) - r.NoError(err) + state.SetHeight(1) + require.NoError(state.Commit()) - r.NoError(s.PutCurrentValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + // Perform the checks once immediately after committing to the + // state, and once after re-loading the state from disk. 
+ for i := 0; i < 2; i++ { + currentValidator, err := state.GetCurrentValidator(test.staker.SubnetID, test.staker.NodeID) + if test.expectedCurrentValidator == nil { + require.ErrorIs(err, database.ErrNotFound) - s.PutCurrentDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) + // Only current validators should have uptimes + _, _, err := state.GetUptime(test.staker.NodeID) + require.ErrorIs(err, database.ErrNotFound) + } else { + require.NoError(err) + require.Equal(test.expectedCurrentValidator, currentValidator) + + // Current validators should also have uptimes + upDuration, lastUpdated, err := state.GetUptime(currentValidator.NodeID) + require.NoError(err) + require.Zero(upDuration) + require.Equal(currentValidator.StartTime, lastUpdated) + } - s.DeleteCurrentDelegator(del) - r.NoError(s.Commit()) + pendingValidator, err := state.GetPendingValidator(test.staker.SubnetID, test.staker.NodeID) + if test.expectedPendingValidator == nil { + require.ErrorIs(err, database.ErrNotFound) + } else { + require.NoError(err) + require.Equal(test.expectedPendingValidator, pendingValidator) + } - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.False(delIt.Next()) - delIt.Release() - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - - valsMap := s.validators.GetMap(staker.SubnetID) - r.Contains(valsMap, staker.NodeID) - valOut := valsMap[staker.NodeID] - r.Equal(valOut.NodeID, staker.NodeID) - r.Equal(valOut.Weight, val.Weight) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // validator's weight must decrease of delegator's weight amount - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: true, - Amount: staker.Weight, - }, weightDiff) - }, - }, - "delete pending validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(startTime), - End: uint64(endTime), - Wght: 1234, - } + it, err := state.GetCurrentDelegatorIterator(test.staker.SubnetID, test.staker.NodeID) + require.NoError(err) + require.Equal( + test.expectedCurrentDelegators, + iterator.ToSlice(it), ) - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - staker, err := NewPendingStaker( - addPermValTx.ID(), - utx, + it, err = state.GetPendingDelegatorIterator(test.staker.SubnetID, test.staker.NodeID) + require.NoError(err) + require.Equal( + test.expectedPendingDelegators, + iterator.ToSlice(it), ) - r.NoError(err) - - r.NoError(s.PutPendingValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - s.DeletePendingValidator(staker) - r.NoError(s.Commit()) - - return staker - }, - checkStakerInState: func(r 
*require.Assertions, s *state, staker *Staker) { - _, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - _, _, err := s.GetUptime(staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) - - _, err = s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) - }, - }, - "delete pending delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert validator and delegator the remove the validator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(valStartTime), - End: uint64(valEndTime), - Wght: 1234, - } - - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - Start: uint64(delStartTime), - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } + require.Equal( + test.expectedValidatorSetOutput, + state.validators.GetMap(test.staker.SubnetID)[test.staker.NodeID], ) - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - val, err := NewPendingStaker(addPermValTx.ID(), utxVal) - r.NoError(err) - - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) - - del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) - r.NoError(err) - - r.NoError(s.PutPendingValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - - s.PutPendingDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - s.DeletePendingDelegator(del) - r.NoError(s.Commit()) - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.False(delIt.Next()) - delIt.Release() - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, - }, - } - - subnetIDs := []ids.ID{constants.PrimaryNetworkID, ids.GenerateTestID()} - for _, subnetID := range subnetIDs { - for name, test := range tests { - t.Run(fmt.Sprintf("%s - subnetID %s", name, subnetID), func(t *testing.T) { - require := require.New(t) - - db := memdb.New() - state := newTestState(t, db) - - // create and store the staker - staker := test.storeStaker(require, subnetID, state) + diffKey := 
marshalDiffKey(test.staker.SubnetID, 1, test.staker.NodeID) + weightDiffBytes, err := state.validatorWeightDiffsDB.Get(diffKey) + if test.expectedWeightDiff == nil { + require.ErrorIs(err, database.ErrNotFound) + } else { + require.NoError(err) - // check all relevant data are stored - test.checkStakerInState(require, state, staker) - test.checkValidatorsSet(require, state, staker) - test.checkValidatorUptimes(require, state, staker) - test.checkDiffs(require, state, staker, 0 /*height*/) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + require.NoError(err) + require.Equal(test.expectedWeightDiff, weightDiff) + } - // rebuild the state - rebuiltState := newTestState(t, db) + publicKeyDiffBytes, err := state.validatorPublicKeyDiffsDB.Get(diffKey) + if test.expectedPublicKeyDiff.IsNothing() { + require.ErrorIs(err, database.ErrNotFound) + } else if expectedPublicKeyDiff := test.expectedPublicKeyDiff.Value(); expectedPublicKeyDiff == nil { + require.NoError(err) + require.Empty(publicKeyDiffBytes) + } else { + require.NoError(err) + require.Equal(expectedPublicKeyDiff, bls.PublicKeyFromValidUncompressedBytes(publicKeyDiffBytes)) + } - // check again that all relevant data are still available in rebuilt state - test.checkStakerInState(require, rebuiltState, staker) - test.checkValidatorsSet(require, rebuiltState, staker) - test.checkValidatorUptimes(require, rebuiltState, staker) - test.checkDiffs(require, rebuiltState, staker, 0 /*height*/) - }) - } + // re-load the state from disk + state = newTestState(t, db) + } + }) } } -func createPermissionlessValidatorTx(r *require.Assertions, subnetID ids.ID, validatorsData txs.Validator) *txs.AddPermissionlessValidatorTx { +func createPermissionlessValidatorTx(t testing.TB, subnetID ids.ID, validatorsData txs.Validator) *txs.AddPermissionlessValidatorTx { var sig signer.Signer = &signer.Empty{} if subnetID == constants.PrimaryNetworkID { sk, err := bls.NewSecretKey() - r.NoError(err) + require.NoError(t, err) sig = signer.NewProofOfPossession(sk) } From 803d0c4d777aa587949df49f40b21b17cacafeed Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 22 Oct 2024 07:07:59 -0400 Subject: [PATCH 095/155] nit --- vms/platformvm/state/state_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 419586f7871c..29d37c15a341 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -428,7 +428,7 @@ func TestPersistStakers(t *testing.T) { require.Equal(expectedPublicKeyDiff, bls.PublicKeyFromValidUncompressedBytes(publicKeyDiffBytes)) } - // re-load the state from disk + // re-load the state from disk for the second iteration state = newTestState(t, db) } }) From a2c777337e26d7e63426644d61b00c8e0d21a81c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 22 Oct 2024 08:57:04 -0400 Subject: [PATCH 096/155] Update test and populate public keys during startup --- vms/platformvm/state/state.go | 21 ++++++--- vms/platformvm/state/state_test.go | 72 ++++++++++++++++++++++++++---- 2 files changed, 78 insertions(+), 15 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index bed7b4746fbe..8a81931f1111 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1718,19 +1718,28 @@ func (s *state) loadPendingValidators() error { // Invariant: initValidatorSets requires loadCurrentValidators to have already // been called. 
func (s *state) initValidatorSets() error { - for subnetID, validators := range s.currentStakers.validators { + primaryNetworkValidators := s.currentStakers.validators[constants.PrimaryNetworkID] + for subnetID, subnetValidators := range s.currentStakers.validators { if s.validators.Count(subnetID) != 0 { // Enforce the invariant that the validator set is empty here. return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) } - for nodeID, validator := range validators { - validatorStaker := validator.validator - if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { + for nodeID, subnetValidator := range subnetValidators { + primaryValidator, ok := primaryNetworkValidators[nodeID] + if !ok { + return fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) + } + + var ( + primaryStaker = primaryValidator.validator + subnetStaker = subnetValidator.validator + ) + if err := s.validators.AddStaker(subnetID, nodeID, primaryStaker.PublicKey, subnetStaker.TxID, subnetStaker.Weight); err != nil { return err } - delegatorIterator := iterator.FromTree(validator.delegators) + delegatorIterator := iterator.FromTree(subnetValidator.delegators) for delegatorIterator.Next() { delegatorStaker := delegatorIterator.Value() if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { @@ -2106,7 +2115,7 @@ func (s *state) writeCurrentStakersSubnetDiff( // This should never happen as the primary network diffs are // written last and subnet validator times must be a subset // of the primary network validator times. - return errMissingPrimaryNetworkValidator + return fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) } } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 29d37c15a341..b725761bab9a 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -117,10 +117,10 @@ func TestPersistStakers(t *testing.T) { primaryValidatorDuration = 28 * 24 * time.Hour primaryDelegatorDuration = 14 * 24 * time.Hour subnetValidatorDuration = 21 * 24 * time.Hour - subnetDelegatorDuration = 14 * 24 * time.Hour primaryValidatorReward = iota primaryDelegatorReward + subnetValidatorReward ) var ( primaryValidatorStartTime = time.Now().Truncate(time.Second) @@ -131,6 +131,10 @@ func TestPersistStakers(t *testing.T) { primaryDelegatorEndTime = primaryDelegatorStartTime.Add(primaryDelegatorDuration) primaryDelegatorEndTimeUnix = uint64(primaryDelegatorEndTime.Unix()) + subnetValidatorStartTime = primaryValidatorStartTime + subnetValidatorEndTime = subnetValidatorStartTime.Add(subnetValidatorDuration) + subnetValidatorEndTimeUnix = uint64(subnetValidatorEndTime.Unix()) + primaryValidatorData = txs.Validator{ NodeID: ids.GenerateTestNodeID(), End: primaryValidatorEndTimeUnix, @@ -141,6 +145,13 @@ func TestPersistStakers(t *testing.T) { End: primaryDelegatorEndTimeUnix, Wght: 6789, } + subnetValidatorData = txs.Validator{ + NodeID: primaryValidatorData.NodeID, + End: subnetValidatorEndTimeUnix, + Wght: 9876, + } + + subnetID = ids.GenerateTestID() ) unsignedAddPrimaryNetworkValidator := createPermissionlessValidatorTx(t, constants.PrimaryNetworkID, primaryValidatorData) @@ -179,6 +190,18 @@ func TestPersistStakers(t *testing.T) { ) require.NoError(t, err) + unsignedAddSubnetValidator := createPermissionlessValidatorTx(t, subnetID, subnetValidatorData) + addSubnetValidator := &txs.Tx{Unsigned: 
unsignedAddSubnetValidator} + require.NoError(t, addSubnetValidator.Initialize(txs.Codec)) + + subnetCurrentValidatorStaker, err := NewCurrentStaker( + addSubnetValidator.ID(), + unsignedAddSubnetValidator, + subnetValidatorStartTime, + subnetValidatorReward, + ) + require.NoError(t, err) + tests := map[string]struct { initialStakers []*Staker initialTxs []*txs.Tx @@ -246,6 +269,23 @@ func TestPersistStakers(t *testing.T) { expectedPendingValidator: primaryNetworkPendingValidatorStaker, expectedPendingDelegators: []*Staker{primaryNetworkPendingDelegatorStaker}, }, + "add current subnet validator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: subnetCurrentValidatorStaker, + tx: addSubnetValidator, + expectedCurrentValidator: subnetCurrentValidatorStaker, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: subnetCurrentValidatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: subnetCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: false, + Amount: subnetCurrentValidatorStaker.Weight, + }, + expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](nil), + }, "delete current primary network validator": { initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, @@ -294,6 +334,16 @@ func TestPersistStakers(t *testing.T) { staker: primaryNetworkPendingDelegatorStaker, expectedPendingValidator: primaryNetworkPendingValidatorStaker, }, + "delete current subnet validator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker, subnetCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator, addSubnetValidator}, + staker: subnetCurrentValidatorStaker, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: true, + Amount: subnetCurrentValidatorStaker.Weight, + }, + expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](primaryNetworkCurrentValidatorStaker.PublicKey), + }, } for name, test := range tests { @@ -364,18 +414,22 @@ func TestPersistStakers(t *testing.T) { if test.expectedCurrentValidator == nil { require.ErrorIs(err, database.ErrNotFound) - // Only current validators should have uptimes - _, _, err := state.GetUptime(test.staker.NodeID) - require.ErrorIs(err, database.ErrNotFound) + if test.staker.SubnetID == constants.PrimaryNetworkID { + // Uptimes are only considered for primary network validators + _, _, err := state.GetUptime(test.staker.NodeID) + require.ErrorIs(err, database.ErrNotFound) + } } else { require.NoError(err) require.Equal(test.expectedCurrentValidator, currentValidator) - // Current validators should also have uptimes - upDuration, lastUpdated, err := state.GetUptime(currentValidator.NodeID) - require.NoError(err) - require.Zero(upDuration) - require.Equal(currentValidator.StartTime, lastUpdated) + if test.staker.SubnetID == constants.PrimaryNetworkID { + // Uptimes are only considered for primary network validators + upDuration, lastUpdated, err := state.GetUptime(currentValidator.NodeID) + require.NoError(err) + require.Zero(upDuration) + require.Equal(currentValidator.StartTime, lastUpdated) + } } pendingValidator, err := state.GetPendingValidator(test.staker.SubnetID, test.staker.NodeID) From 95c42a16942d6ba0a8a67a0ebd29ed7129310efa Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 23 Oct 2024 11:39:45 -0400 Subject: [PATCH 097/155] comment --- vms/platformvm/state/state.go | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 8a81931f1111..725b9e010c40 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1726,6 +1726,8 @@ func (s *state) initValidatorSets() error { } for nodeID, subnetValidator := range subnetValidators { + // The subnet validator's Public Key is inherited from the + // corresponding primary network validator. primaryValidator, ok := primaryNetworkValidators[nodeID] if !ok { return fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) From 21cdc32e2b6508a09ff390fdee7f7b5bf90620fe Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 24 Oct 2024 10:28:07 -0400 Subject: [PATCH 098/155] Update P-chain state staker tests --- vms/platformvm/state/state_test.go | 1203 ++++++++++++---------------- 1 file changed, 514 insertions(+), 689 deletions(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 6204540bd615..e9096af13a1c 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -5,7 +5,6 @@ package state import ( "context" - "fmt" "math" "math/rand" "sync" @@ -28,6 +27,8 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -109,620 +110,387 @@ func TestStateSyncGenesis(t *testing.T) { ) } -// Whenever we store a staker, a whole bunch a data structures are updated -// This test is meant to capture which updates are carried out -func TestPersistStakers(t *testing.T) { - tests := map[string]struct { - // Insert or delete a staker to state and store it - storeStaker func(*require.Assertions, ids.ID /*=subnetID*/, *state) *Staker +// Whenever we add or remove a staker, a number of on-disk data structures +// should be updated. +// +// This test verifies that the on-disk data structures are updated as expected. 
+func TestState_writeStakers(t *testing.T) { + const ( + primaryValidatorDuration = 28 * 24 * time.Hour + primaryDelegatorDuration = 14 * 24 * time.Hour + subnetValidatorDuration = 21 * 24 * time.Hour + + primaryValidatorReward = iota + primaryDelegatorReward + subnetValidatorReward + ) + var ( + primaryValidatorStartTime = time.Now().Truncate(time.Second) + primaryValidatorEndTime = primaryValidatorStartTime.Add(primaryValidatorDuration) + primaryValidatorEndTimeUnix = uint64(primaryValidatorEndTime.Unix()) + + primaryDelegatorStartTime = primaryValidatorStartTime + primaryDelegatorEndTime = primaryDelegatorStartTime.Add(primaryDelegatorDuration) + primaryDelegatorEndTimeUnix = uint64(primaryDelegatorEndTime.Unix()) + + subnetValidatorStartTime = primaryValidatorStartTime + subnetValidatorEndTime = subnetValidatorStartTime.Add(subnetValidatorDuration) + subnetValidatorEndTimeUnix = uint64(subnetValidatorEndTime.Unix()) + + primaryValidatorData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: primaryValidatorEndTimeUnix, + Wght: 1234, + } + primaryDelegatorData = txs.Validator{ + NodeID: primaryValidatorData.NodeID, + End: primaryDelegatorEndTimeUnix, + Wght: 6789, + } + subnetValidatorData = txs.Validator{ + NodeID: primaryValidatorData.NodeID, + End: subnetValidatorEndTimeUnix, + Wght: 9876, + } - // Check that the staker is duly stored/removed in P-chain state - checkStakerInState func(*require.Assertions, *state, *Staker) + subnetID = ids.GenerateTestID() + ) - // Check whether validators are duly reported in the validator set, - // with the right weight and showing the BLS key - checkValidatorsSet func(*require.Assertions, *state, *Staker) + unsignedAddPrimaryNetworkValidator := createPermissionlessValidatorTx(t, constants.PrimaryNetworkID, primaryValidatorData) + addPrimaryNetworkValidator := &txs.Tx{Unsigned: unsignedAddPrimaryNetworkValidator} + require.NoError(t, addPrimaryNetworkValidator.Initialize(txs.Codec)) - // Check that node duly track stakers uptimes - checkValidatorUptimes func(*require.Assertions, *state, *Staker) + primaryNetworkPendingValidatorStaker, err := NewPendingStaker( + addPrimaryNetworkValidator.ID(), + unsignedAddPrimaryNetworkValidator, + ) + require.NoError(t, err) - // Check whether weight/bls keys diffs are duly stored - checkDiffs func(*require.Assertions, *state, *Staker, uint64) - }{ - "add current validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(endTime), - Wght: 1234, - } - validatorReward uint64 = 5678 - ) + primaryNetworkCurrentValidatorStaker, err := NewCurrentStaker( + addPrimaryNetworkValidator.ID(), + unsignedAddPrimaryNetworkValidator, + primaryValidatorStartTime, + primaryValidatorReward, + ) + require.NoError(t, err) - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) + unsignedAddPrimaryNetworkDelegator := createPermissionlessDelegatorTx(constants.PrimaryNetworkID, primaryDelegatorData) + addPrimaryNetworkDelegator := &txs.Tx{Unsigned: unsignedAddPrimaryNetworkDelegator} + require.NoError(t, addPrimaryNetworkDelegator.Initialize(txs.Codec)) - staker, err := NewCurrentStaker( - addPermValTx.ID(), - utx, - time.Unix(startTime, 0), - validatorReward, - ) - r.NoError(err) + 
primaryNetworkPendingDelegatorStaker, err := NewPendingStaker( + addPrimaryNetworkDelegator.ID(), + unsignedAddPrimaryNetworkDelegator, + ) + require.NoError(t, err) - r.NoError(s.PutCurrentValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - return staker - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - retrievedStaker, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.Equal(staker, retrievedStaker) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.Contains(valsMap, staker.NodeID) - r.Equal( - &validators.GetValidatorOutput{ - NodeID: staker.NodeID, - PublicKey: staker.PublicKey, - Weight: staker.Weight, - }, - valsMap[staker.NodeID], - ) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - upDuration, lastUpdated, err := s.GetUptime(staker.NodeID) - if staker.SubnetID != constants.PrimaryNetworkID { - // only primary network validators have uptimes - r.ErrorIs(err, database.ErrNotFound) - } else { - r.NoError(err) - r.Equal(upDuration, time.Duration(0)) - r.Equal(lastUpdated, staker.StartTime) - } - }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: false, - Amount: staker.Weight, - }, weightDiff) - - blsDiffBytes, err := s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - if staker.SubnetID == constants.PrimaryNetworkID { - r.NoError(err) - r.Nil(blsDiffBytes) - } else { - r.ErrorIs(err, database.ErrNotFound) - } - }, - }, - "add current delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert the delegator and its validator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(valEndTime), - Wght: 1234, - } - validatorReward uint64 = 5678 + primaryNetworkCurrentDelegatorStaker, err := NewCurrentStaker( + addPrimaryNetworkDelegator.ID(), + unsignedAddPrimaryNetworkDelegator, + primaryDelegatorStartTime, + primaryDelegatorReward, + ) + require.NoError(t, err) - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } - delegatorReward uint64 = 5432 - ) + unsignedAddSubnetValidator := createPermissionlessValidatorTx(t, subnetID, subnetValidatorData) + addSubnetValidator := &txs.Tx{Unsigned: unsignedAddSubnetValidator} + require.NoError(t, addSubnetValidator.Initialize(txs.Codec)) - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) + subnetCurrentValidatorStaker, err := NewCurrentStaker( + addSubnetValidator.ID(), + unsignedAddSubnetValidator, + subnetValidatorStartTime, + subnetValidatorReward, + ) + require.NoError(t, err) - val, err := NewCurrentStaker( - 
addPermValTx.ID(), - utxVal, - time.Unix(valStartTime, 0), - validatorReward, - ) - r.NoError(err) + tests := map[string]struct { + initialStakers []*Staker + initialTxs []*txs.Tx - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) + // Staker to insert or remove + staker *Staker + addStakerTx *txs.Tx // If tx is nil, the staker is being removed - del, err := NewCurrentStaker( - addPermDelTx.ID(), - utxDel, - time.Unix(delStartTime, 0), - delegatorReward, - ) - r.NoError(err) + // Check that the staker is duly stored/removed in P-chain state + expectedCurrentValidator *Staker + expectedPendingValidator *Staker + expectedCurrentDelegators []*Staker + expectedPendingDelegators []*Staker - r.NoError(s.PutCurrentValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) + // Check that the validator entry has been set correctly in the + // in-memory validator set. + expectedValidatorSetOutput *validators.GetValidatorOutput - s.PutCurrentDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.True(delIt.Next()) - retrievedDelegator := delIt.Value() - r.False(delIt.Next()) - delIt.Release() - r.Equal(staker, retrievedDelegator) + // Check whether weight/bls keys diffs are duly stored + expectedWeightDiff *ValidatorWeightDiff + expectedPublicKeyDiff maybe.Maybe[*bls.PublicKey] + }{ + "add current primary network validator": { + staker: primaryNetworkCurrentValidatorStaker, + addStakerTx: addPrimaryNetworkValidator, + expectedCurrentValidator: primaryNetworkCurrentValidatorStaker, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: primaryNetworkCurrentValidatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: primaryNetworkCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: false, + Amount: primaryNetworkCurrentValidatorStaker.Weight, }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - - valsMap := s.validators.GetMap(staker.SubnetID) - r.Contains(valsMap, staker.NodeID) - valOut := valsMap[staker.NodeID] - r.Equal(valOut.NodeID, staker.NodeID) - r.Equal(valOut.Weight, val.Weight+staker.Weight) + expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](nil), + }, + "add current primary network delegator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkCurrentDelegatorStaker, + addStakerTx: addPrimaryNetworkDelegator, + expectedCurrentValidator: primaryNetworkCurrentValidatorStaker, + expectedCurrentDelegators: []*Staker{primaryNetworkCurrentDelegatorStaker}, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: primaryNetworkCurrentDelegatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: primaryNetworkCurrentDelegatorStaker.Weight + primaryNetworkCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: false, + Amount: primaryNetworkCurrentDelegatorStaker.Weight, }, - 
checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // validator's weight must increase of delegator's weight amount - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: false, - Amount: staker.Weight, - }, weightDiff) + }, + "add pending primary network validator": { + staker: primaryNetworkPendingValidatorStaker, + addStakerTx: addPrimaryNetworkValidator, + expectedPendingValidator: primaryNetworkPendingValidatorStaker, + }, + "add pending primary network delegator": { + initialStakers: []*Staker{primaryNetworkPendingValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkPendingDelegatorStaker, + addStakerTx: addPrimaryNetworkDelegator, + expectedPendingValidator: primaryNetworkPendingValidatorStaker, + expectedPendingDelegators: []*Staker{primaryNetworkPendingDelegatorStaker}, + }, + "add current subnet validator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: subnetCurrentValidatorStaker, + addStakerTx: addSubnetValidator, + expectedCurrentValidator: subnetCurrentValidatorStaker, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: subnetCurrentValidatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: subnetCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: false, + Amount: subnetCurrentValidatorStaker.Weight, }, + expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](nil), }, - "add pending validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(startTime), - End: uint64(endTime), - Wght: 1234, - } - ) - - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - staker, err := NewPendingStaker( - addPermValTx.ID(), - utx, - ) - r.NoError(err) - - r.NoError(s.PutPendingValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - return staker + "delete current primary network validator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkCurrentValidatorStaker, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: true, + Amount: primaryNetworkCurrentValidatorStaker.Weight, }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - retrievedStaker, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.Equal(staker, retrievedStaker) + expectedPublicKeyDiff: maybe.Some(primaryNetworkCurrentValidatorStaker.PublicKey), + }, + "delete current primary network delegator": { + initialStakers: []*Staker{ + primaryNetworkCurrentValidatorStaker, + primaryNetworkCurrentDelegatorStaker, + }, + initialTxs: []*txs.Tx{ + addPrimaryNetworkValidator, + addPrimaryNetworkDelegator, + }, + staker: primaryNetworkCurrentDelegatorStaker, + 
expectedCurrentValidator: primaryNetworkCurrentValidatorStaker, + expectedValidatorSetOutput: &validators.GetValidatorOutput{ + NodeID: primaryNetworkCurrentValidatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: primaryNetworkCurrentValidatorStaker.Weight, + }, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: true, + Amount: primaryNetworkCurrentDelegatorStaker.Weight, }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - // pending validators are not showed in validators set - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) + }, + "delete pending primary network validator": { + initialStakers: []*Staker{primaryNetworkPendingValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator}, + staker: primaryNetworkPendingValidatorStaker, + }, + "delete pending primary network delegator": { + initialStakers: []*Staker{ + primaryNetworkPendingValidatorStaker, + primaryNetworkPendingDelegatorStaker, }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - // pending validators uptime is not tracked - _, _, err := s.GetUptime(staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) + initialTxs: []*txs.Tx{ + addPrimaryNetworkValidator, + addPrimaryNetworkDelegator, }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // pending validators weight diff and bls diffs are not stored - _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) - - _, err = s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) + staker: primaryNetworkPendingDelegatorStaker, + expectedPendingValidator: primaryNetworkPendingValidatorStaker, + }, + "delete current subnet validator": { + initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker, subnetCurrentValidatorStaker}, + initialTxs: []*txs.Tx{addPrimaryNetworkValidator, addSubnetValidator}, + staker: subnetCurrentValidatorStaker, + expectedWeightDiff: &ValidatorWeightDiff{ + Decrease: true, + Amount: subnetCurrentValidatorStaker.Weight, }, + expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](primaryNetworkCurrentValidatorStaker.PublicKey), }, - "add pending delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert the delegator and its validator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(valStartTime), - End: uint64(valEndTime), - Wght: 1234, - } + } - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - Start: uint64(delStartTime), - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } - ) + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) + db := memdb.New() + state := newTestState(t, db) + + addOrDeleteStaker := func(staker *Staker, add bool) { + if add { + switch { + case staker.Priority.IsCurrentValidator(): + 
require.NoError(state.PutCurrentValidator(staker)) + case staker.Priority.IsPendingValidator(): + require.NoError(state.PutPendingValidator(staker)) + case staker.Priority.IsCurrentDelegator(): + state.PutCurrentDelegator(staker) + case staker.Priority.IsPendingDelegator(): + state.PutPendingDelegator(staker) + } + } else { + switch { + case staker.Priority.IsCurrentValidator(): + state.DeleteCurrentValidator(staker) + case staker.Priority.IsPendingValidator(): + state.DeletePendingValidator(staker) + case staker.Priority.IsCurrentDelegator(): + state.DeleteCurrentDelegator(staker) + case staker.Priority.IsPendingDelegator(): + state.DeletePendingDelegator(staker) + } + } + } - val, err := NewPendingStaker(addPermValTx.ID(), utxVal) - r.NoError(err) + // create and store the initial stakers + for _, staker := range test.initialStakers { + addOrDeleteStaker(staker, true) + } + for _, tx := range test.initialTxs { + state.AddTx(tx, status.Committed) + } - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) + state.SetHeight(0) + require.NoError(state.Commit()) - del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) - r.NoError(err) + // create and store the staker under test + addOrDeleteStaker(test.staker, test.addStakerTx != nil) + if test.addStakerTx != nil { + state.AddTx(test.addStakerTx, status.Committed) + } - r.NoError(s.PutPendingValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) + state.SetHeight(1) + require.NoError(state.Commit()) - s.PutPendingDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) + // Perform the checks once immediately after committing to the + // state, and once after re-loading the state from disk. 
+ for i := 0; i < 2; i++ { + currentValidator, err := state.GetCurrentValidator(test.staker.SubnetID, test.staker.NodeID) + if test.expectedCurrentValidator == nil { + require.ErrorIs(err, database.ErrNotFound) - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.True(delIt.Next()) - retrievedDelegator := delIt.Value() - r.False(delIt.Next()) - delIt.Release() - r.Equal(staker, retrievedDelegator) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, - }, - "delete current validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // add them remove the validator - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(endTime), - Wght: 1234, + if test.staker.SubnetID == constants.PrimaryNetworkID { + // Uptimes are only considered for primary network validators + _, _, err := state.GetUptime(test.staker.NodeID) + require.ErrorIs(err, database.ErrNotFound) } - validatorReward uint64 = 5678 - ) - - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - staker, err := NewCurrentStaker( - addPermValTx.ID(), - utx, - time.Unix(startTime, 0), - validatorReward, - ) - r.NoError(err) - - r.NoError(s.PutCurrentValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) + } else { + require.NoError(err) + require.Equal(test.expectedCurrentValidator, currentValidator) + + if test.staker.SubnetID == constants.PrimaryNetworkID { + // Uptimes are only considered for primary network validators + upDuration, lastUpdated, err := state.GetUptime(currentValidator.NodeID) + require.NoError(err) + require.Zero(upDuration) + require.Equal(currentValidator.StartTime, lastUpdated) + } + } - s.DeleteCurrentValidator(staker) - r.NoError(s.Commit()) - return staker - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - _, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - // deleted validators are not showed in the validators set anymore - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - // uptimes of delete validators are dropped - _, _, err := s.GetUptime(staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: true, - Amount: staker.Weight, - }, weightDiff) - - blsDiffBytes, err := s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, 
staker.NodeID)) - if staker.SubnetID == constants.PrimaryNetworkID { - r.NoError(err) - r.Equal(bls.PublicKeyFromValidUncompressedBytes(blsDiffBytes), staker.PublicKey) + pendingValidator, err := state.GetPendingValidator(test.staker.SubnetID, test.staker.NodeID) + if test.expectedPendingValidator == nil { + require.ErrorIs(err, database.ErrNotFound) } else { - r.ErrorIs(err, database.ErrNotFound) + require.NoError(err) + require.Equal(test.expectedPendingValidator, pendingValidator) } - }, - }, - "delete current delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert validator and delegator, then remove the delegator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - End: uint64(valEndTime), - Wght: 1234, - } - validatorReward uint64 = 5678 - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, - } - delegatorReward uint64 = 5432 + it, err := state.GetCurrentDelegatorIterator(test.staker.SubnetID, test.staker.NodeID) + require.NoError(err) + require.Equal( + test.expectedCurrentDelegators, + iterator.ToSlice(it), ) - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - val, err := NewCurrentStaker( - addPermValTx.ID(), - utxVal, - time.Unix(valStartTime, 0), - validatorReward, + it, err = state.GetPendingDelegatorIterator(test.staker.SubnetID, test.staker.NodeID) + require.NoError(err) + require.Equal( + test.expectedPendingDelegators, + iterator.ToSlice(it), ) - r.NoError(err) - - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) - del, err := NewCurrentStaker( - addPermDelTx.ID(), - utxDel, - time.Unix(delStartTime, 0), - delegatorReward, + require.Equal( + test.expectedValidatorSetOutput, + state.validators.GetMap(test.staker.SubnetID)[test.staker.NodeID], ) - r.NoError(err) - - r.NoError(s.PutCurrentValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - s.PutCurrentDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - s.DeleteCurrentDelegator(del) - r.NoError(s.Commit()) - - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.False(delIt.Next()) - delIt.Release() - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) - r.NoError(err) - - valsMap := s.validators.GetMap(staker.SubnetID) - r.Contains(valsMap, staker.NodeID) - valOut := valsMap[staker.NodeID] - r.Equal(valOut.NodeID, staker.NodeID) - r.Equal(valOut.Weight, val.Weight) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // validator's weight must decrease of delegator's weight amount - weightDiffBytes, err := 
s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.NoError(err) - weightDiff, err := unmarshalWeightDiff(weightDiffBytes) - r.NoError(err) - r.Equal(&ValidatorWeightDiff{ - Decrease: true, - Amount: staker.Weight, - }, weightDiff) - }, - }, - "delete pending validator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - var ( - startTime = time.Now().Unix() - endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(startTime), - End: uint64(endTime), - Wght: 1234, - } - ) - - utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utx} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - staker, err := NewPendingStaker( - addPermValTx.ID(), - utx, - ) - r.NoError(err) - - r.NoError(s.PutPendingValidator(staker)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - s.DeletePendingValidator(staker) - r.NoError(s.Commit()) + diffKey := marshalDiffKey(test.staker.SubnetID, 1, test.staker.NodeID) + weightDiffBytes, err := state.validatorWeightDiffsDB.Get(diffKey) + if test.expectedWeightDiff == nil { + require.ErrorIs(err, database.ErrNotFound) + } else { + require.NoError(err) - return staker - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - _, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { - _, _, err := s.GetUptime(staker.NodeID) - r.ErrorIs(err, database.ErrNotFound) - }, - checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + require.NoError(err) + require.Equal(test.expectedWeightDiff, weightDiff) + } - _, err = s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) - r.ErrorIs(err, database.ErrNotFound) - }, - }, - "delete pending delegator": { - storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { - // insert validator and delegator the remove the validator - var ( - valStartTime = time.Now().Truncate(time.Second).Unix() - delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() - delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() - valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - - validatorsData = txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(valStartTime), - End: uint64(valEndTime), - Wght: 1234, - } + publicKeyDiffBytes, err := state.validatorPublicKeyDiffsDB.Get(diffKey) + if test.expectedPublicKeyDiff.IsNothing() { + require.ErrorIs(err, database.ErrNotFound) + } else { + require.NoError(err) - delegatorData = txs.Validator{ - NodeID: validatorsData.NodeID, - Start: uint64(delStartTime), - End: uint64(delEndTime), - Wght: validatorsData.Wght / 2, + expectedPublicKeyDiff := test.expectedPublicKeyDiff.Value() + if expectedPublicKeyDiff != nil { + require.Equal(expectedPublicKeyDiff, 
bls.PublicKeyFromValidUncompressedBytes(publicKeyDiffBytes)) + } else { + require.Empty(publicKeyDiffBytes) } - ) - - utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) - addPermValTx := &txs.Tx{Unsigned: utxVal} - r.NoError(addPermValTx.Initialize(txs.Codec)) - - val, err := NewPendingStaker(addPermValTx.ID(), utxVal) - r.NoError(err) - - utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) - addPermDelTx := &txs.Tx{Unsigned: utxDel} - r.NoError(addPermDelTx.Initialize(txs.Codec)) - - del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) - r.NoError(err) - - r.NoError(s.PutPendingValidator(val)) - s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker - - s.PutPendingDelegator(del) - s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker - r.NoError(s.Commit()) - - s.DeletePendingDelegator(del) - r.NoError(s.Commit()) - return del - }, - checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { - delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) - r.NoError(err) - r.False(delIt.Next()) - delIt.Release() - }, - checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { - valsMap := s.validators.GetMap(staker.SubnetID) - r.NotContains(valsMap, staker.NodeID) - }, - checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, - checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, - }, - } - - subnetIDs := []ids.ID{constants.PrimaryNetworkID, ids.GenerateTestID()} - for _, subnetID := range subnetIDs { - for name, test := range tests { - t.Run(fmt.Sprintf("%s - subnetID %s", name, subnetID), func(t *testing.T) { - require := require.New(t) - - db := memdb.New() - state := newTestState(t, db) - - // create and store the staker - staker := test.storeStaker(require, subnetID, state) - - // check all relevant data are stored - test.checkStakerInState(require, state, staker) - test.checkValidatorsSet(require, state, staker) - test.checkValidatorUptimes(require, state, staker) - test.checkDiffs(require, state, staker, 0 /*height*/) - - // rebuild the state - rebuiltState := newTestState(t, db) + } - // check again that all relevant data are still available in rebuilt state - test.checkStakerInState(require, rebuiltState, staker) - test.checkValidatorsSet(require, rebuiltState, staker) - test.checkValidatorUptimes(require, rebuiltState, staker) - test.checkDiffs(require, rebuiltState, staker, 0 /*height*/) - }) - } + // re-load the state from disk for the second iteration + state = newTestState(t, db) + } + }) } } -func createPermissionlessValidatorTx(r *require.Assertions, subnetID ids.ID, validatorsData txs.Validator) *txs.AddPermissionlessValidatorTx { +func createPermissionlessValidatorTx(t testing.TB, subnetID ids.ID, validatorsData txs.Validator) *txs.AddPermissionlessValidatorTx { var sig signer.Signer = &signer.Empty{} if subnetID == constants.PrimaryNetworkID { sk, err := bls.NewSecretKey() - r.NoError(err) + require.NoError(t, err) sig = signer.NewProofOfPossession(sk) } @@ -988,43 +756,49 @@ func TestValidatorWeightDiff(t *testing.T) { } } -// Tests PutCurrentValidator, DeleteCurrentValidator, GetCurrentValidator, -// ApplyValidatorWeightDiffs, ApplyValidatorPublicKeyDiffs -func TestStateAddRemoveValidator(t *testing.T) { +func TestState_ApplyValidatorDiffs(t *testing.T) { require := require.New(t) state := newTestState(t, memdb.New()) var ( - numNodes = 3 - subnetID = ids.GenerateTestID() - startTime = 
time.Now() - endTime = startTime.Add(24 * time.Hour) - stakers = make([]Staker, numNodes) + numNodes = 5 + subnetID = ids.GenerateTestID() + startTime = time.Now() + endTime = startTime.Add(24 * time.Hour) + primaryStakers = make([]Staker, numNodes) + subnetStakers = make([]Staker, numNodes) ) - for i := 0; i < numNodes; i++ { - stakers[i] = Staker{ + for i := range primaryStakers { + sk, err := bls.NewSecretKey() + require.NoError(err) + + primaryStakers[i] = Staker{ TxID: ids.GenerateTestID(), NodeID: ids.GenerateTestNodeID(), + PublicKey: bls.PublicFromSecretKey(sk), + SubnetID: constants.PrimaryNetworkID, Weight: uint64(i + 1), StartTime: startTime.Add(time.Duration(i) * time.Second), EndTime: endTime.Add(time.Duration(i) * time.Second), PotentialReward: uint64(i + 1), } - if i%2 == 0 { - stakers[i].SubnetID = subnetID - } else { - sk, err := bls.NewSecretKey() - require.NoError(err) - stakers[i].PublicKey = bls.PublicFromSecretKey(sk) - stakers[i].SubnetID = constants.PrimaryNetworkID + } + for i, primaryStaker := range primaryStakers { + subnetStakers[i] = Staker{ + TxID: ids.GenerateTestID(), + NodeID: primaryStaker.NodeID, + PublicKey: nil, // Key is inherited from the primary network + SubnetID: subnetID, + Weight: uint64(i + 1), + StartTime: primaryStaker.StartTime, + EndTime: primaryStaker.EndTime, + PotentialReward: uint64(i + 1), } } type diff struct { addedValidators []Staker - addedDelegators []Staker - removedDelegators []Staker removedValidators []Staker expectedPrimaryValidatorSet map[ids.NodeID]*validators.GetValidatorOutput @@ -1037,101 +811,172 @@ func TestStateAddRemoveValidator(t *testing.T) { expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { - // Add a subnet validator - addedValidators: []Staker{stakers[0]}, - expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[0].NodeID: { - NodeID: stakers[0].NodeID, - Weight: stakers[0].Weight, + // Add primary validator 0 + addedValidators: []Staker{primaryStakers[0]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + primaryStakers[0].NodeID: { + NodeID: primaryStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: primaryStakers[0].Weight, }, }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { - // Remove a subnet validator - removedValidators: []Staker{stakers[0]}, - expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + // Add subnet validator 0 + addedValidators: []Staker{subnetStakers[0]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + primaryStakers[0].NodeID: { + NodeID: primaryStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: primaryStakers[0].Weight, + }, + }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + subnetStakers[0].NodeID: { + NodeID: subnetStakers[0].NodeID, + Weight: subnetStakers[0].Weight, + }, + }, }, - { // Add a primary network validator - addedValidators: []Staker{stakers[1]}, + { + // Remove subnet validator 0 + removedValidators: []Staker{subnetStakers[0]}, expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[1].NodeID: { - NodeID: stakers[1].NodeID, - PublicKey: stakers[1].PublicKey, - Weight: stakers[1].Weight, + primaryStakers[0].NodeID: { + NodeID: 
primaryStakers[0].NodeID, + PublicKey: primaryStakers[0].PublicKey, + Weight: primaryStakers[0].Weight, }, }, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { - // Do nothing + // Add primary network validator 1, and subnet validator 1 + addedValidators: []Staker{primaryStakers[1], subnetStakers[1]}, + // Remove primary network validator 0, and subnet validator 1 + removedValidators: []Staker{primaryStakers[0], subnetStakers[1]}, expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[1].NodeID: { - NodeID: stakers[1].NodeID, - PublicKey: stakers[1].PublicKey, - Weight: stakers[1].Weight, + primaryStakers[1].NodeID: { + NodeID: primaryStakers[1].NodeID, + PublicKey: primaryStakers[1].PublicKey, + Weight: primaryStakers[1].Weight, }, }, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, - { // Remove a primary network validator - removedValidators: []Staker{stakers[1]}, - expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + { + // Add primary network validator 2, and subnet validator 2 + addedValidators: []Staker{primaryStakers[2], subnetStakers[2]}, + // Remove primary network validator 1 + removedValidators: []Staker{primaryStakers[1]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + primaryStakers[2].NodeID: { + NodeID: primaryStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: primaryStakers[2].Weight, + }, + }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ + subnetStakers[2].NodeID: { + NodeID: subnetStakers[2].NodeID, + Weight: subnetStakers[2].Weight, + }, + }, }, { - // Add 2 subnet validators and a primary network validator - addedValidators: []Staker{stakers[0], stakers[1], stakers[2]}, + // Add primary network and subnet validators 3 & 4 + addedValidators: []Staker{primaryStakers[3], primaryStakers[4], subnetStakers[3], subnetStakers[4]}, expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[1].NodeID: { - NodeID: stakers[1].NodeID, - PublicKey: stakers[1].PublicKey, - Weight: stakers[1].Weight, + primaryStakers[2].NodeID: { + NodeID: primaryStakers[2].NodeID, + PublicKey: primaryStakers[2].PublicKey, + Weight: primaryStakers[2].Weight, + }, + primaryStakers[3].NodeID: { + NodeID: primaryStakers[3].NodeID, + PublicKey: primaryStakers[3].PublicKey, + Weight: primaryStakers[3].Weight, + }, + primaryStakers[4].NodeID: { + NodeID: primaryStakers[4].NodeID, + PublicKey: primaryStakers[4].PublicKey, + Weight: primaryStakers[4].Weight, }, }, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ - stakers[0].NodeID: { - NodeID: stakers[0].NodeID, - Weight: stakers[0].Weight, + subnetStakers[2].NodeID: { + NodeID: subnetStakers[2].NodeID, + Weight: subnetStakers[2].Weight, + }, + subnetStakers[3].NodeID: { + NodeID: subnetStakers[3].NodeID, + Weight: subnetStakers[3].Weight, }, - stakers[2].NodeID: { - NodeID: stakers[2].NodeID, - Weight: stakers[2].Weight, + subnetStakers[4].NodeID: { + NodeID: subnetStakers[4].NodeID, + Weight: subnetStakers[4].Weight, }, }, }, { - // Remove 2 subnet validators and a primary network validator. 
- removedValidators: []Staker{stakers[0], stakers[1], stakers[2]}, + // Remove primary network and subnet validators 2 & 3 & 4 + removedValidators: []Staker{ + primaryStakers[2], primaryStakers[3], primaryStakers[4], + subnetStakers[2], subnetStakers[3], subnetStakers[4], + }, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + }, + { + // Do nothing expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, } for currentIndex, diff := range diffs { - for _, added := range diff.addedValidators { - added := added - require.NoError(state.PutCurrentValidator(&added)) - } - for _, added := range diff.addedDelegators { - added := added - state.PutCurrentDelegator(&added) + d, err := NewDiffOn(state) + require.NoError(err) + + type subnetIDNodeID struct { + subnetID ids.ID + nodeID ids.NodeID } - for _, removed := range diff.removedDelegators { - removed := removed - state.DeleteCurrentDelegator(&removed) + var expectedValidators set.Set[subnetIDNodeID] + for _, added := range diff.addedValidators { + require.NoError(d.PutCurrentValidator(&added)) + + expectedValidators.Add(subnetIDNodeID{ + subnetID: added.SubnetID, + nodeID: added.NodeID, + }) } for _, removed := range diff.removedValidators { - removed := removed - state.DeleteCurrentValidator(&removed) + d.DeleteCurrentValidator(&removed) + + expectedValidators.Remove(subnetIDNodeID{ + subnetID: removed.SubnetID, + nodeID: removed.NodeID, + }) } + require.NoError(d.Apply(state)) + currentHeight := uint64(currentIndex + 1) state.SetHeight(currentHeight) require.NoError(state.Commit()) + // Verify that the current state is as expected. for _, added := range diff.addedValidators { + subnetNodeID := subnetIDNodeID{ + subnetID: added.SubnetID, + nodeID: added.NodeID, + } + if !expectedValidators.Contains(subnetNodeID) { + continue + } + gotValidator, err := state.GetCurrentValidator(added.SubnetID, added.NodeID) require.NoError(err) require.Equal(added, *gotValidator) @@ -1142,37 +987,47 @@ func TestStateAddRemoveValidator(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) } + primaryValidatorSet := state.validators.GetMap(constants.PrimaryNetworkID) + delete(primaryValidatorSet, defaultValidatorNodeID) // Ignore the genesis validator + require.Equal(diff.expectedPrimaryValidatorSet, primaryValidatorSet) + + require.Equal(diff.expectedSubnetValidatorSet, state.validators.GetMap(subnetID)) + + // Verify that applying diffs against the current state results in the + // expected state. 
for i := 0; i < currentIndex; i++ { prevDiff := diffs[i] prevHeight := uint64(i + 1) - primaryValidatorSet := copyValidatorSet(diff.expectedPrimaryValidatorSet) - require.NoError(state.ApplyValidatorWeightDiffs( - context.Background(), - primaryValidatorSet, - currentHeight, - prevHeight+1, - constants.PrimaryNetworkID, - )) - requireEqualWeightsValidatorSet(require, prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) - - require.NoError(state.ApplyValidatorPublicKeyDiffs( - context.Background(), - primaryValidatorSet, - currentHeight, - prevHeight+1, - )) - requireEqualPublicKeysValidatorSet(require, prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) - - subnetValidatorSet := copyValidatorSet(diff.expectedSubnetValidatorSet) - require.NoError(state.ApplyValidatorWeightDiffs( - context.Background(), - subnetValidatorSet, - currentHeight, - prevHeight+1, - subnetID, - )) - requireEqualWeightsValidatorSet(require, prevDiff.expectedSubnetValidatorSet, subnetValidatorSet) + { + primaryValidatorSet := copyValidatorSet(diff.expectedPrimaryValidatorSet) + require.NoError(state.ApplyValidatorWeightDiffs( + context.Background(), + primaryValidatorSet, + currentHeight, + prevHeight+1, + constants.PrimaryNetworkID, + )) + require.NoError(state.ApplyValidatorPublicKeyDiffs( + context.Background(), + primaryValidatorSet, + currentHeight, + prevHeight+1, + )) + require.Equal(prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) + } + + { + subnetValidatorSet := copyValidatorSet(diff.expectedSubnetValidatorSet) + require.NoError(state.ApplyValidatorWeightDiffs( + context.Background(), + subnetValidatorSet, + currentHeight, + prevHeight+1, + subnetID, + )) + require.Equal(prevDiff.expectedSubnetValidatorSet, subnetValidatorSet) + } } } } @@ -1188,36 +1043,6 @@ func copyValidatorSet( return result } -func requireEqualWeightsValidatorSet( - require *require.Assertions, - expected map[ids.NodeID]*validators.GetValidatorOutput, - actual map[ids.NodeID]*validators.GetValidatorOutput, -) { - require.Len(actual, len(expected)) - for nodeID, expectedVdr := range expected { - require.Contains(actual, nodeID) - - actualVdr := actual[nodeID] - require.Equal(expectedVdr.NodeID, actualVdr.NodeID) - require.Equal(expectedVdr.Weight, actualVdr.Weight) - } -} - -func requireEqualPublicKeysValidatorSet( - require *require.Assertions, - expected map[ids.NodeID]*validators.GetValidatorOutput, - actual map[ids.NodeID]*validators.GetValidatorOutput, -) { - require.Len(actual, len(expected)) - for nodeID, expectedVdr := range expected { - require.Contains(actual, nodeID) - - actualVdr := actual[nodeID] - require.Equal(expectedVdr.NodeID, actualVdr.NodeID) - require.Equal(expectedVdr.PublicKey, actualVdr.PublicKey) - } -} - func TestParsedStateBlock(t *testing.T) { var ( require = require.New(t) From 91a7cd7f30798db77b7c7049d9fe662b81914e7c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 24 Oct 2024 10:43:46 -0400 Subject: [PATCH 099/155] fix test --- vms/platformvm/state/state_test.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index e9096af13a1c..522e5a912401 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -278,15 +278,13 @@ func TestState_writeStakers(t *testing.T) { addStakerTx: addSubnetValidator, expectedCurrentValidator: subnetCurrentValidatorStaker, expectedValidatorSetOutput: &validators.GetValidatorOutput{ - NodeID: 
subnetCurrentValidatorStaker.NodeID, - PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, - Weight: subnetCurrentValidatorStaker.Weight, + NodeID: subnetCurrentValidatorStaker.NodeID, + Weight: subnetCurrentValidatorStaker.Weight, }, expectedWeightDiff: &ValidatorWeightDiff{ Decrease: false, Amount: subnetCurrentValidatorStaker.Weight, }, - expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](nil), }, "delete current primary network validator": { initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, @@ -344,7 +342,6 @@ func TestState_writeStakers(t *testing.T) { Decrease: true, Amount: subnetCurrentValidatorStaker.Weight, }, - expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](primaryNetworkCurrentValidatorStaker.PublicKey), }, } From c848feef30038c116ad8e7389c54abf92c8d69d5 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 24 Oct 2024 10:44:54 -0400 Subject: [PATCH 100/155] fix test --- vms/platformvm/state/state_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index c6f6c0d52ec7..f3c952ff90d8 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -278,13 +278,15 @@ func TestState_writeStakers(t *testing.T) { addStakerTx: addSubnetValidator, expectedCurrentValidator: subnetCurrentValidatorStaker, expectedValidatorSetOutput: &validators.GetValidatorOutput{ - NodeID: subnetCurrentValidatorStaker.NodeID, - Weight: subnetCurrentValidatorStaker.Weight, + NodeID: subnetCurrentValidatorStaker.NodeID, + PublicKey: primaryNetworkCurrentValidatorStaker.PublicKey, + Weight: subnetCurrentValidatorStaker.Weight, }, expectedWeightDiff: &ValidatorWeightDiff{ Decrease: false, Amount: subnetCurrentValidatorStaker.Weight, }, + expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](nil), }, "delete current primary network validator": { initialStakers: []*Staker{primaryNetworkCurrentValidatorStaker}, @@ -342,6 +344,7 @@ func TestState_writeStakers(t *testing.T) { Decrease: true, Amount: subnetCurrentValidatorStaker.Weight, }, + expectedPublicKeyDiff: maybe.Some[*bls.PublicKey](primaryNetworkCurrentValidatorStaker.PublicKey), }, } From 79c9b40074f82ae33ebf0e4ac8ac2191c3e81144 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 24 Oct 2024 10:49:15 -0400 Subject: [PATCH 101/155] nit --- vms/platformvm/state/state.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index cfb5888ecfa9..c70547265cf1 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2079,12 +2079,11 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecV } // TODO: Move validator set management out of the state package - // - // Attempt to update the stake metrics if !updateValidators { return nil } + // Update the stake metrics totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) if err != nil { return fmt.Errorf("failed to get total weight of primary network: %w", err) From e8b21ec88d26f3bc2d2cf3899470cf4b65e2fc44 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 24 Oct 2024 17:10:34 -0400 Subject: [PATCH 102/155] Verify no SoV + legacy overlap --- vms/platformvm/state/diff_test.go | 15 +++++++++++++++ vms/platformvm/state/subnet_only_validator.go | 17 ++++++++++++++--- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 
3e1ca3b2cdce..9cc67ff8d9ef 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -315,6 +315,15 @@ func TestDiffSubnetOnlyValidatorsErrors(t *testing.T) { }, expectedErr: ErrMutatedSubnetOnlyValidator, }, + { + name: "conflicting legacy subnetID and nodeID pair", + initialEndAccumulatedFee: 1, + sov: SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + NodeID: defaultValidatorNodeID, + }, + expectedErr: ErrConflictingSubnetOnlyValidator, + }, { name: "duplicate active subnetID and nodeID pair", initialEndAccumulatedFee: 1, @@ -341,6 +350,12 @@ func TestDiffSubnetOnlyValidatorsErrors(t *testing.T) { state := newTestState(t, memdb.New()) + require.NoError(state.PutCurrentValidator(&Staker{ + TxID: ids.GenerateTestID(), + SubnetID: sov.SubnetID, + NodeID: defaultValidatorNodeID, + })) + sov.EndAccumulatedFee = test.initialEndAccumulatedFee require.NoError(state.PutSubnetOnlyValidator(sov)) diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 978cb78c2727..9c58a60bf2e2 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -26,8 +26,9 @@ var ( _ btree.LessFunc[SubnetOnlyValidator] = SubnetOnlyValidator.Less _ utils.Sortable[SubnetOnlyValidator] = SubnetOnlyValidator{} - ErrMutatedSubnetOnlyValidator = errors.New("subnet only validator contains mutated constant fields") - ErrDuplicateSubnetOnlyValidator = errors.New("subnet only validator contains conflicting subnetID + nodeID pair") + ErrMutatedSubnetOnlyValidator = errors.New("subnet only validator contains mutated constant fields") + ErrConflictingSubnetOnlyValidator = errors.New("subnet only validator contains conflicting subnetID + nodeID pair") + ErrDuplicateSubnetOnlyValidator = errors.New("subnet only validator contains duplicate subnetID + nodeID pair") errUnexpectedSubnetIDNodeIDLength = fmt.Errorf("expected subnetID+nodeID entry length %d", subnetIDNodeIDEntryLength) ) @@ -233,7 +234,7 @@ func (d *subnetOnlyValidatorsDiff) hasSubnetOnlyValidator(subnetID ids.ID, nodeI return has, modified } -func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state SubnetOnlyValidators, sov SubnetOnlyValidator) error { +func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov SubnetOnlyValidator) error { var ( prevWeight uint64 prevActive bool @@ -248,6 +249,16 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state SubnetOnlyValida prevWeight = priorSOV.Weight prevActive = priorSOV.EndAccumulatedFee != 0 case database.ErrNotFound: + // Verify that there is not a legacy subnet validator with the same + // subnetID+nodeID as this L1 validator. 
+ _, err := state.GetCurrentValidator(sov.SubnetID, sov.NodeID) + if err == nil { + return ErrConflictingSubnetOnlyValidator + } + if err != database.ErrNotFound { + return err + } + has, err := state.HasSubnetOnlyValidator(sov.SubnetID, sov.NodeID) if err != nil { return err From 7f517c7ced96bbcaf7a929939acf3eebb5f2b644 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 24 Oct 2024 18:40:16 -0400 Subject: [PATCH 103/155] Fix legacy validator migration --- vms/platformvm/state/state.go | 627 +++++++++++++++-------------- vms/platformvm/state/state_test.go | 41 ++ 2 files changed, 359 insertions(+), 309 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 948dede49f1e..448f7414d78c 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1952,10 +1952,11 @@ func (s *state) write(updateValidators bool, height uint64) error { return errors.Join( s.writeBlocks(), s.writeExpiry(), - s.writeSubnetOnlyValidators(updateValidators, height), - s.writeCurrentStakers(updateValidators, height, codecVersion), + s.writeValidatorDiffs(height), + s.writeCurrentStakers(updateValidators, codecVersion), s.writePendingStakers(), s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers + s.writeSubnetOnlyValidators(updateValidators), s.writeTXs(), s.writeRewardUTXOs(), s.writeUTXOs(), @@ -2206,248 +2207,78 @@ func (s *state) writeExpiry() error { return nil } -// TODO: Add caching -func (s *state) writeSubnetOnlyValidators(updateValidators bool, height uint64) error { - // Write modified weights: - for subnetID, weight := range s.sovDiff.modifiedTotalWeight { - if err := database.PutUInt64(s.weightsDB, subnetID[:], weight); err != nil { - return err - } - } - maps.Clear(s.sovDiff.modifiedTotalWeight) - - historicalDiffs, err := s.makeSubnetOnlyValidatorHistoricalDiffs() - if err != nil { - return err +func (s *state) getInheritedPublicKey(nodeID ids.NodeID) (*bls.PublicKey, error) { + if vdr, ok := s.currentStakers.validators[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { + // The primary network validator is still present. + return vdr.validator.PublicKey, nil } - for subnetIDNodeID, diff := range historicalDiffs { - diffKey := marshalDiffKey(subnetIDNodeID.subnetID, height, subnetIDNodeID.nodeID) - if diff.weightDiff.Amount != 0 { - err := s.validatorWeightDiffsDB.Put( - diffKey, - marshalWeightDiff(&diff.weightDiff), - ) - if err != nil { - return err - } - } - if !bytes.Equal(diff.prevPublicKey, diff.newPublicKey) { - err := s.validatorPublicKeyDiffsDB.Put( - diffKey, - diff.prevPublicKey, - ) - if err != nil { - return err - } - } + if vdr, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { + // The primary network validator is being removed. + return vdr.validator.PublicKey, nil } - sovChanges := s.sovDiff.modified - // Perform deletions: - for validationID, sov := range sovChanges { - if sov.Weight != 0 { - // Additions and modifications are handled in the next loops. - continue - } - - // The next loops shouldn't consider this change. - delete(sovChanges, validationID) - - priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) - if err == database.ErrNotFound { - // Deleting a non-existent validator is a noop. This can happen if - // the validator was added and then immediately removed. 
- continue - } - if err != nil { - return err - } - - subnetIDNodeID := subnetIDNodeID{ - subnetID: sov.SubnetID, - nodeID: sov.NodeID, - } - subnetIDNodeIDKey := subnetIDNodeID.Marshal() - if err := s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey); err != nil { - return err - } - - if priorSOV.isActive() { - delete(s.activeSOVLookup, validationID) - s.activeSOVs.Delete(priorSOV) - err = deleteSubnetOnlyValidator(s.activeDB, validationID) - } else { - err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) - } - if err != nil { - return err - } - - // TODO: Move the validator set management out of the state package - if !updateValidators { - continue - } + // This should never happen as the primary network diffs are + // written last and subnet validator times must be a subset + // of the primary network validator times. + return nil, fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) +} - nodeID := ids.EmptyNodeID - if priorSOV.isActive() { - nodeID = priorSOV.NodeID - } - if err := s.validators.RemoveWeight(priorSOV.SubnetID, nodeID, priorSOV.Weight); err != nil { - return fmt.Errorf("failed to delete SoV validator: %w", err) - } +func (s *state) writeValidatorDiffs(height uint64) error { + type validatorChanges struct { + weightDiff ValidatorWeightDiff + prevPublicKey []byte + newPublicKey []byte } + changes := make(map[subnetIDNodeID]*validatorChanges, len(s.sovDiff.modified)) - // Perform modifications: - for validationID, sov := range sovChanges { - priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) - if err == database.ErrNotFound { - // New additions are handled in the next loop. - continue - } - if err != nil { - return err - } - - if priorSOV.isActive() { - delete(s.activeSOVLookup, validationID) - s.activeSOVs.Delete(priorSOV) - err = deleteSubnetOnlyValidator(s.activeDB, validationID) - } else { - err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) - } - if err != nil { - return err - } - - if sov.isActive() { - s.activeSOVLookup[validationID] = sov - s.activeSOVs.ReplaceOrInsert(sov) - err = putSubnetOnlyValidator(s.activeDB, sov) - } else { - err = putSubnetOnlyValidator(s.inactiveDB, sov) - } - if err != nil { - return err - } - - // The next loop shouldn't consider this change. - delete(sovChanges, validationID) - - // TODO: Move the validator set management out of the state package - if !updateValidators { - continue - } - - switch { - case !priorSOV.isActive() && sov.isActive(): - // This validator is being activated. - pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) - err = errors.Join( - s.validators.RemoveWeight(sov.SubnetID, ids.EmptyNodeID, priorSOV.Weight), - s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight), - ) - case priorSOV.isActive() && !sov.isActive(): - // This validator is being deactivated. - inactiveWeight := s.validators.GetWeight(sov.SubnetID, ids.EmptyNodeID) - if inactiveWeight == 0 { - err = s.validators.AddStaker(sov.SubnetID, ids.EmptyNodeID, nil, ids.Empty, sov.Weight) - } else { - err = s.validators.AddWeight(sov.SubnetID, ids.EmptyNodeID, sov.Weight) - } - err = errors.Join( - err, - s.validators.RemoveWeight(sov.SubnetID, sov.NodeID, priorSOV.Weight), - ) - default: - // This validator's active status isn't changing. 
- nodeID := ids.EmptyNodeID - if sov.isActive() { - nodeID = sov.NodeID + for subnetID, subnetDiffs := range s.currentStakers.validatorDiffs { + for nodeID, diff := range subnetDiffs { + change := &validatorChanges{ + weightDiff: ValidatorWeightDiff{ + Decrease: diff.validatorStatus == deleted, + }, } - if priorSOV.Weight < sov.Weight { - err = s.validators.AddWeight(sov.SubnetID, nodeID, sov.Weight-priorSOV.Weight) - } else if priorSOV.Weight > sov.Weight { - err = s.validators.RemoveWeight(sov.SubnetID, nodeID, priorSOV.Weight-sov.Weight) + if diff.validatorStatus != unmodified { + change.weightDiff.Amount = diff.validator.Weight } - } - if err != nil { - return err - } - } - - // Perform additions: - for validationID, sov := range sovChanges { - validationID := validationID - - subnetIDNodeID := subnetIDNodeID{ - subnetID: sov.SubnetID, - nodeID: sov.NodeID, - } - subnetIDNodeIDKey := subnetIDNodeID.Marshal() - if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]); err != nil { - return err - } - isActive := sov.isActive() - if isActive { - s.activeSOVLookup[validationID] = sov - s.activeSOVs.ReplaceOrInsert(sov) - err = putSubnetOnlyValidator(s.activeDB, sov) - } else { - err = putSubnetOnlyValidator(s.inactiveDB, sov) - } - if err != nil { - return err - } + for _, staker := range diff.deletedDelegators { + if err := change.weightDiff.Add(true, staker.Weight); err != nil { + return fmt.Errorf("failed to decrease node weight diff: %w", err) + } + } - // TODO: Move the validator set management out of the state package - if !updateValidators { - continue - } + addedDelegatorIterator := iterator.FromTree(diff.addedDelegators) + for addedDelegatorIterator.Next() { + staker := addedDelegatorIterator.Value() + if err := change.weightDiff.Add(false, staker.Weight); err != nil { + addedDelegatorIterator.Release() + return fmt.Errorf("failed to increase node weight diff: %w", err) + } + } + addedDelegatorIterator.Release() - if isActive { - pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) - if err := s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight); err != nil { - return fmt.Errorf("failed to add SoV validator: %w", err) + pk, err := s.getInheritedPublicKey(nodeID) + if err != nil { + return err + } + if pk != nil { + switch diff.validatorStatus { + case added: + change.newPublicKey = bls.PublicKeyToUncompressedBytes(pk) + case deleted: + change.prevPublicKey = bls.PublicKeyToUncompressedBytes(pk) + } } - continue - } - // This validator is inactive - inactiveWeight := s.validators.GetWeight(sov.SubnetID, ids.EmptyNodeID) - if inactiveWeight == 0 { - err = s.validators.AddStaker(sov.SubnetID, ids.EmptyNodeID, nil, ids.Empty, sov.Weight) - } else { - err = s.validators.AddWeight(sov.SubnetID, ids.EmptyNodeID, sov.Weight) - } - if err != nil { - return err + subnetIDNodeID := subnetIDNodeID{ + subnetID: subnetID, + nodeID: nodeID, + } + changes[subnetIDNodeID] = change } } - s.sovDiff = newSubnetOnlyValidatorsDiff() - return nil -} - -type validatorChanges struct { - weightDiff ValidatorWeightDiff - prevPublicKey []byte - newPublicKey []byte -} - -func getOrDefault[K comparable, V any](m map[K]*V, k K) *V { - if v, ok := m[k]; ok { - return v - } - - v := new(V) - m[k] = v - return v -} - -func (s *state) makeSubnetOnlyValidatorHistoricalDiffs() (map[subnetIDNodeID]*validatorChanges, error) { - changes := make(map[subnetIDNodeID]*validatorChanges, len(s.sovDiff.modified)) - // Perform deletions: for validationID := range s.sovDiff.modified 
{ priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) @@ -2455,7 +2286,7 @@ func (s *state) makeSubnetOnlyValidatorHistoricalDiffs() (map[subnetIDNodeID]*va continue } if err != nil { - return nil, err + return err } var ( @@ -2474,7 +2305,7 @@ func (s *state) makeSubnetOnlyValidatorHistoricalDiffs() (map[subnetIDNodeID]*va } if err := diff.weightDiff.Add(true, priorSOV.Weight); err != nil { - return nil, err + return err } } @@ -2501,14 +2332,45 @@ func (s *state) makeSubnetOnlyValidatorHistoricalDiffs() (map[subnetIDNodeID]*va } if err := diff.weightDiff.Add(false, sov.Weight); err != nil { - return nil, err + return err + } + } + + for subnetIDNodeID, diff := range changes { + diffKey := marshalDiffKey(subnetIDNodeID.subnetID, height, subnetIDNodeID.nodeID) + if diff.weightDiff.Amount != 0 { + err := s.validatorWeightDiffsDB.Put( + diffKey, + marshalWeightDiff(&diff.weightDiff), + ) + if err != nil { + return err + } + } + if !bytes.Equal(diff.prevPublicKey, diff.newPublicKey) { + err := s.validatorPublicKeyDiffsDB.Put( + diffKey, + diff.prevPublicKey, + ) + if err != nil { + return err + } } } + return nil +} + +func getOrDefault[K comparable, V any](m map[K]*V, k K) *V { + if v, ok := m[k]; ok { + return v + } - return changes, nil + v := new(V) + m[k] = v + return v } -func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { +func (s *state) writeCurrentStakers(updateValidators bool, codecVersion uint16) error { for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { // We must write the primary network stakers last because writing subnet // validator diffs may depend on the primary network validator diffs to @@ -2517,33 +2379,31 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecV continue } - delete(s.currentStakers.validatorDiffs, subnetID) - err := s.writeCurrentStakersSubnetDiff( subnetID, validatorDiffs, updateValidators, - height, codecVersion, ) if err != nil { return err } + + delete(s.currentStakers.validatorDiffs, subnetID) } if validatorDiffs, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID]; ok { - delete(s.currentStakers.validatorDiffs, constants.PrimaryNetworkID) - err := s.writeCurrentStakersSubnetDiff( constants.PrimaryNetworkID, validatorDiffs, updateValidators, - height, codecVersion, ) if err != nil { return err } + + delete(s.currentStakers.validatorDiffs, constants.PrimaryNetworkID) } // TODO: Move validator set management out of the state package @@ -2566,7 +2426,6 @@ func (s *state) writeCurrentStakersSubnetDiff( subnetID ids.ID, validatorDiffs map[ids.NodeID]*diffValidator, updateValidators bool, - height uint64, codecVersion uint16, ) error { // Select db to write to @@ -2579,52 +2438,13 @@ func (s *state) writeCurrentStakersSubnetDiff( // Record the change in weight and/or public key for each validator. for nodeID, validatorDiff := range validatorDiffs { - var ( - staker *Staker - pk *bls.PublicKey - weightDiff = &ValidatorWeightDiff{ - Decrease: validatorDiff.validatorStatus == deleted, - } - ) - if validatorDiff.validatorStatus != unmodified { - staker = validatorDiff.validator - - pk = staker.PublicKey - // For non-primary network validators, the public key is inherited - // from the primary network. - if subnetID != constants.PrimaryNetworkID { - if vdr, ok := s.currentStakers.validators[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { - // The primary network validator is still present after - // writing. 
- pk = vdr.validator.PublicKey - } else if vdr, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { - // The primary network validator is being removed during - // writing. - pk = vdr.validator.PublicKey - } else { - // This should never happen as the primary network diffs are - // written last and subnet validator times must be a subset - // of the primary network validator times. - return fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) - } - } - - weightDiff.Amount = staker.Weight - } - + weightDiff := &ValidatorWeightDiff{} switch validatorDiff.validatorStatus { case added: - if pk != nil { - // Record that the public key for the validator is being added. - // This means the prior value for the public key was nil. - err := s.validatorPublicKeyDiffsDB.Put( - marshalDiffKey(subnetID, height, nodeID), - nil, - ) - if err != nil { - return err - } - } + staker := validatorDiff.validator + + weightDiff.Decrease = false + weightDiff.Amount = staker.Weight // The validator is being added. // @@ -2653,23 +2473,10 @@ func (s *state) writeCurrentStakersSubnetDiff( s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) case deleted: - if pk != nil { - // Record that the public key for the validator is being - // removed. This means we must record the prior value of the - // public key. - // - // Note: We store the uncompressed public key here as it is - // significantly more efficient to parse when applying diffs. - err := s.validatorPublicKeyDiffsDB.Put( - marshalDiffKey(subnetID, height, nodeID), - bls.PublicKeyToUncompressedBytes(pk), - ) - if err != nil { - return err - } - } + weightDiff.Decrease = true + weightDiff.Amount = validatorDiff.validator.Weight - if err := validatorDB.Delete(staker.TxID[:]); err != nil { + if err := validatorDB.Delete(validatorDiff.validator.TxID[:]); err != nil { return fmt.Errorf("failed to delete current staker: %w", err) } @@ -2691,14 +2498,6 @@ func (s *state) writeCurrentStakersSubnetDiff( continue } - err = s.validatorWeightDiffsDB.Put( - marshalDiffKey(subnetID, height, nodeID), - marshalWeightDiff(weightDiff), - ) - if err != nil { - return err - } - // TODO: Move the validator set management out of the state package if !updateValidators { continue @@ -2708,11 +2507,17 @@ func (s *state) writeCurrentStakersSubnetDiff( err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) } else { if validatorDiff.validatorStatus == added { + var pk *bls.PublicKey + pk, err = s.getInheritedPublicKey(nodeID) + if err != nil { + return err + } + err = s.validators.AddStaker( subnetID, nodeID, pk, - staker.TxID, + validatorDiff.validator.TxID, weightDiff.Amount, ) } else { @@ -2824,6 +2629,210 @@ func writePendingDiff( return nil } +// TODO: Add caching +// +// writeSubnetOnlyValidators must be called after writeCurrentStakers to ensure +// any legacy validators that were removed and then re-added as SoVs are +// correctly written +func (s *state) writeSubnetOnlyValidators(updateValidators bool) error { + // Write modified weights: + for subnetID, weight := range s.sovDiff.modifiedTotalWeight { + if err := database.PutUInt64(s.weightsDB, subnetID[:], weight); err != nil { + return err + } + } + maps.Clear(s.sovDiff.modifiedTotalWeight) + + sovChanges := s.sovDiff.modified + // Perform deletions: + for validationID, sov := range sovChanges { + if sov.Weight != 0 { + // Additions and modifications are handled in the next loops. 
+ continue + } + + // The next loops shouldn't consider this change. + delete(sovChanges, validationID) + + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) + if err == database.ErrNotFound { + // Deleting a non-existent validator is a noop. This can happen if + // the validator was added and then immediately removed. + continue + } + if err != nil { + return err + } + + subnetIDNodeID := subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + subnetIDNodeIDKey := subnetIDNodeID.Marshal() + if err := s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey); err != nil { + return err + } + + if priorSOV.isActive() { + delete(s.activeSOVLookup, validationID) + s.activeSOVs.Delete(priorSOV) + err = deleteSubnetOnlyValidator(s.activeDB, validationID) + } else { + err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) + } + if err != nil { + return err + } + + // TODO: Move the validator set management out of the state package + if !updateValidators { + continue + } + + nodeID := ids.EmptyNodeID + if priorSOV.isActive() { + nodeID = priorSOV.NodeID + } + if err := s.validators.RemoveWeight(priorSOV.SubnetID, nodeID, priorSOV.Weight); err != nil { + return fmt.Errorf("failed to delete SoV validator: %w", err) + } + } + + // Perform modifications: + for validationID, sov := range sovChanges { + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) + if err == database.ErrNotFound { + // New additions are handled in the next loop. + continue + } + if err != nil { + return err + } + + if priorSOV.isActive() { + delete(s.activeSOVLookup, validationID) + s.activeSOVs.Delete(priorSOV) + err = deleteSubnetOnlyValidator(s.activeDB, validationID) + } else { + err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) + } + if err != nil { + return err + } + + if sov.isActive() { + s.activeSOVLookup[validationID] = sov + s.activeSOVs.ReplaceOrInsert(sov) + err = putSubnetOnlyValidator(s.activeDB, sov) + } else { + err = putSubnetOnlyValidator(s.inactiveDB, sov) + } + if err != nil { + return err + } + + // The next loop shouldn't consider this change. + delete(sovChanges, validationID) + + // TODO: Move the validator set management out of the state package + if !updateValidators { + continue + } + + switch { + case !priorSOV.isActive() && sov.isActive(): + // This validator is being activated. + pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) + err = errors.Join( + s.validators.RemoveWeight(sov.SubnetID, ids.EmptyNodeID, priorSOV.Weight), + s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight), + ) + case priorSOV.isActive() && !sov.isActive(): + // This validator is being deactivated. + inactiveWeight := s.validators.GetWeight(sov.SubnetID, ids.EmptyNodeID) + if inactiveWeight == 0 { + err = s.validators.AddStaker(sov.SubnetID, ids.EmptyNodeID, nil, ids.Empty, sov.Weight) + } else { + err = s.validators.AddWeight(sov.SubnetID, ids.EmptyNodeID, sov.Weight) + } + err = errors.Join( + err, + s.validators.RemoveWeight(sov.SubnetID, sov.NodeID, priorSOV.Weight), + ) + default: + // This validator's active status isn't changing. 
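+			// Only the weight is changing here. Apply the delta to whichever
+			// nodeID currently represents this validator in the manager: its
+			// real nodeID while active, or ids.EmptyNodeID while its weight is
+			// tracked as inactive.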
+ nodeID := ids.EmptyNodeID + if sov.isActive() { + nodeID = sov.NodeID + } + if priorSOV.Weight < sov.Weight { + err = s.validators.AddWeight(sov.SubnetID, nodeID, sov.Weight-priorSOV.Weight) + } else if priorSOV.Weight > sov.Weight { + err = s.validators.RemoveWeight(sov.SubnetID, nodeID, priorSOV.Weight-sov.Weight) + } + } + if err != nil { + return err + } + } + + // Perform additions: + for validationID, sov := range sovChanges { + validationID := validationID + + subnetIDNodeID := subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + subnetIDNodeIDKey := subnetIDNodeID.Marshal() + if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]); err != nil { + return err + } + + var ( + isActive = sov.isActive() + err error + ) + if isActive { + s.activeSOVLookup[validationID] = sov + s.activeSOVs.ReplaceOrInsert(sov) + err = putSubnetOnlyValidator(s.activeDB, sov) + } else { + err = putSubnetOnlyValidator(s.inactiveDB, sov) + } + if err != nil { + return err + } + + // TODO: Move the validator set management out of the state package + if !updateValidators { + continue + } + + if isActive { + pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) + if err := s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight); err != nil { + return fmt.Errorf("failed to add SoV validator: %w", err) + } + continue + } + + // This validator is inactive + inactiveWeight := s.validators.GetWeight(sov.SubnetID, ids.EmptyNodeID) + if inactiveWeight == 0 { + err = s.validators.AddStaker(sov.SubnetID, ids.EmptyNodeID, nil, ids.Empty, sov.Weight) + } else { + err = s.validators.AddWeight(sov.SubnetID, ids.EmptyNodeID, sov.Weight) + } + if err != nil { + return err + } + } + + s.sovDiff = newSubnetOnlyValidatorsDiff() + return nil +} + func (s *state) writeTXs() error { for txID, txStatus := range s.addedTxs { txID := txID diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index b5ebc572fea8..f4f50ebdf05d 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1889,3 +1889,44 @@ func TestSubnetOnlyValidators(t *testing.T) { }) } } + +func TestSubnetOnlyValidatorAfterLegacyRemoval(t *testing.T) { + require := require.New(t) + + db := memdb.New() + state := newTestState(t, db) + + legacyStaker := &Staker{ + TxID: ids.GenerateTestID(), + NodeID: defaultValidatorNodeID, + PublicKey: nil, + SubnetID: ids.GenerateTestID(), + Weight: 1, + StartTime: genesistest.DefaultValidatorStartTime, + EndTime: genesistest.DefaultValidatorEndTime, + PotentialReward: 0, + } + require.NoError(state.PutCurrentValidator(legacyStaker)) + + state.SetHeight(1) + require.NoError(state.Commit()) + + state.DeleteCurrentValidator(legacyStaker) + + sov := SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + SubnetID: legacyStaker.SubnetID, + NodeID: legacyStaker.NodeID, + PublicKey: utils.RandomBytes(bls.PublicKeyLen), + RemainingBalanceOwner: utils.RandomBytes(32), + DeactivationOwner: utils.RandomBytes(32), + StartTime: 1, + Weight: 2, + MinNonce: 3, + EndAccumulatedFee: 4, + } + require.NoError(state.PutSubnetOnlyValidator(sov)) + + state.SetHeight(2) + require.NoError(state.Commit()) +} From 08bd9e3ae82cb0f8e4008d38d5d7ecbff232dbe4 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 25 Oct 2024 16:31:17 -0400 Subject: [PATCH 104/155] ACP-77: Add subnetIDNodeID struct --- vms/platformvm/state/state_test.go | 4 -- vms/platformvm/state/subnet_id_node_id.go | 37 +++++++++++ 
.../state/subnet_id_node_id_test.go | 66 +++++++++++++++++++ 3 files changed, 103 insertions(+), 4 deletions(-) create mode 100644 vms/platformvm/state/subnet_id_node_id.go create mode 100644 vms/platformvm/state/subnet_id_node_id_test.go diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index b965af1531eb..85df176ce42a 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -944,10 +944,6 @@ func TestState_ApplyValidatorDiffs(t *testing.T) { d, err := NewDiffOn(state) require.NoError(err) - type subnetIDNodeID struct { - subnetID ids.ID - nodeID ids.NodeID - } var expectedValidators set.Set[subnetIDNodeID] for _, added := range diff.addedValidators { require.NoError(d.PutCurrentValidator(&added)) diff --git a/vms/platformvm/state/subnet_id_node_id.go b/vms/platformvm/state/subnet_id_node_id.go new file mode 100644 index 000000000000..208c1cf8f447 --- /dev/null +++ b/vms/platformvm/state/subnet_id_node_id.go @@ -0,0 +1,37 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/ids" +) + +// subnetIDNodeID = [subnetID] + [nodeID] +const subnetIDNodeIDEntryLength = ids.IDLen + ids.NodeIDLen + +var errUnexpectedSubnetIDNodeIDLength = fmt.Errorf("expected subnetID+nodeID entry length %d", subnetIDNodeIDEntryLength) + +type subnetIDNodeID struct { + subnetID ids.ID + nodeID ids.NodeID +} + +func (s *subnetIDNodeID) Marshal() []byte { + data := make([]byte, subnetIDNodeIDEntryLength) + copy(data, s.subnetID[:]) + copy(data[ids.IDLen:], s.nodeID[:]) + return data +} + +func (s *subnetIDNodeID) Unmarshal(data []byte) error { + if len(data) != subnetIDNodeIDEntryLength { + return errUnexpectedSubnetIDNodeIDLength + } + + copy(s.subnetID[:], data) + copy(s.nodeID[:], data[ids.IDLen:]) + return nil +} diff --git a/vms/platformvm/state/subnet_id_node_id_test.go b/vms/platformvm/state/subnet_id_node_id_test.go new file mode 100644 index 000000000000..4ed720b95a8e --- /dev/null +++ b/vms/platformvm/state/subnet_id_node_id_test.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + "github.com/thepudds/fzgen/fuzzer" +) + +func FuzzSubnetIDNodeIDMarshal(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + require := require.New(t) + + var v subnetIDNodeID + fz := fuzzer.NewFuzzer(data) + fz.Fill(&v) + + marshalledData := v.Marshal() + + var parsed subnetIDNodeID + require.NoError(parsed.Unmarshal(marshalledData)) + require.Equal(v, parsed) + }) +} + +func FuzzSubnetIDNodeIDUnmarshal(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + require := require.New(t) + + var v subnetIDNodeID + if err := v.Unmarshal(data); err != nil { + require.ErrorIs(err, errUnexpectedSubnetIDNodeIDLength) + return + } + + marshalledData := v.Marshal() + require.Equal(data, marshalledData) + }) +} + +func FuzzSubnetIDNodeIDMarshalOrdering(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + var ( + v0 subnetIDNodeID + v1 subnetIDNodeID + ) + fz := fuzzer.NewFuzzer(data) + fz.Fill(&v0, &v1) + + if v0.subnetID == v1.subnetID { + return + } + + key0 := v0.Marshal() + key1 := v1.Marshal() + require.Equal( + t, + v0.subnetID.Compare(v1.subnetID), + bytes.Compare(key0, key1), + ) + }) +} From 78c1c3de58bf69e26f2764ee5b3e840413a32997 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 26 Oct 2024 17:50:18 -0400 Subject: [PATCH 105/155] nit --- vms/platformvm/state/subnet_id_node_id_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/subnet_id_node_id_test.go b/vms/platformvm/state/subnet_id_node_id_test.go index 4ed720b95a8e..848893170e2b 100644 --- a/vms/platformvm/state/subnet_id_node_id_test.go +++ b/vms/platformvm/state/subnet_id_node_id_test.go @@ -42,7 +42,7 @@ func FuzzSubnetIDNodeIDUnmarshal(f *testing.F) { }) } -func FuzzSubnetIDNodeIDMarshalOrdering(f *testing.F) { +func FuzzSubnetIDNodeIDOrdering(f *testing.F) { f.Fuzz(func(t *testing.T, data []byte) { var ( v0 subnetIDNodeID From d2137ef6d9a0b4de0d9badb9cf2e352fd75e54b9 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 26 Oct 2024 18:16:45 -0400 Subject: [PATCH 106/155] fix merge --- vms/platformvm/state/subnet_only_validator.go | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 9c58a60bf2e2..6d2a0f6afb9e 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -19,9 +19,6 @@ import ( safemath "github.com/ava-labs/avalanchego/utils/math" ) -// subnetIDNodeID = [subnetID] + [nodeID] -const subnetIDNodeIDEntryLength = ids.IDLen + ids.NodeIDLen - var ( _ btree.LessFunc[SubnetOnlyValidator] = SubnetOnlyValidator.Less _ utils.Sortable[SubnetOnlyValidator] = SubnetOnlyValidator{} @@ -29,8 +26,6 @@ var ( ErrMutatedSubnetOnlyValidator = errors.New("subnet only validator contains mutated constant fields") ErrConflictingSubnetOnlyValidator = errors.New("subnet only validator contains conflicting subnetID + nodeID pair") ErrDuplicateSubnetOnlyValidator = errors.New("subnet only validator contains duplicate subnetID + nodeID pair") - - errUnexpectedSubnetIDNodeIDLength = fmt.Errorf("expected subnetID+nodeID entry length %d", subnetIDNodeIDEntryLength) ) type SubnetOnlyValidators interface { @@ -175,28 +170,6 @@ func deleteSubnetOnlyValidator(db database.KeyValueDeleter, validationID ids.ID) return db.Delete(validationID[:]) } -type subnetIDNodeID struct { - subnetID ids.ID - nodeID ids.NodeID -} - 
-func (s *subnetIDNodeID) Marshal() []byte { - data := make([]byte, subnetIDNodeIDEntryLength) - copy(data, s.subnetID[:]) - copy(data[ids.IDLen:], s.nodeID[:]) - return data -} - -func (s *subnetIDNodeID) Unmarshal(data []byte) error { - if len(data) != subnetIDNodeIDEntryLength { - return errUnexpectedSubnetIDNodeIDLength - } - - copy(s.subnetID[:], data) - copy(s.nodeID[:], data[ids.IDLen:]) - return nil -} - type subnetOnlyValidatorsDiff struct { numAddedActive int // May be negative modifiedTotalWeight map[ids.ID]uint64 // subnetID -> totalWeight From 5845d11eed4895c72890e43a5c5a50c8ac0d9c1d Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 26 Oct 2024 19:36:20 -0400 Subject: [PATCH 107/155] Split writeCurrentStakers into multiple functions --- vms/platformvm/state/stakers.go | 29 +++ vms/platformvm/state/state.go | 362 ++++++++++++++++---------------- 2 files changed, 208 insertions(+), 183 deletions(-) diff --git a/vms/platformvm/state/stakers.go b/vms/platformvm/state/stakers.go index 658796855958..14e4dcf7b1ef 100644 --- a/vms/platformvm/state/stakers.go +++ b/vms/platformvm/state/stakers.go @@ -5,6 +5,7 @@ package state import ( "errors" + "fmt" "github.com/google/btree" @@ -273,6 +274,34 @@ type diffValidator struct { deletedDelegators map[ids.ID]*Staker } +func (d *diffValidator) WeightDiff() (ValidatorWeightDiff, error) { + weightDiff := ValidatorWeightDiff{ + Decrease: d.validatorStatus == deleted, + } + if d.validatorStatus != unmodified { + weightDiff.Amount = d.validator.Weight + } + + for _, staker := range d.deletedDelegators { + if err := weightDiff.Add(true, staker.Weight); err != nil { + return ValidatorWeightDiff{}, fmt.Errorf("failed to decrease node weight diff: %w", err) + } + } + + addedDelegatorIterator := iterator.FromTree(d.addedDelegators) + defer addedDelegatorIterator.Release() + + for addedDelegatorIterator.Next() { + staker := addedDelegatorIterator.Value() + + if err := weightDiff.Add(false, staker.Weight); err != nil { + return ValidatorWeightDiff{}, fmt.Errorf("failed to increase node weight diff: %w", err) + } + } + + return weightDiff, nil +} + // GetValidator attempts to fetch the validator with the given subnetID and // nodeID. // Invariant: Assumes that the validator will never be removed and then added. 
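The new diffValidator.WeightDiff helper above folds the validator's own addition or removal together with its delegator changes into a single direction-plus-magnitude value that the later write paths reuse. The following is a minimal, self-contained sketch of that aggregation rule; the weightDiff type, its add method, and the example weights are invented stand-ins for illustration only (the real ValidatorWeightDiff.Add can also return an error, e.g. on overflow, which the sketch omits).

package main

import "fmt"

// weightDiff is a simplified stand-in for ValidatorWeightDiff: a magnitude
// plus a direction flag (decrease == true means net weight is removed).
type weightDiff struct {
	decrease bool
	amount   uint64
}

// add folds a single increase (negative == false) or decrease (negative ==
// true) into the running diff, flipping the direction when the opposite side
// ends up dominating.
func (w *weightDiff) add(negative bool, amount uint64) {
	if w.decrease == negative {
		w.amount += amount
		return
	}
	if w.amount >= amount {
		w.amount -= amount
		return
	}
	w.amount = amount - w.amount
	w.decrease = negative
}

func main() {
	// A validator with weight 100 is deleted in this diff...
	d := weightDiff{decrease: true, amount: 100}
	// ...its deleted delegators contribute another 20 of removed weight...
	d.add(true, 20)
	// ...and a delegator with weight 50 is added in the same diff.
	d.add(false, 50)
	// Net change reported for this node: a decrease of 70.
	fmt.Printf("decrease=%t amount=%d\n", d.decrease, d.amount)
}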
diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index c70547265cf1..33ae0e61e2ee 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -4,6 +4,7 @@ package state import ( + "bytes" "context" "errors" "fmt" @@ -14,6 +15,7 @@ import ( "github.com/google/btree" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "golang.org/x/exp/maps" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" @@ -1792,7 +1794,9 @@ func (s *state) write(updateValidators bool, height uint64) error { return errors.Join( s.writeBlocks(), s.writeExpiry(), - s.writeCurrentStakers(updateValidators, height, codecVersion), + s.updateValidatorManager(updateValidators), + s.writeValidatorDiffs(height), + s.writeCurrentStakers(codecVersion), s.writePendingStakers(), s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers s.writeTXs(), @@ -2040,47 +2044,75 @@ func (s *state) writeExpiry() error { return nil } -func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { +// getInheritedPublicKey returns the primary network validator's public key. +// +// Note: This function may return a nil public key and no error if the primary +// network validator does not have a public key. +func (s *state) getInheritedPublicKey(nodeID ids.NodeID) (*bls.PublicKey, error) { + if vdr, ok := s.currentStakers.validators[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { + // The primary network validator is present. + return vdr.validator.PublicKey, nil + } + if vdr, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { + // The primary network validator is being removed. + return vdr.validator.PublicKey, nil + } + return nil, fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) +} + +// updateValidatorManager updates the validator manager with the pending +// validator set changes. +// +// This function must be called prior to writeCurrentStakers. +func (s *state) updateValidatorManager(updateValidators bool) error { + if !updateValidators { + return nil + } + for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { - // We must write the primary network stakers last because writing subnet - // validator diffs may depend on the primary network validator diffs to - // inherit the public keys. - if subnetID == constants.PrimaryNetworkID { - continue - } + // Record the change in weight and/or public key for each validator. + for nodeID, diff := range validatorDiffs { + weightDiff, err := diff.WeightDiff() + if err != nil { + return err + } - delete(s.currentStakers.validatorDiffs, subnetID) + if weightDiff.Amount == 0 { + continue // No weight change; go to the next validator. 
+ } - err := s.writeCurrentStakersSubnetDiff( - subnetID, - validatorDiffs, - updateValidators, - height, - codecVersion, - ) - if err != nil { - return err - } - } + if weightDiff.Decrease { + if err := s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount); err != nil { + return fmt.Errorf("failed to reduce validator weight: %w", err) + } + continue + } - if validatorDiffs, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID]; ok { - delete(s.currentStakers.validatorDiffs, constants.PrimaryNetworkID) + if diff.validatorStatus != added { + if err := s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount); err != nil { + return fmt.Errorf("failed to increase validator weight: %w", err) + } + continue + } - err := s.writeCurrentStakersSubnetDiff( - constants.PrimaryNetworkID, - validatorDiffs, - updateValidators, - height, - codecVersion, - ) - if err != nil { - return err - } - } + pk, err := s.getInheritedPublicKey(nodeID) + if err != nil { + // This should never happen as there should always be a primary + // network validator corresponding to a subnet validator. + return err + } - // TODO: Move validator set management out of the state package - if !updateValidators { - return nil + err = s.validators.AddStaker( + subnetID, + nodeID, + pk, + diff.validator.TxID, + weightDiff.Amount, + ) + if err != nil { + return fmt.Errorf("failed to add validator: %w", err) + } + } } // Update the stake metrics @@ -2094,185 +2126,153 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecV return nil } -func (s *state) writeCurrentStakersSubnetDiff( - subnetID ids.ID, - validatorDiffs map[ids.NodeID]*diffValidator, - updateValidators bool, - height uint64, - codecVersion uint16, -) error { - // Select db to write to - validatorDB := s.currentSubnetValidatorList - delegatorDB := s.currentSubnetDelegatorList - if subnetID == constants.PrimaryNetworkID { - validatorDB = s.currentValidatorList - delegatorDB = s.currentDelegatorList - } - - // Record the change in weight and/or public key for each validator. - for nodeID, validatorDiff := range validatorDiffs { - var ( - staker *Staker - pk *bls.PublicKey - weightDiff = &ValidatorWeightDiff{ - Decrease: validatorDiff.validatorStatus == deleted, - } - ) - if validatorDiff.validatorStatus != unmodified { - staker = validatorDiff.validator - - pk = staker.PublicKey - // For non-primary network validators, the public key is inherited - // from the primary network. - if subnetID != constants.PrimaryNetworkID { - if vdr, ok := s.currentStakers.validators[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { - // The primary network validator is still present after - // writing. - pk = vdr.validator.PublicKey - } else if vdr, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { - // The primary network validator is being removed during - // writing. - pk = vdr.validator.PublicKey - } else { - // This should never happen as the primary network diffs are - // written last and subnet validator times must be a subset - // of the primary network validator times. - return fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) - } +// writeValidatorDiffs writes the validator set diff contained by the pending +// validator set changes to disk. +// +// This function must be called prior to writeCurrentStakers. 
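+// writeCurrentStakers clears s.currentStakers.validatorDiffs once it has
+// flushed the staker entries, so the height-indexed diffs must be recorded
+// here first.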
+func (s *state) writeValidatorDiffs(height uint64) error { + type validatorChanges struct { + weightDiff ValidatorWeightDiff + prevPublicKey []byte + newPublicKey []byte + } + changes := make(map[subnetIDNodeID]*validatorChanges) + + // Calculate the changes to the pre-ACP-77 validator set + for subnetID, subnetDiffs := range s.currentStakers.validatorDiffs { + for nodeID, diff := range subnetDiffs { + weightDiff, err := diff.WeightDiff() + if err != nil { + return err } - weightDiff.Amount = staker.Weight - } + pk, err := s.getInheritedPublicKey(nodeID) + if err != nil { + // This should never happen as there should always be a primary + // network validator corresponding to a subnet validator. + return err + } - switch validatorDiff.validatorStatus { - case added: + change := &validatorChanges{ + weightDiff: weightDiff, + } if pk != nil { - // Record that the public key for the validator is being added. - // This means the prior value for the public key was nil. - err := s.validatorPublicKeyDiffsDB.Put( - marshalDiffKey(subnetID, height, nodeID), - nil, - ) - if err != nil { - return err + switch diff.validatorStatus { + case added: + change.newPublicKey = bls.PublicKeyToUncompressedBytes(pk) + case deleted: + change.prevPublicKey = bls.PublicKeyToUncompressedBytes(pk) } } - // The validator is being added. - // - // Invariant: It's impossible for a delegator to have been rewarded - // in the same block that the validator was added. - startTime := uint64(staker.StartTime.Unix()) - metadata := &validatorMetadata{ - txID: staker.TxID, - lastUpdated: staker.StartTime, - - UpDuration: 0, - LastUpdated: startTime, - StakerStartTime: startTime, - PotentialReward: staker.PotentialReward, - PotentialDelegateeReward: 0, + subnetIDNodeID := subnetIDNodeID{ + subnetID: subnetID, + nodeID: nodeID, } + changes[subnetIDNodeID] = change + } + } - metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) + // Write the changes to the database + for subnetIDNodeID, diff := range changes { + diffKey := marshalDiffKey(subnetIDNodeID.subnetID, height, subnetIDNodeID.nodeID) + if diff.weightDiff.Amount != 0 { + err := s.validatorWeightDiffsDB.Put( + diffKey, + marshalWeightDiff(&diff.weightDiff), + ) if err != nil { - return fmt.Errorf("failed to serialize current validator: %w", err) + return err } - - if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil { - return fmt.Errorf("failed to write current validator to list: %w", err) + } + if !bytes.Equal(diff.prevPublicKey, diff.newPublicKey) { + err := s.validatorPublicKeyDiffsDB.Put( + diffKey, + diff.prevPublicKey, + ) + if err != nil { + return err } + } + } + return nil +} - s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) - case deleted: - if pk != nil { - // Record that the public key for the validator is being - // removed. This means we must record the prior value of the - // public key. - // - // Note: We store the uncompressed public key here as it is - // significantly more efficient to parse when applying diffs. 
- err := s.validatorPublicKeyDiffsDB.Put( - marshalDiffKey(subnetID, height, nodeID), - bls.PublicKeyToUncompressedBytes(pk), - ) - if err != nil { - return err - } - } +func (s *state) writeCurrentStakers(codecVersion uint16) error { + for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { + // Select db to write to + validatorDB := s.currentSubnetValidatorList + delegatorDB := s.currentSubnetDelegatorList + if subnetID == constants.PrimaryNetworkID { + validatorDB = s.currentValidatorList + delegatorDB = s.currentDelegatorList + } - if err := validatorDB.Delete(staker.TxID[:]); err != nil { - return fmt.Errorf("failed to delete current staker: %w", err) - } + // Record the change in weight and/or public key for each validator. + for nodeID, validatorDiff := range validatorDiffs { + switch validatorDiff.validatorStatus { + case added: + staker := validatorDiff.validator - s.validatorState.DeleteValidatorMetadata(nodeID, subnetID) - } + // The validator is being added. + // + // Invariant: It's impossible for a delegator to have been rewarded + // in the same block that the validator was added. + startTime := uint64(staker.StartTime.Unix()) + metadata := &validatorMetadata{ + txID: staker.TxID, + lastUpdated: staker.StartTime, + + UpDuration: 0, + LastUpdated: startTime, + StakerStartTime: startTime, + PotentialReward: staker.PotentialReward, + PotentialDelegateeReward: 0, + } - err := writeCurrentDelegatorDiff( - delegatorDB, - weightDiff, - validatorDiff, - codecVersion, - ) - if err != nil { - return err - } + metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) + if err != nil { + return fmt.Errorf("failed to serialize current validator: %w", err) + } - if weightDiff.Amount == 0 { - // No weight change to record; go to next validator. 
- continue - } + if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil { + return fmt.Errorf("failed to write current validator to list: %w", err) + } - err = s.validatorWeightDiffsDB.Put( - marshalDiffKey(subnetID, height, nodeID), - marshalWeightDiff(weightDiff), - ) - if err != nil { - return err - } + s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) + case deleted: + if err := validatorDB.Delete(validatorDiff.validator.TxID[:]); err != nil { + return fmt.Errorf("failed to delete current staker: %w", err) + } - // TODO: Move the validator set management out of the state package - if !updateValidators { - continue - } + s.validatorState.DeleteValidatorMetadata(nodeID, subnetID) + } - if weightDiff.Decrease { - err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) - } else { - if validatorDiff.validatorStatus == added { - err = s.validators.AddStaker( - subnetID, - nodeID, - pk, - staker.TxID, - weightDiff.Amount, - ) - } else { - err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) + err := writeCurrentDelegatorDiff( + delegatorDB, + validatorDiff, + codecVersion, + ) + if err != nil { + return err } } - if err != nil { - return fmt.Errorf("failed to update validator weight: %w", err) - } } + maps.Clear(s.currentStakers.validatorDiffs) return nil } func writeCurrentDelegatorDiff( currentDelegatorList linkeddb.LinkedDB, - weightDiff *ValidatorWeightDiff, validatorDiff *diffValidator, codecVersion uint16, ) error { addedDelegatorIterator := iterator.FromTree(validatorDiff.addedDelegators) defer addedDelegatorIterator.Release() + for addedDelegatorIterator.Next() { staker := addedDelegatorIterator.Value() - if err := weightDiff.Add(false, staker.Weight); err != nil { - return fmt.Errorf("failed to increase node weight diff: %w", err) - } - metadata := &delegatorMetadata{ txID: staker.TxID, PotentialReward: staker.PotentialReward, @@ -2284,10 +2284,6 @@ func writeCurrentDelegatorDiff( } for _, staker := range validatorDiff.deletedDelegators { - if err := weightDiff.Add(true, staker.Weight); err != nil { - return fmt.Errorf("failed to decrease node weight diff: %w", err) - } - if err := currentDelegatorList.Delete(staker.TxID[:]); err != nil { return fmt.Errorf("failed to delete current staker: %w", err) } From d0d1602264c040c6202659454fc299f68d491f55 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 26 Oct 2024 20:09:45 -0400 Subject: [PATCH 108/155] reduce diff --- vms/platformvm/state/state.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index d4353aafb9af..4e34a6bd1cba 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2418,7 +2418,7 @@ func (s *state) writeValidatorDiffs(height uint64) error { } changes := make(map[subnetIDNodeID]*validatorChanges, len(s.sovDiff.modified)) - // Perform pre-ACP-77 validator set changes: + // Calculate the changes to the pre-ACP-77 validator set for subnetID, subnetDiffs := range s.currentStakers.validatorDiffs { for nodeID, diff := range subnetDiffs { weightDiff, err := diff.WeightDiff() @@ -2428,6 +2428,8 @@ func (s *state) writeValidatorDiffs(height uint64) error { pk, err := s.getInheritedPublicKey(nodeID) if err != nil { + // This should never happen as there should always be a primary + // network validator corresponding to a subnet validator. 
return err } From d397375ea8a4cc8b5312cae64c48cdadecaacddf Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 26 Oct 2024 20:20:08 -0400 Subject: [PATCH 109/155] reduce diff --- vms/platformvm/state/state.go | 85 ++++++++++++----------------------- 1 file changed, 29 insertions(+), 56 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 4e34a6bd1cba..841fe972278b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2720,49 +2720,18 @@ func (s *state) writeSubnetOnlyValidators() error { // The next loops shouldn't consider this change. delete(sovChanges, validationID) - priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) - if err == database.ErrNotFound { - // Deleting a non-existent validator is a noop. This can happen if - // the validator was added and then immediately removed. - continue - } - if err != nil { - return err - } - subnetIDNodeID := subnetIDNodeID{ subnetID: sov.SubnetID, nodeID: sov.NodeID, } subnetIDNodeIDKey := subnetIDNodeID.Marshal() - if err := s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey); err != nil { - return err - } - - if priorSOV.isActive() { - delete(s.activeSOVLookup, validationID) - s.activeSOVs.Delete(priorSOV) - err = deleteSubnetOnlyValidator(s.activeDB, validationID) - } else { - err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) - } - if err != nil { - return err - } - } - - // Perform modifications: - for validationID, sov := range sovChanges { - priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) - if err == database.ErrNotFound { - // New additions are handled in the next loop. - continue - } + err := s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey) if err != nil { return err } - if priorSOV.isActive() { + priorSOV, wasActive := s.activeSOVLookup[validationID] + if wasActive { delete(s.activeSOVLookup, validationID) s.activeSOVs.Delete(priorSOV) err = deleteSubnetOnlyValidator(s.activeDB, validationID) @@ -2772,34 +2741,38 @@ func (s *state) writeSubnetOnlyValidators() error { if err != nil { return err } - - if sov.isActive() { - s.activeSOVLookup[validationID] = sov - s.activeSOVs.ReplaceOrInsert(sov) - err = putSubnetOnlyValidator(s.activeDB, sov) - } else { - err = putSubnetOnlyValidator(s.inactiveDB, sov) - } - if err != nil { - return err - } - - // The next loop shouldn't consider this change. 
- delete(sovChanges, validationID) } - // Perform additions: + // Perform modifications and additions: for validationID, sov := range sovChanges { - subnetIDNodeID := subnetIDNodeID{ - subnetID: sov.SubnetID, - nodeID: sov.NodeID, - } - subnetIDNodeIDKey := subnetIDNodeID.Marshal() - if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]); err != nil { + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) + switch err { + case nil: + // This is modifying an existing validator + if priorSOV.isActive() { + delete(s.activeSOVLookup, validationID) + s.activeSOVs.Delete(priorSOV) + err = deleteSubnetOnlyValidator(s.activeDB, validationID) + } else { + err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) + } + if err != nil { + return err + } + case database.ErrNotFound: + // This is a new validator + subnetIDNodeID := subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + subnetIDNodeIDKey := subnetIDNodeID.Marshal() + if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]); err != nil { + return err + } + default: return err } - var err error if sov.isActive() { s.activeSOVLookup[validationID] = sov s.activeSOVs.ReplaceOrInsert(sov) From 5f8a09ca115f8b363e1d5a9d9506e3150c10f270 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 26 Oct 2024 20:25:42 -0400 Subject: [PATCH 110/155] reduce diff --- vms/platformvm/state/state.go | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 841fe972278b..004d273a5edd 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2699,7 +2699,6 @@ func writePendingDiff( return nil } -// TODO: Add caching func (s *state) writeSubnetOnlyValidators() error { // Write modified weights: for subnetID, weight := range s.sovDiff.modifiedTotalWeight { @@ -2756,9 +2755,6 @@ func (s *state) writeSubnetOnlyValidators() error { } else { err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) } - if err != nil { - return err - } case database.ErrNotFound: // This is a new validator subnetIDNodeID := subnetIDNodeID{ @@ -2766,10 +2762,9 @@ func (s *state) writeSubnetOnlyValidators() error { nodeID: sov.NodeID, } subnetIDNodeIDKey := subnetIDNodeID.Marshal() - if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]); err != nil { - return err - } - default: + err = s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]) + } + if err != nil { return err } From 3e9dc01fd1327f151149ae749886d7b8a299db8a Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 26 Oct 2024 20:49:51 -0400 Subject: [PATCH 111/155] reduce diff --- vms/platformvm/state/state.go | 6 ++++-- vms/platformvm/state/subnet_only_validator.go | 20 +++---------------- 2 files changed, 7 insertions(+), 19 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 004d273a5edd..07e99d505c9d 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2228,7 +2228,8 @@ func (s *state) getInheritedPublicKey(nodeID ids.NodeID) (*bls.PublicKey, error) // updateValidatorManager updates the validator manager with the pending // validator set changes. // -// This function must be called prior to writeCurrentStakers. +// This function must be called prior to writeCurrentStakers and +// writeSubnetOnlyValidators. 
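+// Both of those functions clear the pending staker and SoV diffs that are
+// read here.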
func (s *state) updateValidatorManager(updateValidators bool) error { if !updateValidators { return nil @@ -2409,7 +2410,8 @@ func (s *state) updateValidatorManager(updateValidators bool) error { // writeValidatorDiffs writes the validator set diff contained by the pending // validator set changes to disk. // -// This function must be called prior to writeCurrentStakers. +// This function must be called prior to writeCurrentStakers and +// writeSubnetOnlyValidators. func (s *state) writeValidatorDiffs(height uint64) error { type validatorChanges struct { weightDiff ValidatorWeightDiff diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 6d2a0f6afb9e..cb40856efc59 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -243,26 +243,17 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov Subne return err } - switch { - case prevWeight < sov.Weight: + if prevWeight != sov.Weight { weight, err := state.WeightOfSubnetOnlyValidators(sov.SubnetID) if err != nil { return err } - weight, err = safemath.Add(weight, sov.Weight-prevWeight) + weight, err = safemath.Sub(weight, prevWeight) if err != nil { return err } - - d.modifiedTotalWeight[sov.SubnetID] = weight - case prevWeight > sov.Weight: - weight, err := state.WeightOfSubnetOnlyValidators(sov.SubnetID) - if err != nil { - return err - } - - weight, err = safemath.Sub(weight, prevWeight-sov.Weight) + weight, err = safemath.Add(weight, sov.Weight) if err != nil { return err } @@ -278,11 +269,6 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov Subne } if prevSOV, ok := d.modified[sov.ValidationID]; ok { - prevSubnetIDNodeID := subnetIDNodeID{ - subnetID: prevSOV.SubnetID, - nodeID: prevSOV.NodeID, - } - d.modifiedHasNodeIDs[prevSubnetIDNodeID] = false d.active.Delete(prevSOV) } d.modified[sov.ValidationID] = sov From da3a7267c73d69d052db7e2e08005f06817d9ad5 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 26 Oct 2024 20:52:54 -0400 Subject: [PATCH 112/155] reduce diff --- vms/platformvm/state/state.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 07e99d505c9d..e0e39a0f14a9 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -773,11 +773,7 @@ func (s *state) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { } // TODO: Add caching - weight, err := database.GetUInt64(s.weightsDB, subnetID[:]) - if err == database.ErrNotFound { - return 0, nil - } - return weight, err + return database.WithDefault(database.GetUInt64, s.weightsDB, subnetID[:], 0) } func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { From 8483cedaaa4397d1c737d95326ee9cb143990e77 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sat, 26 Oct 2024 21:05:18 -0400 Subject: [PATCH 113/155] cleanup --- vms/platformvm/state/diff.go | 6 +++--- vms/platformvm/state/state.go | 12 +++++------ vms/platformvm/state/subnet_only_validator.go | 21 ++++++++++++------- 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 47d98863106c..4b8922f74492 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -231,7 +231,7 @@ func (d *diff) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { func (d *diff) GetSubnetOnlyValidator(validationID 
ids.ID) (SubnetOnlyValidator, error) { if sov, modified := d.sovDiff.modified[validationID]; modified { - if sov.Weight == 0 { + if sov.isDeleted() { return SubnetOnlyValidator{}, database.ErrNotFound } return sov, nil @@ -577,7 +577,7 @@ func (d *diff) Apply(baseState Chain) error { // a single diff can't get reordered into the addition happening first; // which would return an error. for _, sov := range d.sovDiff.modified { - if sov.Weight != 0 { + if !sov.isDeleted() { continue } if err := baseState.PutSubnetOnlyValidator(sov); err != nil { @@ -585,7 +585,7 @@ func (d *diff) Apply(baseState Chain) error { } } for _, sov := range d.sovDiff.modified { - if sov.Weight == 0 { + if sov.isDeleted() { continue } if err := baseState.PutSubnetOnlyValidator(sov); err != nil { diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index e0e39a0f14a9..421c2355e5a3 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -778,7 +778,7 @@ func (s *state) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { if sov, modified := s.sovDiff.modified[validationID]; modified { - if sov.Weight == 0 { + if sov.isDeleted() { return SubnetOnlyValidator{}, database.ErrNotFound } return sov, nil @@ -2283,7 +2283,7 @@ func (s *state) updateValidatorManager(updateValidators bool) error { sovChangesApplied set.Set[ids.ID] ) for validationID, sov := range sovChanges { - if sov.Weight != 0 { + if !sov.isDeleted() { // Additions and modifications are handled in the next loops. continue } @@ -2327,14 +2327,14 @@ func (s *state) updateValidatorManager(updateValidators bool) error { sovChangesApplied.Add(validationID) switch { - case !priorSOV.isActive() && sov.isActive(): + case priorSOV.isInactive() && sov.isActive(): // This validator is being activated. pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) err = errors.Join( s.validators.RemoveWeight(sov.SubnetID, ids.EmptyNodeID, priorSOV.Weight), s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight), ) - case priorSOV.isActive() && !sov.isActive(): + case priorSOV.isActive() && sov.isInactive(): // This validator is being deactivated. inactiveWeight := s.validators.GetWeight(sov.SubnetID, ids.EmptyNodeID) if inactiveWeight == 0 { @@ -2484,7 +2484,7 @@ func (s *state) writeValidatorDiffs(height uint64) error { // Perform SoV additions: for _, sov := range s.sovDiff.modified { // If the validator is being removed, we shouldn't work to re-add it. - if sov.Weight == 0 { + if sov.isDeleted() { continue } @@ -2709,7 +2709,7 @@ func (s *state) writeSubnetOnlyValidators() error { sovChanges := s.sovDiff.modified // Perform deletions: for validationID, sov := range sovChanges { - if sov.Weight != 0 { + if !sov.isDeleted() { // Additions and modifications are handled in the next loops. 
continue } diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index cb40856efc59..932aa1a20cb2 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -139,10 +139,18 @@ func (v SubnetOnlyValidator) constantsAreUnmodified(o SubnetOnlyValidator) bool v.StartTime == o.StartTime } +func (v SubnetOnlyValidator) isDeleted() bool { + return v.Weight == 0 +} + func (v SubnetOnlyValidator) isActive() bool { return v.Weight != 0 && v.EndAccumulatedFee != 0 } +func (v SubnetOnlyValidator) isInactive() bool { + return v.Weight != 0 && v.EndAccumulatedFee == 0 +} + func getSubnetOnlyValidator(db database.KeyValueReader, validationID ids.ID) (SubnetOnlyValidator, error) { bytes, err := db.Get(validationID[:]) if err != nil { @@ -211,7 +219,7 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov Subne var ( prevWeight uint64 prevActive bool - newActive = sov.Weight != 0 && sov.EndAccumulatedFee != 0 + newActive = sov.isActive() ) switch priorSOV, err := state.GetSubnetOnlyValidator(sov.ValidationID); err { case nil: @@ -220,7 +228,7 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov Subne } prevWeight = priorSOV.Weight - prevActive = priorSOV.EndAccumulatedFee != 0 + prevActive = priorSOV.isActive() case database.ErrNotFound: // Verify that there is not a legacy subnet validator with the same // subnetID+nodeID as this L1 validator. @@ -277,12 +285,9 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov Subne subnetID: sov.SubnetID, nodeID: sov.NodeID, } - isDeleted := sov.Weight == 0 - d.modifiedHasNodeIDs[subnetIDNodeID] = !isDeleted - if isDeleted || sov.EndAccumulatedFee == 0 { - // Validator is being deleted or is inactive - return nil + d.modifiedHasNodeIDs[subnetIDNodeID] = !sov.isDeleted() + if sov.isActive() { + d.active.ReplaceOrInsert(sov) } - d.active.ReplaceOrInsert(sov) return nil } From 0507ce702057b92bbf0b8b14680fd79c02b43fd4 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 27 Oct 2024 14:36:10 -0400 Subject: [PATCH 114/155] nit --- vms/platformvm/state/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 33ae0e61e2ee..ce4869792c61 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2054,7 +2054,7 @@ func (s *state) getInheritedPublicKey(nodeID ids.NodeID) (*bls.PublicKey, error) return vdr.validator.PublicKey, nil } if vdr, ok := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID][nodeID]; ok && vdr.validator != nil { - // The primary network validator is being removed. + // The primary network validator is being modified. 
return vdr.validator.PublicKey, nil } return nil, fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) From 8bbeee650f5d3e9834ae9f4b8bb809b52b885ae9 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 27 Oct 2024 15:49:29 -0400 Subject: [PATCH 115/155] Add comment --- vms/platformvm/state/subnet_only_validator.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 932aa1a20cb2..41014793c2ab 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -195,6 +195,8 @@ func newSubnetOnlyValidatorsDiff() *subnetOnlyValidatorsDiff { } } +// getActiveSubnetOnlyValidatorsIterator takes in the parent iterator, removes +// all modified validators, and then adds all modified active validators. func (d *subnetOnlyValidatorsDiff) getActiveSubnetOnlyValidatorsIterator(parentIterator iterator.Iterator[SubnetOnlyValidator]) iterator.Iterator[SubnetOnlyValidator] { return iterator.Merge( SubnetOnlyValidator.Less, From 08dd776bce7b5a727636f71f5f7f163662a4f023 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 27 Oct 2024 16:00:30 -0400 Subject: [PATCH 116/155] comment --- vms/platformvm/state/state.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index abedb5343cdb..f4ed7f26d7e5 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -787,6 +787,9 @@ func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator return s.getPersistedSubnetOnlyValidator(validationID) } +// getPersistedSubnetOnlyValidator returns the currently persisted +// SubnetOnlyValidator with the given validationID. It is guaranteed that any +// returned validator is either active or inactive. func (s *state) getPersistedSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { if sov, ok := s.activeSOVLookup[validationID]; ok { return sov, nil @@ -2733,6 +2736,9 @@ func (s *state) writeSubnetOnlyValidators() error { s.activeSOVs.Delete(priorSOV) err = deleteSubnetOnlyValidator(s.activeDB, validationID) } else { + // It is technically possible for the validator not to exist on disk + // here, but that's fine as deleting an entry that doesn't exist is + // a noop. 
err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) } if err != nil { From 255b0bf92c322f41536801d4ac8f41034719ef12 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 27 Oct 2024 16:15:45 -0400 Subject: [PATCH 117/155] nit --- vms/platformvm/state/subnet_only_validator.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 41014793c2ab..ddaa5eb5a50a 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -14,9 +14,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/iterator" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/platformvm/block" - - safemath "github.com/ava-labs/avalanchego/utils/math" ) var ( @@ -259,11 +258,11 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov Subne return err } - weight, err = safemath.Sub(weight, prevWeight) + weight, err = math.Sub(weight, prevWeight) if err != nil { return err } - weight, err = safemath.Add(weight, sov.Weight) + weight, err = math.Add(weight, sov.Weight) if err != nil { return err } From 3bc547d7741946ab731e6d29055c2e31deebe0ab Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Sun, 27 Oct 2024 18:19:52 -0400 Subject: [PATCH 118/155] reduce diff --- vms/platformvm/validators/manager.go | 94 +++++++++++++++++++++++++--- 1 file changed, 87 insertions(+), 7 deletions(-) diff --git a/vms/platformvm/validators/manager.go b/vms/platformvm/validators/manager.go index 2e67faa63464..142db3e7635c 100644 --- a/vms/platformvm/validators/manager.go +++ b/vms/platformvm/validators/manager.go @@ -200,7 +200,17 @@ func (m *manager) GetValidatorSet( // get the start time to track metrics startTime := m.clk.Time() - validatorSet, currentHeight, err := m.makeValidatorSet(ctx, targetHeight, subnetID) + + var ( + validatorSet map[ids.NodeID]*validators.GetValidatorOutput + currentHeight uint64 + err error + ) + if subnetID == constants.PrimaryNetworkID { + validatorSet, currentHeight, err = m.makePrimaryNetworkValidatorSet(ctx, targetHeight) + } else { + validatorSet, currentHeight, err = m.makeSubnetValidatorSet(ctx, targetHeight, subnetID) + } if err != nil { return nil, err } @@ -233,12 +243,65 @@ func (m *manager) getValidatorSetCache(subnetID ids.ID) cache.Cacher[uint64, map return validatorSetsCache } -func (m *manager) makeValidatorSet( +func (m *manager) makePrimaryNetworkValidatorSet( + ctx context.Context, + targetHeight uint64, +) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { + validatorSet, currentHeight, err := m.getCurrentPrimaryValidatorSet(ctx) + if err != nil { + return nil, 0, err + } + if currentHeight < targetHeight { + return nil, 0, fmt.Errorf("%w with SubnetID = %s: current P-chain height (%d) < requested P-Chain height (%d)", + errUnfinalizedHeight, + constants.PrimaryNetworkID, + currentHeight, + targetHeight, + ) + } + + // Rebuild primary network validators at [targetHeight] + // + // Note: Since we are attempting to generate the validator set at + // [targetHeight], we want to apply the diffs from + // (targetHeight, currentHeight]. Because the state interface is implemented + // to be inclusive, we apply diffs in [targetHeight + 1, currentHeight]. 
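+	//
+	// For example, with targetHeight = 5 and currentHeight = 8, the weight and
+	// public key diffs recorded at heights 8, 7, and 6 are applied to walk the
+	// current validator set back to its state at height 5.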
+ lastDiffHeight := targetHeight + 1 + err = m.state.ApplyValidatorWeightDiffs( + ctx, + validatorSet, + currentHeight, + lastDiffHeight, + constants.PrimaryNetworkID, + ) + if err != nil { + return nil, 0, err + } + + err = m.state.ApplyValidatorPublicKeyDiffs( + ctx, + validatorSet, + currentHeight, + lastDiffHeight, + constants.PrimaryNetworkID, + ) + return validatorSet, currentHeight, err +} + +func (m *manager) getCurrentPrimaryValidatorSet( + ctx context.Context, +) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { + primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) + currentHeight, err := m.getCurrentHeight(ctx) + return primaryMap, currentHeight, err +} + +func (m *manager) makeSubnetValidatorSet( ctx context.Context, targetHeight uint64, subnetID ids.ID, ) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { - subnetValidatorSet, currentHeight, err := m.getCurrentValidatorSet(ctx, subnetID) + subnetValidatorSet, primaryValidatorSet, currentHeight, err := m.getCurrentValidatorSets(ctx, subnetID) if err != nil { return nil, 0, err } @@ -269,23 +332,40 @@ func (m *manager) makeValidatorSet( return nil, 0, err } + // Update the subnet validator set to include the public keys at + // [currentHeight]. When we apply the public key diffs, we will convert + // these keys to represent the public keys at [targetHeight]. If the subnet + // validator is not currently a primary network validator, it doesn't have a + // key at [currentHeight]. + for nodeID, vdr := range subnetValidatorSet { + if primaryVdr, ok := primaryValidatorSet[nodeID]; ok { + vdr.PublicKey = primaryVdr.PublicKey + } else { + vdr.PublicKey = nil + } + } + err = m.state.ApplyValidatorPublicKeyDiffs( ctx, subnetValidatorSet, currentHeight, lastDiffHeight, - subnetID, + // TODO: Etna introduces L1s whose validators specify their own public + // keys, rather than inheriting them from the primary network. + // Therefore, this will need to use the subnetID after Etna. 
+ constants.PrimaryNetworkID, ) return subnetValidatorSet, currentHeight, err } -func (m *manager) getCurrentValidatorSet( +func (m *manager) getCurrentValidatorSets( ctx context.Context, subnetID ids.ID, -) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { +) (map[ids.NodeID]*validators.GetValidatorOutput, map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { subnetMap := m.cfg.Validators.GetMap(subnetID) + primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) currentHeight, err := m.getCurrentHeight(ctx) - return subnetMap, currentHeight, err + return subnetMap, primaryMap, currentHeight, err } func (m *manager) GetSubnetID(_ context.Context, chainID ids.ID) (ids.ID, error) { From 41f78f0915f4ffd54e7ca4572b6fe1de21faa6e5 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 13:35:31 -0400 Subject: [PATCH 119/155] nit --- vms/platformvm/state/state.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 9372357270e8..1bbb4c1b1e9b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -809,10 +809,11 @@ func (s *state) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool return has, nil } - // TODO: Add caching - key := make([]byte, len(subnetID)+len(nodeID)) - copy(key, subnetID[:]) - copy(key[len(subnetID):], nodeID[:]) + subnetIDNodeID := subnetIDNodeID{ + subnetID: subnetID, + nodeID: nodeID, + } + key := subnetIDNodeID.Marshal() return s.subnetIDNodeIDDB.Has(key) } From 29cd6badb5c59965b4d18ed8b058a006d29abf66 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 13:38:15 -0400 Subject: [PATCH 120/155] nit --- vms/platformvm/state/state.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 1bbb4c1b1e9b..68021ebebf0a 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -814,6 +814,8 @@ func (s *state) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool nodeID: nodeID, } key := subnetIDNodeID.Marshal() + + // TODO: Add caching return s.subnetIDNodeIDDB.Has(key) } From 8dfcbb15570d7618722c338e541ea139455ce4b9 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 14:10:25 -0400 Subject: [PATCH 121/155] Fix initValidatorSets --- snow/validators/manager.go | 10 +++++ vms/platformvm/state/state.go | 10 ++--- vms/platformvm/state/state_test.go | 67 ++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 5 deletions(-) diff --git a/snow/validators/manager.go b/snow/validators/manager.go index 45ba32c0e261..6fd8a1b5f7c5 100644 --- a/snow/validators/manager.go +++ b/snow/validators/manager.go @@ -80,6 +80,9 @@ type Manager interface { // If an error is returned, the set will be unmodified. RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error + // NumSubnets returns the number of subnets with non-zero weight. + NumSubnets() int + // Count returns the number of validators currently in the subnet. 
Count(subnetID ids.ID) int @@ -227,6 +230,13 @@ func (m *manager) RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64 return nil } +func (m *manager) NumSubnets() int { + m.lock.RLock() + defer m.lock.RUnlock() + + return len(m.subnetToVdrs) +} + func (m *manager) Count(subnetID ids.ID) int { m.lock.RLock() set, exists := m.subnetToVdrs[subnetID] diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 68021ebebf0a..56cb2d85920e 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1867,6 +1867,11 @@ func (s *state) loadPendingValidators() error { // Invariant: initValidatorSets requires loadActiveSubnetOnlyValidators and // loadCurrentValidators to have already been called. func (s *state) initValidatorSets() error { + if s.validators.NumSubnets() != 0 { + // Enforce the invariant that the validator set is empty here. + return errValidatorSetAlreadyPopulated + } + // Load ACP77 validators for validationID, sov := range s.activeSOVLookup { pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) @@ -1913,11 +1918,6 @@ func (s *state) initValidatorSets() error { // Load primary network and non-ACP77 validators primaryNetworkValidators := s.currentStakers.validators[constants.PrimaryNetworkID] for subnetID, subnetValidators := range s.currentStakers.validators { - if s.validators.Count(subnetID) != 0 { - // Enforce the invariant that the validator set is empty here. - return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) - } - for nodeID, subnetValidator := range subnetValidators { // The subnet validator's Public Key is inherited from the // corresponding primary network validator. diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 0ff795aefbdd..5dbf976e3270 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1944,6 +1944,73 @@ func TestSubnetOnlyValidators(t *testing.T) { } } +// TestLoadSubnetOnlyValidatorAndLegacy tests that the state can be loaded when +// there is a mix of legacy validators and subnet only validators in the same +// subnet. 
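+//
+// The validator set computed before the restart must match the validator set
+// loaded from disk afterwards.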
+func TestLoadSubnetOnlyValidatorAndLegacy(t *testing.T) { + var ( + require = require.New(t) + db = memdb.New() + state = newTestState(t, db) + subnetID = ids.GenerateTestID() + weight uint64 = 1 + ) + + unsignedAddSubnetValidator := createPermissionlessValidatorTx( + t, + subnetID, + txs.Validator{ + NodeID: defaultValidatorNodeID, + End: genesistest.DefaultValidatorEndTimeUnix, + Wght: weight, + }, + ) + addSubnetValidator := &txs.Tx{Unsigned: unsignedAddSubnetValidator} + require.NoError(addSubnetValidator.Initialize(txs.Codec)) + state.AddTx(addSubnetValidator, status.Committed) + + legacyStaker := &Staker{ + TxID: addSubnetValidator.ID(), + NodeID: defaultValidatorNodeID, + PublicKey: nil, + SubnetID: subnetID, + Weight: weight, + StartTime: genesistest.DefaultValidatorStartTime, + EndTime: genesistest.DefaultValidatorEndTime, + PotentialReward: 0, + } + require.NoError(state.PutCurrentValidator(legacyStaker)) + + sk, err := bls.NewSecretKey() + require.NoError(err) + pk := bls.PublicFromSecretKey(sk) + pkBytes := bls.PublicKeyToUncompressedBytes(pk) + + sov := SubnetOnlyValidator{ + ValidationID: ids.GenerateTestID(), + SubnetID: legacyStaker.SubnetID, + NodeID: ids.GenerateTestNodeID(), + PublicKey: pkBytes, + RemainingBalanceOwner: utils.RandomBytes(32), + DeactivationOwner: utils.RandomBytes(32), + StartTime: 1, + Weight: 2, + MinNonce: 3, + EndAccumulatedFee: 4, + } + require.NoError(state.PutSubnetOnlyValidator(sov)) + + state.SetHeight(1) + require.NoError(state.Commit()) + + expectedValidatorSet := state.validators.GetMap(subnetID) + + state = newTestState(t, db) + + validatorSet := state.validators.GetMap(subnetID) + require.Equal(expectedValidatorSet, validatorSet) +} + func TestSubnetOnlyValidatorAfterLegacyRemoval(t *testing.T) { require := require.New(t) From ef295480177969f4c4079073e54651bb1ef413be Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 14:35:25 -0400 Subject: [PATCH 122/155] nit --- node/overridden_manager.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/node/overridden_manager.go b/node/overridden_manager.go index 484fe05da758..e961d2bd2d46 100644 --- a/node/overridden_manager.go +++ b/node/overridden_manager.go @@ -56,6 +56,10 @@ func (o *overriddenManager) RemoveWeight(_ ids.ID, nodeID ids.NodeID, weight uin return o.manager.RemoveWeight(o.subnetID, nodeID, weight) } +func (o *overriddenManager) NumSubnets() int { + return 1 +} + func (o *overriddenManager) Count(ids.ID) int { return o.manager.Count(o.subnetID) } From a77fb3c517811e9ee451c9aa3e4e5196fcb117af Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 14:35:48 -0400 Subject: [PATCH 123/155] test subnetIDNodeIDDB --- vms/platformvm/state/state_test.go | 43 +++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 5dbf976e3270..50ef6b4919df 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1832,7 +1832,7 @@ func TestSubnetOnlyValidators(t *testing.T) { verifyChain := func(chain Chain) { for _, expectedSOV := range expectedSOVs { - if expectedSOV.Weight != 0 { + if !expectedSOV.isDeleted() { continue } @@ -1846,7 +1846,7 @@ func TestSubnetOnlyValidators(t *testing.T) { expectedActive []SubnetOnlyValidator ) for _, expectedSOV := range expectedSOVs { - if expectedSOV.Weight == 0 { + if expectedSOV.isDeleted() { continue } @@ -1893,13 +1893,50 @@ func TestSubnetOnlyValidators(t *testing.T) { 
verifyChain(state) assertChainsEqual(t, state, d) + // Verify that the subnetID+nodeID -> validationID mapping is correct. + var populatedSubnetIDNodeIDs set.Set[subnetIDNodeID] + for _, sov := range expectedSOVs { + if sov.isDeleted() { + continue + } + + subnetIDNodeID := subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + populatedSubnetIDNodeIDs.Add(subnetIDNodeID) + + subnetIDNodeIDKey := subnetIDNodeID.Marshal() + validatorID, err := database.GetID(state.subnetIDNodeIDDB, subnetIDNodeIDKey) + require.NoError(err) + require.Equal(sov.ValidationID, validatorID) + } + for _, sov := range expectedSOVs { + if !sov.isDeleted() { + continue + } + + subnetIDNodeID := subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + if populatedSubnetIDNodeIDs.Contains(subnetIDNodeID) { + continue + } + + subnetIDNodeIDKey := subnetIDNodeID.Marshal() + has, err := state.subnetIDNodeIDDB.Has(subnetIDNodeIDKey) + require.NoError(err) + require.False(has) + } + sovsToValidatorSet := func( sovs map[ids.ID]SubnetOnlyValidator, subnetID ids.ID, ) map[ids.NodeID]*validators.GetValidatorOutput { validatorSet := make(map[ids.NodeID]*validators.GetValidatorOutput) for _, sov := range sovs { - if sov.SubnetID != subnetID || sov.Weight == 0 { + if sov.SubnetID != subnetID || sov.isDeleted() { continue } From ce05dc86a64624e348c97c5fc328009cc6b27799 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 14:40:54 -0400 Subject: [PATCH 124/155] fix comments --- vms/platformvm/state/state_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 50ef6b4919df..d65d2af9bf3c 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1730,7 +1730,7 @@ func TestSubnetOnlyValidators(t *testing.T) { NodeID: sov.NodeID, PublicKey: pkBytes, Weight: 2, // Not removed - EndAccumulatedFee: 1, // Inactive + EndAccumulatedFee: 1, // Active }, { ValidationID: sov.ValidationID, @@ -1738,7 +1738,7 @@ func TestSubnetOnlyValidators(t *testing.T) { NodeID: sov.NodeID, PublicKey: pkBytes, Weight: 3, // Not removed - EndAccumulatedFee: 1, // Inactive + EndAccumulatedFee: 1, // Active }, }, }, @@ -1768,7 +1768,7 @@ func TestSubnetOnlyValidators(t *testing.T) { NodeID: sov.NodeID, PublicKey: otherPKBytes, Weight: 1, // Not removed - EndAccumulatedFee: 1, // Inactive + EndAccumulatedFee: 1, // Active }, }, }, From 3d04cefefe4fb1999404b6ac016f0603931e1798 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 14:45:53 -0400 Subject: [PATCH 125/155] nit --- node/overridden_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/overridden_manager.go b/node/overridden_manager.go index e961d2bd2d46..12c647b53fe4 100644 --- a/node/overridden_manager.go +++ b/node/overridden_manager.go @@ -56,7 +56,7 @@ func (o *overriddenManager) RemoveWeight(_ ids.ID, nodeID ids.NodeID, weight uin return o.manager.RemoveWeight(o.subnetID, nodeID, weight) } -func (o *overriddenManager) NumSubnets() int { +func (*overriddenManager) NumSubnets() int { return 1 } From dc35645a866cd8beec3f7e13844f18c2c74359d7 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 14:46:22 -0400 Subject: [PATCH 126/155] nit --- node/overridden_manager.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/node/overridden_manager.go b/node/overridden_manager.go index 12c647b53fe4..61e37833f140 100644 --- a/node/overridden_manager.go 
+++ b/node/overridden_manager.go @@ -56,7 +56,10 @@ func (o *overriddenManager) RemoveWeight(_ ids.ID, nodeID ids.NodeID, weight uin return o.manager.RemoveWeight(o.subnetID, nodeID, weight) } -func (*overriddenManager) NumSubnets() int { +func (o *overriddenManager) NumSubnets() int { + if o.manager.Count(o.subnetID) == 0 { + return 0 + } return 1 } From d472a9f6ebe1dffa4ec34e21cb2b0ed46e40cf45 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 16:07:39 -0400 Subject: [PATCH 127/155] Reduce diff --- vms/platformvm/state/state.go | 206 ++++++------------ vms/platformvm/state/state_test.go | 12 +- vms/platformvm/state/subnet_only_validator.go | 29 ++- 3 files changed, 91 insertions(+), 156 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 56cb2d85920e..e2451ecdeedb 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -34,7 +34,6 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -2236,6 +2235,20 @@ func (s *state) getInheritedPublicKey(nodeID ids.NodeID) (*bls.PublicKey, error) return nil, fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) } +func (s *state) addSoVToValidatorManager(sov SubnetOnlyValidator) error { + nodeID := sov.effectiveNodeID() + if s.validators.GetWeight(sov.SubnetID, nodeID) != 0 { + return s.validators.AddWeight(sov.SubnetID, nodeID, sov.Weight) + } + return s.validators.AddStaker( + sov.SubnetID, + nodeID, + sov.effectivePublicKey(), + sov.effectiveValidationID(), + sov.Weight, + ) +} + // updateValidatorManager updates the validator manager with the pending // validator set changes. // @@ -2292,115 +2305,41 @@ func (s *state) updateValidatorManager(updateValidators bool) error { } } - // Perform SoV deletions: - var ( - sovChanges = s.sovDiff.modified - sovChangesApplied set.Set[ids.ID] - ) - for validationID, sov := range sovChanges { - if !sov.isDeleted() { - // Additions and modifications are handled in the next loops. - continue - } - - sovChangesApplied.Add(validationID) - - priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) - if err == database.ErrNotFound { - // Deleting a non-existent validator is a noop. This can happen if - // the validator was added and then immediately removed. - continue - } - if err != nil { - return err - } - - nodeID := ids.EmptyNodeID - if priorSOV.isActive() { - nodeID = priorSOV.NodeID - } - if err := s.validators.RemoveWeight(priorSOV.SubnetID, nodeID, priorSOV.Weight); err != nil { - return fmt.Errorf("failed to delete SoV validator: %w", err) - } - } - - // Perform modifications: - for validationID, sov := range sovChanges { - if sovChangesApplied.Contains(validationID) { - continue - } - + for validationID, sov := range s.sovDiff.modified { priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) - if err == database.ErrNotFound { - // New additions are handled in the next loop. - continue - } - if err != nil { - return err - } - - sovChangesApplied.Add(validationID) - - switch { - case priorSOV.isInactive() && sov.isActive(): - // This validator is being activated. 
- pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) - err = errors.Join( - s.validators.RemoveWeight(sov.SubnetID, ids.EmptyNodeID, priorSOV.Weight), - s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight), - ) - case priorSOV.isActive() && sov.isInactive(): - // This validator is being deactivated. - inactiveWeight := s.validators.GetWeight(sov.SubnetID, ids.EmptyNodeID) - if inactiveWeight == 0 { - err = s.validators.AddStaker(sov.SubnetID, ids.EmptyNodeID, nil, ids.Empty, sov.Weight) + switch err { + case nil: + if sov.isDeleted() { + // Removing a validator + err = s.validators.RemoveWeight(priorSOV.SubnetID, priorSOV.effectiveNodeID(), priorSOV.Weight) } else { - err = s.validators.AddWeight(sov.SubnetID, ids.EmptyNodeID, sov.Weight) - } - err = errors.Join( - err, - s.validators.RemoveWeight(sov.SubnetID, sov.NodeID, priorSOV.Weight), - ) - default: - // This validator's active status isn't changing. - nodeID := ids.EmptyNodeID - if sov.isActive() { - nodeID = sov.NodeID - } - if priorSOV.Weight < sov.Weight { - err = s.validators.AddWeight(sov.SubnetID, nodeID, sov.Weight-priorSOV.Weight) - } else if priorSOV.Weight > sov.Weight { - err = s.validators.RemoveWeight(sov.SubnetID, nodeID, priorSOV.Weight-sov.Weight) + // Modifying a validator + if priorSOV.isActive() == sov.isActive() { + // This validator's active status isn't changing. This means + // the effectiveNodeIDs are equal. + nodeID := sov.effectiveNodeID() + if priorSOV.Weight < sov.Weight { + err = s.validators.AddWeight(sov.SubnetID, nodeID, sov.Weight-priorSOV.Weight) + } else if priorSOV.Weight > sov.Weight { + err = s.validators.RemoveWeight(sov.SubnetID, nodeID, priorSOV.Weight-sov.Weight) + } + } else { + // This validator's active status is changing. + err = errors.Join( + s.validators.RemoveWeight(sov.SubnetID, priorSOV.effectiveNodeID(), priorSOV.Weight), + s.addSoVToValidatorManager(sov), + ) + } } - } - if err != nil { - return err - } - } - - // Perform additions: - for validationID, sov := range sovChanges { - if sovChangesApplied.Contains(validationID) { - continue - } - - if sov.isActive() { - pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) - if err := s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight); err != nil { - return fmt.Errorf("failed to add SoV validator: %w", err) + case database.ErrNotFound: + if sov.isDeleted() { + // Deleting a non-existent validator is a noop. This can happen + // if the validator was added and then immediately removed. 
+ continue } - continue - } - // This validator is inactive - var ( - inactiveWeight = s.validators.GetWeight(sov.SubnetID, ids.EmptyNodeID) - err error - ) - if inactiveWeight == 0 { - err = s.validators.AddStaker(sov.SubnetID, ids.EmptyNodeID, nil, ids.Empty, sov.Weight) - } else { - err = s.validators.AddWeight(sov.SubnetID, ids.EmptyNodeID, sov.Weight) + // Adding a validator + err = s.addSoVToValidatorManager(sov) } if err != nil { return err @@ -2467,61 +2406,40 @@ func (s *state) calculateValidatorDiffs() (map[subnetIDNodeID]*validatorDiff, er } } - // Perform SoV deletions: - for validationID := range s.sovDiff.modified { + // Calculate the changes to the ACP-77 validator set + for validationID, sov := range s.sovDiff.modified { priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) - if err == database.ErrNotFound { - continue - } - if err != nil { - return nil, err - } - - var ( - diff *validatorDiff - subnetIDNodeID = subnetIDNodeID{ + if err == nil { + // Delete the prior validator + subnetIDNodeID := subnetIDNodeID{ subnetID: priorSOV.SubnetID, + nodeID: priorSOV.effectiveNodeID(), } - ) - if priorSOV.isActive() { - subnetIDNodeID.nodeID = priorSOV.NodeID - diff = getOrDefault(changes, subnetIDNodeID) - diff.prevPublicKey = priorSOV.PublicKey - } else { - subnetIDNodeID.nodeID = ids.EmptyNodeID - diff = getOrDefault(changes, subnetIDNodeID) + diff := getOrDefault(changes, subnetIDNodeID) + if err := diff.weightDiff.Add(true, priorSOV.Weight); err != nil { + return nil, err + } + diff.prevPublicKey = priorSOV.effectivePublicKeyBytes() } - - if err := diff.weightDiff.Add(true, priorSOV.Weight); err != nil { + if err != database.ErrNotFound && err != nil { return nil, err } - } - // Perform SoV additions: - for _, sov := range s.sovDiff.modified { // If the validator is being removed, we shouldn't work to re-add it. if sov.isDeleted() { continue } - var ( - diff *validatorDiff - subnetIDNodeID = subnetIDNodeID{ - subnetID: sov.SubnetID, - } - ) - if sov.isActive() { - subnetIDNodeID.nodeID = sov.NodeID - diff = getOrDefault(changes, subnetIDNodeID) - diff.newPublicKey = sov.PublicKey - } else { - subnetIDNodeID.nodeID = ids.EmptyNodeID - diff = getOrDefault(changes, subnetIDNodeID) + // Add the new validator + subnetIDNodeID := subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.effectiveNodeID(), } - + diff := getOrDefault(changes, subnetIDNodeID) if err := diff.weightDiff.Add(false, sov.Weight); err != nil { return nil, err } + diff.newPublicKey = sov.effectivePublicKeyBytes() } return changes, nil diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index d65d2af9bf3c..bc3ab907c7c7 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1940,20 +1940,12 @@ func TestSubnetOnlyValidators(t *testing.T) { continue } - nodeID := sov.NodeID - publicKey := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) - // Inactive validators are combined into a single validator - // with the empty ID. 
- if sov.EndAccumulatedFee == 0 { - nodeID = ids.EmptyNodeID - publicKey = nil - } - + nodeID := sov.effectiveNodeID() vdr, ok := validatorSet[nodeID] if !ok { vdr = &validators.GetValidatorOutput{ NodeID: nodeID, - PublicKey: publicKey, + PublicKey: sov.effectivePublicKey(), } validatorSet[nodeID] = vdr } diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index ddaa5eb5a50a..24ea8bde739e 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/platformvm/block" @@ -146,8 +147,32 @@ func (v SubnetOnlyValidator) isActive() bool { return v.Weight != 0 && v.EndAccumulatedFee != 0 } -func (v SubnetOnlyValidator) isInactive() bool { - return v.Weight != 0 && v.EndAccumulatedFee == 0 +func (v SubnetOnlyValidator) effectiveValidationID() ids.ID { + if v.isActive() { + return v.ValidationID + } + return ids.Empty +} + +func (v SubnetOnlyValidator) effectiveNodeID() ids.NodeID { + if v.isActive() { + return v.NodeID + } + return ids.EmptyNodeID +} + +func (v SubnetOnlyValidator) effectivePublicKey() *bls.PublicKey { + if v.isActive() { + return bls.PublicKeyFromValidUncompressedBytes(v.PublicKey) + } + return nil +} + +func (v SubnetOnlyValidator) effectivePublicKeyBytes() []byte { + if v.isActive() { + return v.PublicKey + } + return nil } func getSubnetOnlyValidator(db database.KeyValueReader, validationID ids.ID) (SubnetOnlyValidator, error) { From 34ba29b13325af33ba3773617c907db80269a73f Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 29 Oct 2024 16:16:46 -0400 Subject: [PATCH 128/155] add comment --- vms/platformvm/state/state.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index e2451ecdeedb..36c95516bab3 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1894,6 +1894,8 @@ func (s *state) initValidatorSets() error { return err } + // It is required for the SoVs to be loaded first so that the total + // weight is equal to the active weights here. 
activeWeight, err := s.validators.TotalWeight(subnetID) if err != nil { return err From 97029aa1e051df281456cecded55e1b67b730c7d Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 30 Oct 2024 10:12:36 -0400 Subject: [PATCH 129/155] reduce diff --- vms/platformvm/state/state.go | 1 - 1 file changed, 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 36c95516bab3..b9c975599f56 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2653,7 +2653,6 @@ func (s *state) writeSubnetOnlyValidators() error { return err } } - maps.Clear(s.sovDiff.modifiedTotalWeight) sovChanges := s.sovDiff.modified // Perform deletions: From dbeee70561cce248f44769117c8097fef29b7c48 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 30 Oct 2024 10:45:14 -0400 Subject: [PATCH 130/155] Add NumSubnets to the validator manager interface --- network/network.go | 2 +- node/node.go | 2 +- node/overridden_manager.go | 11 +++- snow/engine/snowman/bootstrap/bootstrapper.go | 2 +- .../snowman/bootstrap/bootstrapper_test.go | 4 +- snow/engine/snowman/syncer/state_syncer.go | 2 +- .../snowman/syncer/state_syncer_test.go | 12 ++-- snow/engine/snowman/syncer/utils_test.go | 2 +- snow/validators/manager.go | 16 ++++- snow/validators/manager_test.go | 61 +++++++++++++------ vms/platformvm/state/state.go | 10 +-- vms/platformvm/vm_test.go | 4 +- 12 files changed, 83 insertions(+), 45 deletions(-) diff --git a/network/network.go b/network/network.go index eab4ecca085e..816cbc69c7a2 100644 --- a/network/network.go +++ b/network/network.go @@ -778,7 +778,7 @@ func (n *network) samplePeers( // As an optimization, if there are fewer validators than // [numValidatorsToSample], only attempt to sample [numValidatorsToSample] // validators to potentially avoid iterating over the entire peer set. 
- numValidatorsToSample := min(config.Validators, n.config.Validators.Count(subnetID)) + numValidatorsToSample := min(config.Validators, n.config.Validators.NumValidators(subnetID)) n.peersLock.RLock() defer n.peersLock.RUnlock() diff --git a/node/node.go b/node/node.go index 1fedf35eb97e..8cdc8edfce17 100644 --- a/node/node.go +++ b/node/node.go @@ -600,7 +600,7 @@ func (n *Node) initNetworking(reg prometheus.Registerer) error { } n.onSufficientlyConnected = make(chan struct{}) - numBootstrappers := n.bootstrappers.Count(constants.PrimaryNetworkID) + numBootstrappers := n.bootstrappers.NumValidators(constants.PrimaryNetworkID) requiredConns := (3*numBootstrappers + 3) / 4 if requiredConns > 0 { diff --git a/node/overridden_manager.go b/node/overridden_manager.go index 484fe05da758..467dd7df1fa7 100644 --- a/node/overridden_manager.go +++ b/node/overridden_manager.go @@ -56,8 +56,15 @@ func (o *overriddenManager) RemoveWeight(_ ids.ID, nodeID ids.NodeID, weight uin return o.manager.RemoveWeight(o.subnetID, nodeID, weight) } -func (o *overriddenManager) Count(ids.ID) int { - return o.manager.Count(o.subnetID) +func (o *overriddenManager) NumSubnets() int { + if o.manager.NumValidators(o.subnetID) == 0 { + return 0 + } + return 1 +} + +func (o *overriddenManager) NumValidators(ids.ID) int { + return o.manager.NumValidators(o.subnetID) } func (o *overriddenManager) TotalWeight(ids.ID) (uint64, error) { diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index 024925c79288..4b0b511910f0 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -300,7 +300,7 @@ func (b *Bootstrapper) sendBootstrappingMessagesOrFinish(ctx context.Context) er if numAccepted == 0 { b.Ctx.Log.Debug("restarting bootstrap", zap.String("reason", "no blocks accepted"), - zap.Int("numBeacons", b.Beacons.Count(b.Ctx.SubnetID)), + zap.Int("numBeacons", b.Beacons.NumValidators(b.Ctx.SubnetID)), ) // Invariant: These functions are mutualy recursive. 
However, when // [startBootstrapping] calls [sendMessagesOrFinish], it is guaranteed diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 772cf51281e9..7803f82a725f 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -98,7 +98,7 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *enginetest.Sender, *blocktest AllGetsServer: snowGetHandler, Ctx: ctx, Beacons: vdrs, - SampleK: vdrs.Count(ctx.SubnetID), + SampleK: vdrs.NumValidators(ctx.SubnetID), StartupTracker: startupTracker, PeerTracker: peerTracker, Sender: sender, @@ -693,7 +693,7 @@ func TestBootstrapNoParseOnNew(t *testing.T) { AllGetsServer: snowGetHandler, Ctx: ctx, Beacons: peers, - SampleK: peers.Count(ctx.SubnetID), + SampleK: peers.NumValidators(ctx.SubnetID), StartupTracker: startupTracker, PeerTracker: peerTracker, Sender: sender, diff --git a/snow/engine/snowman/syncer/state_syncer.go b/snow/engine/snowman/syncer/state_syncer.go index 76e647e73a64..7e669fee2534 100644 --- a/snow/engine/snowman/syncer/state_syncer.go +++ b/snow/engine/snowman/syncer/state_syncer.go @@ -373,7 +373,7 @@ func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.Node if votingStakes < ss.Alpha { ss.Ctx.Log.Debug("restarting state sync", zap.String("reason", "not enough votes received"), - zap.Int("numBeacons", ss.StateSyncBeacons.Count(ss.Ctx.SubnetID)), + zap.Int("numBeacons", ss.StateSyncBeacons.NumValidators(ss.Ctx.SubnetID)), zap.Int("numFailedSyncers", ss.failedVoters.Len()), ) return ss.startup(ctx) diff --git a/snow/engine/snowman/syncer/state_syncer_test.go b/snow/engine/snowman/syncer/state_syncer_test.go index fd062cb2d8b1..ffb7de7ffca7 100644 --- a/snow/engine/snowman/syncer/state_syncer_test.go +++ b/snow/engine/snowman/syncer/state_syncer_test.go @@ -247,7 +247,7 @@ func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { } // check that vdrs are reached out for frontiers - require.Len(contactedFrontiersProviders, min(beacons.Count(ctx.SubnetID), maxOutstandingBroadcastRequests)) + require.Len(contactedFrontiersProviders, min(beacons.NumValidators(ctx.SubnetID), maxOutstandingBroadcastRequests)) for beaconID := range contactedFrontiersProviders { // check that beacon is duly marked as reached out require.Contains(syncer.pendingSeeders, beaconID) @@ -344,7 +344,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // other listed vdrs are reached for data require.True( len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize || - len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID)) + len(contactedFrontiersProviders) == beacons.NumValidators(ctx.SubnetID)) } func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { @@ -413,7 +413,7 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { // are reached for data require.True( len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize || - len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID)) + len(contactedFrontiersProviders) == beacons.NumValidators(ctx.SubnetID)) } func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { @@ -475,7 +475,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { // are reached for data require.True( len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize || - len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID)) + 
len(contactedFrontiersProviders) == beacons.NumValidators(ctx.SubnetID)) // mock VM to simulate a valid but late summary is returned fullVM.CantParseStateSummary = true @@ -773,7 +773,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // other listed voters are reached out require.True( len(contactedVoters) > initiallyContactedVotersSize || - len(contactedVoters) == beacons.Count(ctx.SubnetID)) + len(contactedVoters) == beacons.NumValidators(ctx.SubnetID)) } func TestVotesForUnknownSummariesAreDropped(t *testing.T) { @@ -876,7 +876,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // on unknown summary require.True( len(contactedVoters) > initiallyContactedVotersSize || - len(contactedVoters) == beacons.Count(ctx.SubnetID)) + len(contactedVoters) == beacons.NumValidators(ctx.SubnetID)) } func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { diff --git a/snow/engine/snowman/syncer/utils_test.go b/snow/engine/snowman/syncer/utils_test.go index 4cd6e58d840e..d13b7347771b 100644 --- a/snow/engine/snowman/syncer/utils_test.go +++ b/snow/engine/snowman/syncer/utils_test.go @@ -107,7 +107,7 @@ func buildTestsObjects( startupTracker, sender, beacons, - beacons.Count(ctx.SubnetID), + beacons.NumValidators(ctx.SubnetID), alpha, nil, fullVM, diff --git a/snow/validators/manager.go b/snow/validators/manager.go index 45ba32c0e261..dd2a8f88d1f8 100644 --- a/snow/validators/manager.go +++ b/snow/validators/manager.go @@ -80,8 +80,11 @@ type Manager interface { // If an error is returned, the set will be unmodified. RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error - // Count returns the number of validators currently in the subnet. - Count(subnetID ids.ID) int + // NumSubnets returns the number of subnets with non-zero weight. + NumSubnets() int + + // NumValidators returns the number of validators currently in the subnet. + NumValidators(subnetID ids.ID) int // TotalWeight returns the cumulative weight of all validators in the subnet. // Returns err if total weight overflows uint64. 
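As an aside on how the renamed API reads at a call site, here is a minimal, hypothetical sketch (the package name and the requiredBootstrapConns helper are illustrative only, not part of this patch series; the 3/4 quorum expression is the same one node.go uses above with NumValidators):

package example

import (
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/validators"
)

// requiredBootstrapConns asks the Manager for the subnet's current validator
// count and requires roughly 3/4 of it, mirroring the bootstrapper quorum
// computed in node.go after the Count -> NumValidators rename.
func requiredBootstrapConns(vdrs validators.Manager, subnetID ids.ID) int {
	numBootstrappers := vdrs.NumValidators(subnetID)
	return (3*numBootstrappers + 3) / 4
}
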
@@ -227,7 +230,14 @@ func (m *manager) RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64 return nil } -func (m *manager) Count(subnetID ids.ID) int { +func (m *manager) NumSubnets() int { + m.lock.RLock() + defer m.lock.RUnlock() + + return len(m.subnetToVdrs) +} + +func (m *manager) NumValidators(subnetID ids.ID) int { m.lock.RLock() set, exists := m.subnetToVdrs[subnetID] m.lock.RUnlock() diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go index 365d7ffdf7d7..4449a324a57d 100644 --- a/snow/validators/manager_test.go +++ b/snow/validators/manager_test.go @@ -242,36 +242,57 @@ func TestGet(t *testing.T) { require.False(ok) } -func TestLen(t *testing.T) { - require := require.New(t) +func TestNum(t *testing.T) { + var ( + require = require.New(t) - m := NewManager() - subnetID := ids.GenerateTestID() + m = NewManager() - count := m.Count(subnetID) - require.Zero(count) + subnetID0 = ids.GenerateTestID() + subnetID1 = ids.GenerateTestID() + nodeID0 = ids.GenerateTestNodeID() + nodeID1 = ids.GenerateTestNodeID() + ) - nodeID0 := ids.GenerateTestNodeID() - require.NoError(m.AddStaker(subnetID, nodeID0, nil, ids.Empty, 1)) + require.Zero(m.NumSubnets()) + require.Zero(m.NumValidators(subnetID0)) + require.Zero(m.NumValidators(subnetID1)) - count = m.Count(subnetID) - require.Equal(1, count) + require.NoError(m.AddStaker(subnetID0, nodeID0, nil, ids.Empty, 1)) - nodeID1 := ids.GenerateTestNodeID() - require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, 1)) + require.Equal(1, m.NumSubnets()) + require.Equal(1, m.NumValidators(subnetID0)) + require.Zero(m.NumValidators(subnetID1)) - count = m.Count(subnetID) - require.Equal(2, count) + require.NoError(m.AddStaker(subnetID0, nodeID1, nil, ids.Empty, 1)) - require.NoError(m.RemoveWeight(subnetID, nodeID1, 1)) + require.Equal(1, m.NumSubnets()) + require.Equal(2, m.NumValidators(subnetID0)) + require.Zero(m.NumValidators(subnetID1)) - count = m.Count(subnetID) - require.Equal(1, count) + require.NoError(m.AddStaker(subnetID1, nodeID1, nil, ids.Empty, 2)) - require.NoError(m.RemoveWeight(subnetID, nodeID0, 1)) + require.Equal(2, m.NumSubnets()) + require.Equal(2, m.NumValidators(subnetID0)) + require.Equal(1, m.NumValidators(subnetID1)) + + require.NoError(m.RemoveWeight(subnetID0, nodeID1, 1)) + + require.Equal(2, m.NumSubnets()) + require.Equal(1, m.NumValidators(subnetID0)) + require.Equal(1, m.NumValidators(subnetID1)) + + require.NoError(m.RemoveWeight(subnetID0, nodeID0, 1)) + + require.Equal(1, m.NumSubnets()) + require.Zero(m.NumValidators(subnetID0)) + require.Equal(1, m.NumValidators(subnetID1)) + + require.NoError(m.RemoveWeight(subnetID1, nodeID1, 2)) - count = m.Count(subnetID) - require.Zero(count) + require.Zero(m.NumSubnets()) + require.Zero(m.NumValidators(subnetID0)) + require.Zero(m.NumValidators(subnetID1)) } func TestGetMap(t *testing.T) { diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index be6bee28cf0f..09a22f8c27de 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1750,13 +1750,13 @@ func (s *state) loadPendingValidators() error { // Invariant: initValidatorSets requires loadCurrentValidators to have already // been called. func (s *state) initValidatorSets() error { + if s.validators.NumSubnets() != 0 { + // Enforce the invariant that the validator set is empty here. 
+ return errValidatorSetAlreadyPopulated + } + primaryNetworkValidators := s.currentStakers.validators[constants.PrimaryNetworkID] for subnetID, subnetValidators := range s.currentStakers.validators { - if s.validators.Count(subnetID) != 0 { - // Enforce the invariant that the validator set is empty here. - return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) - } - for nodeID, subnetValidator := range subnetValidators { // The subnet validator's Public Key is inherited from the // corresponding primary network validator. diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 7048778b19bd..f377fd31f894 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -283,7 +283,7 @@ func TestGenesis(t *testing.T) { } // Ensure current validator set of primary network is correct - require.Len(genesisState.Validators, vm.Validators.Count(constants.PrimaryNetworkID)) + require.Len(genesisState.Validators, vm.Validators.NumValidators(constants.PrimaryNetworkID)) for _, nodeID := range genesistest.DefaultNodeIDs { _, ok := vm.Validators.GetValidator(constants.PrimaryNetworkID, nodeID) @@ -1326,7 +1326,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { AllGetsServer: snowGetHandler, Ctx: consensusCtx, Beacons: beacons, - SampleK: beacons.Count(ctx.SubnetID), + SampleK: beacons.NumValidators(ctx.SubnetID), StartupTracker: startup, PeerTracker: peerTracker, Sender: sender, From 3621e53fa19e65df8ab977915876a29b648f9737 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 30 Oct 2024 12:50:25 -0400 Subject: [PATCH 131/155] add comment --- vms/platformvm/state/state_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index bc3ab907c7c7..df6e92c7b088 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -2040,6 +2040,8 @@ func TestLoadSubnetOnlyValidatorAndLegacy(t *testing.T) { require.Equal(expectedValidatorSet, validatorSet) } +// TestSubnetOnlyValidatorAfterLegacyRemoval verifies that a legacy validator +// can be replaced by an SoV in the same block. 
func TestSubnetOnlyValidatorAfterLegacyRemoval(t *testing.T) { require := require.New(t) From 92a2277da4f7daf1880d5eb5a9c623a0012e000c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 30 Oct 2024 13:19:44 -0400 Subject: [PATCH 132/155] Delete empty entries --- vms/platformvm/state/state.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index b9c975599f56..488e593779b8 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2649,7 +2649,13 @@ func writePendingDiff( func (s *state) writeSubnetOnlyValidators() error { // Write modified weights: for subnetID, weight := range s.sovDiff.modifiedTotalWeight { - if err := database.PutUInt64(s.weightsDB, subnetID[:], weight); err != nil { + var err error + if weight == 0 { + err = s.weightsDB.Delete(subnetID[:]) + } else { + err = database.PutUInt64(s.weightsDB, subnetID[:], weight) + } + if err != nil { return err } } From 6375aa2667a9fb4013b3ade5ab9a2788dd04d215 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 30 Oct 2024 17:36:43 -0400 Subject: [PATCH 133/155] simplify state futher --- vms/platformvm/state/state.go | 90 ++++++------------- vms/platformvm/state/subnet_only_validator.go | 52 +++++++++++ 2 files changed, 78 insertions(+), 64 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 488e593779b8..2dc3dfa0143a 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -329,8 +329,7 @@ type state struct { expiryDiff *expiryDiff expiryDB database.Database - activeSOVLookup map[ids.ID]SubnetOnlyValidator - activeSOVs *btree.BTreeG[SubnetOnlyValidator] + activeSOVs *activeSubnetOnlyValidators sovDiff *subnetOnlyValidatorsDiff subnetOnlyValidatorsDB database.Database weightsDB database.Database @@ -657,8 +656,7 @@ func New( expiryDiff: newExpiryDiff(), expiryDB: prefixdb.New(ExpiryReplayProtectionPrefix, baseDB), - activeSOVLookup: make(map[ids.ID]SubnetOnlyValidator), - activeSOVs: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), + activeSOVs: newActiveSubnetOnlyValidators(), sovDiff: newSubnetOnlyValidatorsDiff(), subnetOnlyValidatorsDB: subnetOnlyValidatorsDB, weightsDB: prefixdb.New(WeightsPrefix, subnetOnlyValidatorsDB), @@ -763,12 +761,12 @@ func (s *state) DeleteExpiry(entry ExpiryEntry) { func (s *state) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[SubnetOnlyValidator], error) { return s.sovDiff.getActiveSubnetOnlyValidatorsIterator( - iterator.FromTree(s.activeSOVs), + s.activeSOVs.newIterator(), ), nil } func (s *state) NumActiveSubnetOnlyValidators() int { - return len(s.activeSOVLookup) + s.sovDiff.numAddedActive + return s.activeSOVs.len() + s.sovDiff.numAddedActive } func (s *state) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { @@ -795,7 +793,7 @@ func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator // SubnetOnlyValidator with the given validationID. It is guaranteed that any // returned validator is either active or inactive. 
func (s *state) getPersistedSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { - if sov, ok := s.activeSOVLookup[validationID]; ok { + if sov, ok := s.activeSOVs.get(validationID); ok { return sov, nil } @@ -1602,8 +1600,7 @@ func (s *state) loadActiveSubnetOnlyValidators() error { return fmt.Errorf("failed to unmarshal SubnetOnlyValidator: %w", err) } - s.activeSOVLookup[validationID] = sov - s.activeSOVs.ReplaceOrInsert(sov) + s.activeSOVs.put(sov) } return nil @@ -1872,11 +1869,8 @@ func (s *state) initValidatorSets() error { } // Load ACP77 validators - for validationID, sov := range s.activeSOVLookup { - pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) - if err := s.validators.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight); err != nil { - return err - } + if err := s.activeSOVs.addStakers(s.validators); err != nil { + return err } // Load inactive weights @@ -2660,72 +2654,40 @@ func (s *state) writeSubnetOnlyValidators() error { } } - sovChanges := s.sovDiff.modified - // Perform deletions: - for validationID, sov := range sovChanges { - if !sov.isDeleted() { - // Additions and modifications are handled in the next loops. - continue + for validationID, sov := range s.sovDiff.modified { + // Delete the prior validator if it exists + var err error + if s.activeSOVs.delete(validationID) { + err = deleteSubnetOnlyValidator(s.activeDB, validationID) + } else { + err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) + } + if err != nil { + return err } - // The next loops shouldn't consider this change. - delete(sovChanges, validationID) - + // Update the subnetIDNodeID mapping subnetIDNodeID := subnetIDNodeID{ subnetID: sov.SubnetID, nodeID: sov.NodeID, } subnetIDNodeIDKey := subnetIDNodeID.Marshal() - err := s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey) - if err != nil { - return err - } - - priorSOV, wasActive := s.activeSOVLookup[validationID] - if wasActive { - delete(s.activeSOVLookup, validationID) - s.activeSOVs.Delete(priorSOV) - err = deleteSubnetOnlyValidator(s.activeDB, validationID) + if sov.isDeleted() { + err = s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey) } else { - // It is technically possible for the validator not to exist on disk - // here, but that's fine as deleting an entry that doesn't exist is - // a noop. 
- err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) + err = s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]) } if err != nil { return err } - } - // Perform modifications and additions: - for validationID, sov := range sovChanges { - priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) - switch err { - case nil: - // This is modifying an existing validator - if priorSOV.isActive() { - delete(s.activeSOVLookup, validationID) - s.activeSOVs.Delete(priorSOV) - err = deleteSubnetOnlyValidator(s.activeDB, validationID) - } else { - err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) - } - case database.ErrNotFound: - // This is a new validator - subnetIDNodeID := subnetIDNodeID{ - subnetID: sov.SubnetID, - nodeID: sov.NodeID, - } - subnetIDNodeIDKey := subnetIDNodeID.Marshal() - err = s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]) - } - if err != nil { - return err + if sov.isDeleted() { + continue } + // Add the new validator if sov.isActive() { - s.activeSOVLookup[validationID] = sov - s.activeSOVs.ReplaceOrInsert(sov) + s.activeSOVs.put(sov) err = putSubnetOnlyValidator(s.activeDB, sov) } else { err = putSubnetOnlyValidator(s.inactiveDB, sov) diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 24ea8bde739e..0814e304d068 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/iterator" @@ -317,3 +318,54 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov Subne } return nil } + +type activeSubnetOnlyValidators struct { + lookup map[ids.ID]SubnetOnlyValidator + tree *btree.BTreeG[SubnetOnlyValidator] +} + +func newActiveSubnetOnlyValidators() *activeSubnetOnlyValidators { + return &activeSubnetOnlyValidators{ + lookup: make(map[ids.ID]SubnetOnlyValidator), + tree: btree.NewG(defaultTreeDegree, SubnetOnlyValidator.Less), + } +} + +func (a *activeSubnetOnlyValidators) get(validationID ids.ID) (SubnetOnlyValidator, bool) { + sov, ok := a.lookup[validationID] + return sov, ok +} + +func (a *activeSubnetOnlyValidators) put(sov SubnetOnlyValidator) { + a.lookup[sov.ValidationID] = sov + a.tree.ReplaceOrInsert(sov) +} + +func (a *activeSubnetOnlyValidators) delete(validationID ids.ID) bool { + sov, ok := a.lookup[validationID] + if !ok { + return false + } + + delete(a.lookup, validationID) + a.tree.Delete(sov) + return true +} + +func (a *activeSubnetOnlyValidators) len() int { + return len(a.lookup) +} + +func (a *activeSubnetOnlyValidators) newIterator() iterator.Iterator[SubnetOnlyValidator] { + return iterator.FromTree(a.tree) +} + +func (a *activeSubnetOnlyValidators) addStakers(vdrs validators.Manager) error { + for validationID, sov := range a.lookup { + pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) + if err := vdrs.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight); err != nil { + return err + } + } + return nil +} From aedff159444e006e901d4c7352049281edfbf58a Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 30 Oct 2024 17:44:25 -0400 Subject: [PATCH 134/155] nit --- vms/platformvm/state/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 2dc3dfa0143a..54e07902e3a8 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2641,7 +2641,7 @@ func writePendingDiff( } func (s *state) writeSubnetOnlyValidators() error { - // Write modified weights: + // Write modified weights for subnetID, weight := range s.sovDiff.modifiedTotalWeight { var err error if weight == 0 { From 1ac030ac06f5b24f79bef12ae69e05a0b7adbe16 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Wed, 30 Oct 2024 17:46:49 -0400 Subject: [PATCH 135/155] nit --- vms/platformvm/state/state.go | 18 ++---------------- vms/platformvm/state/subnet_only_validator.go | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 54e07902e3a8..cbdda6537a53 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2231,20 +2231,6 @@ func (s *state) getInheritedPublicKey(nodeID ids.NodeID) (*bls.PublicKey, error) return nil, fmt.Errorf("%w: %s", errMissingPrimaryNetworkValidator, nodeID) } -func (s *state) addSoVToValidatorManager(sov SubnetOnlyValidator) error { - nodeID := sov.effectiveNodeID() - if s.validators.GetWeight(sov.SubnetID, nodeID) != 0 { - return s.validators.AddWeight(sov.SubnetID, nodeID, sov.Weight) - } - return s.validators.AddStaker( - sov.SubnetID, - nodeID, - sov.effectivePublicKey(), - sov.effectiveValidationID(), - sov.Weight, - ) -} - // updateValidatorManager updates the validator manager with the pending // validator set changes. // @@ -2323,7 +2309,7 @@ func (s *state) updateValidatorManager(updateValidators bool) error { // This validator's active status is changing. err = errors.Join( s.validators.RemoveWeight(sov.SubnetID, priorSOV.effectiveNodeID(), priorSOV.Weight), - s.addSoVToValidatorManager(sov), + addSoVToValidatorManager(s.validators, sov), ) } } @@ -2335,7 +2321,7 @@ func (s *state) updateValidatorManager(updateValidators bool) error { } // Adding a validator - err = s.addSoVToValidatorManager(sov) + err = addSoVToValidatorManager(s.validators, sov) } if err != nil { return err diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 0814e304d068..a299cd478c3e 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -369,3 +369,17 @@ func (a *activeSubnetOnlyValidators) addStakers(vdrs validators.Manager) error { } return nil } + +func addSoVToValidatorManager(vdrs validators.Manager, sov SubnetOnlyValidator) error { + nodeID := sov.effectiveNodeID() + if vdrs.GetWeight(sov.SubnetID, nodeID) != 0 { + return vdrs.AddWeight(sov.SubnetID, nodeID, sov.Weight) + } + return vdrs.AddStaker( + sov.SubnetID, + nodeID, + sov.effectivePublicKey(), + sov.effectiveValidationID(), + sov.Weight, + ) +} From 9e0d7d5efe5ef7fc801b7a747d08123a515f37a4 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 31 Oct 2024 11:03:00 -0400 Subject: [PATCH 136/155] Add caching --- vms/platformvm/config/execution_config.go | 54 +++++---- .../config/execution_config_test.go | 25 ++-- vms/platformvm/state/state.go | 107 +++++++++++++++--- 3 files changed, 137 insertions(+), 49 deletions(-) diff --git a/vms/platformvm/config/execution_config.go b/vms/platformvm/config/execution_config.go index e5bef1637d05..2c63c6299e58 100644 --- a/vms/platformvm/config/execution_config.go +++ b/vms/platformvm/config/execution_config.go @@ -12,34 +12,40 @@ 
import ( ) var DefaultExecutionConfig = ExecutionConfig{ - Network: network.DefaultConfig, - BlockCacheSize: 64 * units.MiB, - TxCacheSize: 128 * units.MiB, - TransformedSubnetTxCacheSize: 4 * units.MiB, - RewardUTXOsCacheSize: 2048, - ChainCacheSize: 2048, - ChainDBCacheSize: 2048, - BlockIDCacheSize: 8192, - FxOwnerCacheSize: 4 * units.MiB, - SubnetConversionCacheSize: 4 * units.MiB, - ChecksumsEnabled: false, - MempoolPruneFrequency: 30 * time.Minute, + Network: network.DefaultConfig, + BlockCacheSize: 64 * units.MiB, + TxCacheSize: 128 * units.MiB, + TransformedSubnetTxCacheSize: 4 * units.MiB, + RewardUTXOsCacheSize: 2048, + ChainCacheSize: 2048, + ChainDBCacheSize: 2048, + BlockIDCacheSize: 8192, + FxOwnerCacheSize: 4 * units.MiB, + SubnetConversionCacheSize: 4 * units.MiB, + L1WeightsCacheSize: 16 * units.KiB, + L1InactiveValidatorsCacheSize: 256 * units.KiB, + L1SubnetIDNodeIDCacheSize: 16 * units.KiB, + ChecksumsEnabled: false, + MempoolPruneFrequency: 30 * time.Minute, } // ExecutionConfig provides execution parameters of PlatformVM type ExecutionConfig struct { - Network network.Config `json:"network"` - BlockCacheSize int `json:"block-cache-size"` - TxCacheSize int `json:"tx-cache-size"` - TransformedSubnetTxCacheSize int `json:"transformed-subnet-tx-cache-size"` - RewardUTXOsCacheSize int `json:"reward-utxos-cache-size"` - ChainCacheSize int `json:"chain-cache-size"` - ChainDBCacheSize int `json:"chain-db-cache-size"` - BlockIDCacheSize int `json:"block-id-cache-size"` - FxOwnerCacheSize int `json:"fx-owner-cache-size"` - SubnetConversionCacheSize int `json:"subnet-conversion-cache-size"` - ChecksumsEnabled bool `json:"checksums-enabled"` - MempoolPruneFrequency time.Duration `json:"mempool-prune-frequency"` + Network network.Config `json:"network"` + BlockCacheSize int `json:"block-cache-size"` + TxCacheSize int `json:"tx-cache-size"` + TransformedSubnetTxCacheSize int `json:"transformed-subnet-tx-cache-size"` + RewardUTXOsCacheSize int `json:"reward-utxos-cache-size"` + ChainCacheSize int `json:"chain-cache-size"` + ChainDBCacheSize int `json:"chain-db-cache-size"` + BlockIDCacheSize int `json:"block-id-cache-size"` + FxOwnerCacheSize int `json:"fx-owner-cache-size"` + SubnetConversionCacheSize int `json:"subnet-conversion-cache-size"` + L1WeightsCacheSize int `json:"l1-weights-cache-size"` + L1InactiveValidatorsCacheSize int `json:"l1-inactive-validators-cache-size"` + L1SubnetIDNodeIDCacheSize int `json:"l1-subnet-id-node-id-cache-size"` + ChecksumsEnabled bool `json:"checksums-enabled"` + MempoolPruneFrequency time.Duration `json:"mempool-prune-frequency"` } // GetExecutionConfig returns an ExecutionConfig diff --git a/vms/platformvm/config/execution_config_test.go b/vms/platformvm/config/execution_config_test.go index c938c177add3..f4b077689b23 100644 --- a/vms/platformvm/config/execution_config_test.go +++ b/vms/platformvm/config/execution_config_test.go @@ -81,17 +81,20 @@ func TestExecutionConfigUnmarshal(t *testing.T) { ExpectedBloomFilterFalsePositiveProbability: 16, MaxBloomFilterFalsePositiveProbability: 17, }, - BlockCacheSize: 1, - TxCacheSize: 2, - TransformedSubnetTxCacheSize: 3, - RewardUTXOsCacheSize: 5, - ChainCacheSize: 6, - ChainDBCacheSize: 7, - BlockIDCacheSize: 8, - FxOwnerCacheSize: 9, - SubnetConversionCacheSize: 10, - ChecksumsEnabled: true, - MempoolPruneFrequency: time.Minute, + BlockCacheSize: 1, + TxCacheSize: 2, + TransformedSubnetTxCacheSize: 3, + RewardUTXOsCacheSize: 5, + ChainCacheSize: 6, + ChainDBCacheSize: 7, + BlockIDCacheSize: 8, + 
FxOwnerCacheSize: 9, + SubnetConversionCacheSize: 10, + L1WeightsCacheSize: 11, + L1InactiveValidatorsCacheSize: 12, + L1SubnetIDNodeIDCacheSize: 13, + ChecksumsEnabled: true, + MempoolPruneFrequency: time.Minute, } verifyInitializedStruct(t, *expected) verifyInitializedStruct(t, expected.Network) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index cbdda6537a53..b059eeb4595e 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -34,6 +34,7 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -86,7 +87,7 @@ var ( SupplyPrefix = []byte("supply") ChainPrefix = []byte("chain") ExpiryReplayProtectionPrefix = []byte("expiryReplayProtection") - SubnetOnlyValidatorsPrefix = []byte("subnetOnlyValidators") + SubnetOnlyPrefix = []byte("subnetOnly") WeightsPrefix = []byte("weights") SubnetIDNodeIDPrefix = []byte("subnetIDNodeID") ActivePrefix = []byte("active") @@ -273,6 +274,15 @@ type stateBlk struct { * | | '-. subnetDelegator * | | '-. list * | | '-- txID -> nil + * | |-. subnetOnly + * | | |-. weights + * | | | '-- subnetID -> weight + * | | |-. subnetIDNodeID + * | | | '-- subnetID+nodeID -> validationID + * | | |-. active + * | | | '-- validationID -> subnetOnlyValidator + * | | '-. inactive + * | | '-- validationID -> subnetOnlyValidator * | |-. weight diffs * | | '-- subnet+height+nodeID -> weightChange * | '-. pub key diffs @@ -332,9 +342,12 @@ type state struct { activeSOVs *activeSubnetOnlyValidators sovDiff *subnetOnlyValidatorsDiff subnetOnlyValidatorsDB database.Database + weightsCache cache.Cacher[ids.ID, uint64] // subnetID -> total SoV weight weightsDB database.Database + subnetIDNodeIDCache cache.Cacher[subnetIDNodeID, bool] // subnetID+nodeID -> is validator subnetIDNodeIDDB database.Database activeDB database.Database + inactiveCache cache.Cacher[ids.ID, maybe.Maybe[SubnetOnlyValidator]] // validationID -> SubnetOnlyValidator inactiveDB database.Database currentStakers *baseStakers @@ -528,8 +541,6 @@ func New( baseDB := versiondb.New(db) - subnetOnlyValidatorsDB := prefixdb.New(SubnetOnlyValidatorsPrefix, baseDB) - validatorsDB := prefixdb.New(ValidatorsPrefix, baseDB) currentValidatorsDB := prefixdb.New(CurrentPrefix, validatorsDB) @@ -544,9 +555,55 @@ func New( pendingSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, pendingValidatorsDB) pendingSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, pendingValidatorsDB) + subnetOnlyValidatorsDB := prefixdb.New(SubnetOnlyPrefix, validatorsDB) + validatorWeightDiffsDB := prefixdb.New(ValidatorWeightDiffsPrefix, validatorsDB) validatorPublicKeyDiffsDB := prefixdb.New(ValidatorPublicKeyDiffsPrefix, validatorsDB) + weightsCache, err := metercacher.New( + "sov_weights_cache", + metricsReg, + cache.NewSizedLRU[ids.ID, uint64](execCfg.L1WeightsCacheSize, func(ids.ID, uint64) int { + return ids.IDLen + wrappers.LongLen + }), + ) + if err != nil { + return nil, err + } + + inactiveSOVsCache, err := metercacher.New( + "sov_inactive_cache", + metricsReg, + cache.NewSizedLRU[ids.ID, maybe.Maybe[SubnetOnlyValidator]]( + execCfg.L1InactiveValidatorsCacheSize, + func(_ ids.ID, maybeSOV maybe.Maybe[SubnetOnlyValidator]) int { + const sovOverhead = ids.IDLen + ids.NodeIDLen + 
4*wrappers.LongLen + 3*constants.PointerOverhead + const maybeSOVOverhead = wrappers.BoolLen + sovOverhead + const overhead = ids.IDLen + maybeSOVOverhead + if maybeSOV.IsNothing() { + return overhead + } + + sov := maybeSOV.Value() + return overhead + len(sov.PublicKey) + len(sov.RemainingBalanceOwner) + len(sov.DeactivationOwner) + }, + ), + ) + if err != nil { + return nil, err + } + + subnetIDNodeIDCache, err := metercacher.New( + "sov_subnet_id_node_id_cache", + metricsReg, + cache.NewSizedLRU[subnetIDNodeID, bool](execCfg.L1SubnetIDNodeIDCacheSize, func(subnetIDNodeID, bool) int { + return ids.IDLen + ids.NodeIDLen + wrappers.BoolLen + }), + ) + if err != nil { + return nil, err + } + txCache, err := metercacher.New( "tx_cache", metricsReg, @@ -659,9 +716,12 @@ func New( activeSOVs: newActiveSubnetOnlyValidators(), sovDiff: newSubnetOnlyValidatorsDiff(), subnetOnlyValidatorsDB: subnetOnlyValidatorsDB, + weightsCache: weightsCache, weightsDB: prefixdb.New(WeightsPrefix, subnetOnlyValidatorsDB), + subnetIDNodeIDCache: subnetIDNodeIDCache, subnetIDNodeIDDB: prefixdb.New(SubnetIDNodeIDPrefix, subnetOnlyValidatorsDB), activeDB: prefixdb.New(ActivePrefix, subnetOnlyValidatorsDB), + inactiveCache: inactiveSOVsCache, inactiveDB: prefixdb.New(InactivePrefix, subnetOnlyValidatorsDB), currentStakers: newBaseStakers(), @@ -774,7 +834,10 @@ func (s *state) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { return weight, nil } - // TODO: Add caching + if weight, ok := s.weightsCache.Get(subnetID); ok { + return weight, nil + } + return database.WithDefault(database.GetUInt64, s.weightsDB, subnetID[:], 0) } @@ -797,7 +860,13 @@ func (s *state) getPersistedSubnetOnlyValidator(validationID ids.ID) (SubnetOnly return sov, nil } - // TODO: Add caching + if maybeSOV, ok := s.inactiveCache.Get(validationID); ok { + if maybeSOV.IsNothing() { + return SubnetOnlyValidator{}, database.ErrNotFound + } + return maybeSOV.Value(), nil + } + return getSubnetOnlyValidator(s.inactiveDB, validationID) } @@ -810,9 +879,11 @@ func (s *state) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool subnetID: subnetID, nodeID: nodeID, } - key := subnetIDNodeID.Marshal() + if has, ok := s.subnetIDNodeIDCache.Get(subnetIDNodeID); ok { + return has, nil + } - // TODO: Add caching + key := subnetIDNodeID.Marshal() return s.subnetIDNodeIDDB.Has(key) } @@ -2638,6 +2709,8 @@ func (s *state) writeSubnetOnlyValidators() error { if err != nil { return err } + + s.weightsCache.Put(subnetID, weight) } for validationID, sov := range s.sovDiff.modified { @@ -2646,6 +2719,7 @@ func (s *state) writeSubnetOnlyValidators() error { if s.activeSOVs.delete(validationID) { err = deleteSubnetOnlyValidator(s.activeDB, validationID) } else { + s.inactiveCache.Put(validationID, maybe.Nothing[SubnetOnlyValidator]()) err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) } if err != nil { @@ -2653,12 +2727,15 @@ func (s *state) writeSubnetOnlyValidators() error { } // Update the subnetIDNodeID mapping - subnetIDNodeID := subnetIDNodeID{ - subnetID: sov.SubnetID, - nodeID: sov.NodeID, - } - subnetIDNodeIDKey := subnetIDNodeID.Marshal() - if sov.isDeleted() { + var ( + isDeleted = sov.isDeleted() + subnetIDNodeID = subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + subnetIDNodeIDKey = subnetIDNodeID.Marshal() + ) + if isDeleted { err = s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey) } else { err = s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]) @@ -2667,7 +2744,8 @@ func (s *state) 
writeSubnetOnlyValidators() error { return err } - if sov.isDeleted() { + s.subnetIDNodeIDCache.Put(subnetIDNodeID, !isDeleted) + if isDeleted { continue } @@ -2676,6 +2754,7 @@ func (s *state) writeSubnetOnlyValidators() error { s.activeSOVs.put(sov) err = putSubnetOnlyValidator(s.activeDB, sov) } else { + s.inactiveCache.Put(validationID, maybe.Some(sov)) err = putSubnetOnlyValidator(s.inactiveDB, sov) } if err != nil { From 9993b05f81e521e1b9ea6185eb66024e4e47e6e1 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 31 Oct 2024 11:07:01 -0400 Subject: [PATCH 137/155] nit --- vms/platformvm/state/state.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index b059eeb4595e..1abc1e227b1e 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -577,9 +577,11 @@ func New( cache.NewSizedLRU[ids.ID, maybe.Maybe[SubnetOnlyValidator]]( execCfg.L1InactiveValidatorsCacheSize, func(_ ids.ID, maybeSOV maybe.Maybe[SubnetOnlyValidator]) int { - const sovOverhead = ids.IDLen + ids.NodeIDLen + 4*wrappers.LongLen + 3*constants.PointerOverhead - const maybeSOVOverhead = wrappers.BoolLen + sovOverhead - const overhead = ids.IDLen + maybeSOVOverhead + const ( + sovOverhead = ids.IDLen + ids.NodeIDLen + 4*wrappers.LongLen + 3*constants.PointerOverhead + maybeSOVOverhead = wrappers.BoolLen + sovOverhead + overhead = ids.IDLen + maybeSOVOverhead + ) if maybeSOV.IsNothing() { return overhead } From 547d426c08f399ba2f2a4534aac5bda8de67e301 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 31 Oct 2024 11:07:32 -0400 Subject: [PATCH 138/155] nit --- vms/platformvm/state/state.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 1abc1e227b1e..cdb4f9c59483 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -580,14 +580,14 @@ func New( const ( sovOverhead = ids.IDLen + ids.NodeIDLen + 4*wrappers.LongLen + 3*constants.PointerOverhead maybeSOVOverhead = wrappers.BoolLen + sovOverhead - overhead = ids.IDLen + maybeSOVOverhead + entryOverhead = ids.IDLen + maybeSOVOverhead ) if maybeSOV.IsNothing() { - return overhead + return entryOverhead } sov := maybeSOV.Value() - return overhead + len(sov.PublicKey) + len(sov.RemainingBalanceOwner) + len(sov.DeactivationOwner) + return entryOverhead + len(sov.PublicKey) + len(sov.RemainingBalanceOwner) + len(sov.DeactivationOwner) }, ), ) From 6bcc0ead771e14c0d7108fdc3728bf040d675da9 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 31 Oct 2024 11:56:42 -0400 Subject: [PATCH 139/155] Add TODOs --- vms/platformvm/state/state.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index cdb4f9c59483..14c3f5ff0f7d 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1941,12 +1941,14 @@ func (s *state) initValidatorSets() error { return errValidatorSetAlreadyPopulated } - // Load ACP77 validators + // Load active ACP-77 validators if err := s.activeSOVs.addStakers(s.validators); err != nil { return err } - // Load inactive weights + // Load inactive ACP-77 validator weights + // + // TODO: L1s with no active weight should not be held in memory. 
it := s.weightsDB.NewIterator() defer it.Release() @@ -2309,6 +2311,8 @@ func (s *state) getInheritedPublicKey(nodeID ids.NodeID) (*bls.PublicKey, error) // // This function must be called prior to writeCurrentStakers and // writeSubnetOnlyValidators. +// +// TODO: L1s with no active weight should not be held in memory. func (s *state) updateValidatorManager(updateValidators bool) error { if !updateValidators { return nil From a7792c738b496250e92f619bca306d6f87b7025c Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 31 Oct 2024 12:21:48 -0400 Subject: [PATCH 140/155] Add config changes to readme --- RELEASES.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/RELEASES.md b/RELEASES.md index 16b371b67a12..d5c4b453189e 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,14 @@ # Release Notes +## Pending Release + +### Configs + +- Added P-chain configs + - `"l1-weights-cache-size"` + - `"l1-inactive-validators-cache-size"` + - `"l1-subnet-id-node-id-cache-size"` + ## [v1.11.11](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.11) This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. From 4723c46bdba39a88ce02d8253cbbe045df21f888 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 31 Oct 2024 13:00:11 -0400 Subject: [PATCH 141/155] Improve doc for PutSubnetOnlyValidator --- vms/platformvm/state/subnet_only_validator.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index a299cd478c3e..642b081800f0 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -51,13 +51,21 @@ type SubnetOnlyValidators interface { // exists. HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) - // PutSubnetOnlyValidator inserts [sov] as a validator. + // PutSubnetOnlyValidator inserts [sov] as a validator. If the weight of the + // validator is 0, the validator is removed. // // If inserting this validator attempts to modify any of the constant fields // of the subnet only validator struct, an error will be returned. // - // If inserting this validator would cause the mapping of subnetID+nodeID to - // validationID to be non-unique, an error will be returned. + // If inserting this validator would cause the total weight of subnet only + // validators on a subnet to overflow MaxUint64, an error will be returned. + // + // If inserting this validator would cause there to be multiple validators + // with the same subnetID and nodeID pair to exist at the same time, an + // error will be returned. + // + // If an SoV with the same validationID as a previously removed SoV is + // added, the behavior is undefined. 
PutSubnetOnlyValidator(sov SubnetOnlyValidator) error } From f1ca6e6206e4dc01c7f0f2f5967aa06b7aad907e Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 31 Oct 2024 13:17:19 -0400 Subject: [PATCH 142/155] Add test that decreases weight --- vms/platformvm/state/state_test.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index df6e92c7b088..a1a8feb6676e 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1665,6 +1665,29 @@ func TestSubnetOnlyValidators(t *testing.T) { }, }, }, + { + name: "decrease active weight", + initial: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 2, // Not removed + EndAccumulatedFee: 1, // Active + }, + }, + sovs: []SubnetOnlyValidator{ + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Decreased + EndAccumulatedFee: 1, // Active + }, + }, + }, { name: "deactivate", initial: []SubnetOnlyValidator{ From 71f88e87f9c0d78b47e5cd72125a5732eaedb53b Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 31 Oct 2024 13:42:16 -0400 Subject: [PATCH 143/155] Fix regression --- vms/platformvm/state/state.go | 111 ++++++++++++++++++++++------------ 1 file changed, 74 insertions(+), 37 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 14c3f5ff0f7d..7f136ea49bef 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2364,40 +2364,58 @@ func (s *state) updateValidatorManager(updateValidators bool) error { } } + // Remove all deleted SoV validators. This must be done before adding new + // SoV validators to support the case where a validator is removed and then + // immediately re-added with a different validationID. for validationID, sov := range s.sovDiff.modified { + if !sov.isDeleted() { + continue + } + + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) + if err == database.ErrNotFound { + // Deleting a non-existent validator is a noop. This can happen if + // the validator was added and then immediately removed. + continue + } + if err != nil { + return err + } + + if err := s.validators.RemoveWeight(priorSOV.SubnetID, priorSOV.effectiveNodeID(), priorSOV.Weight); err != nil { + return err + } + } + + // Now the removed SoV validators have been deleted, perform additions and + // modifications. + for validationID, sov := range s.sovDiff.modified { + if sov.isDeleted() { + continue + } + priorSOV, err := s.getPersistedSubnetOnlyValidator(validationID) switch err { case nil: - if sov.isDeleted() { - // Removing a validator - err = s.validators.RemoveWeight(priorSOV.SubnetID, priorSOV.effectiveNodeID(), priorSOV.Weight) - } else { - // Modifying a validator - if priorSOV.isActive() == sov.isActive() { - // This validator's active status isn't changing. This means - // the effectiveNodeIDs are equal. - nodeID := sov.effectiveNodeID() - if priorSOV.Weight < sov.Weight { - err = s.validators.AddWeight(sov.SubnetID, nodeID, sov.Weight-priorSOV.Weight) - } else if priorSOV.Weight > sov.Weight { - err = s.validators.RemoveWeight(sov.SubnetID, nodeID, priorSOV.Weight-sov.Weight) - } - } else { - // This validator's active status is changing. 
- err = errors.Join( - s.validators.RemoveWeight(sov.SubnetID, priorSOV.effectiveNodeID(), priorSOV.Weight), - addSoVToValidatorManager(s.validators, sov), - ) + // Modifying an existing validator + if priorSOV.isActive() == sov.isActive() { + // This validator's active status isn't changing. This means + // the effectiveNodeIDs are equal. + nodeID := sov.effectiveNodeID() + if priorSOV.Weight < sov.Weight { + err = s.validators.AddWeight(sov.SubnetID, nodeID, sov.Weight-priorSOV.Weight) + } else if priorSOV.Weight > sov.Weight { + err = s.validators.RemoveWeight(sov.SubnetID, nodeID, priorSOV.Weight-sov.Weight) } + } else { + // This validator's active status is changing. + err = errors.Join( + s.validators.RemoveWeight(sov.SubnetID, priorSOV.effectiveNodeID(), priorSOV.Weight), + addSoVToValidatorManager(s.validators, sov), + ) } case database.ErrNotFound: - if sov.isDeleted() { - // Deleting a non-existent validator is a noop. This can happen - // if the validator was added and then immediately removed. - continue - } - - // Adding a validator + // Adding a new validator err = addSoVToValidatorManager(s.validators, sov) } if err != nil { @@ -2719,6 +2737,10 @@ func (s *state) writeSubnetOnlyValidators() error { s.weightsCache.Put(subnetID, weight) } + // The SoV diff application is split into two loops to ensure that all + // deletions to the subnetIDNodeIDDB happen prior to any additions. + // Otherwise replacing an SoV by deleting it and then re-adding it with a + // different validationID could result in an inconsistent state. for validationID, sov := range s.sovDiff.modified { // Delete the prior validator if it exists var err error @@ -2732,30 +2754,45 @@ func (s *state) writeSubnetOnlyValidators() error { return err } - // Update the subnetIDNodeID mapping + if !sov.isDeleted() { + continue + } + var ( - isDeleted = sov.isDeleted() subnetIDNodeID = subnetIDNodeID{ subnetID: sov.SubnetID, nodeID: sov.NodeID, } subnetIDNodeIDKey = subnetIDNodeID.Marshal() ) - if isDeleted { - err = s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey) - } else { - err = s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]) - } - if err != nil { + if err := s.subnetIDNodeIDDB.Delete(subnetIDNodeIDKey); err != nil { return err } - s.subnetIDNodeIDCache.Put(subnetIDNodeID, !isDeleted) - if isDeleted { + s.subnetIDNodeIDCache.Put(subnetIDNodeID, false) + } + + for validationID, sov := range s.sovDiff.modified { + if sov.isDeleted() { continue } + // Update the subnetIDNodeID mapping + var ( + subnetIDNodeID = subnetIDNodeID{ + subnetID: sov.SubnetID, + nodeID: sov.NodeID, + } + subnetIDNodeIDKey = subnetIDNodeID.Marshal() + ) + if err := s.subnetIDNodeIDDB.Put(subnetIDNodeIDKey, validationID[:]); err != nil { + return err + } + + s.subnetIDNodeIDCache.Put(subnetIDNodeID, true) + // Add the new validator + var err error if sov.isActive() { s.activeSOVs.put(sov) err = putSubnetOnlyValidator(s.activeDB, sov) From c2ffd1733eb4b0f4ad4bd16ed026cb4831f2b946 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Thu, 31 Oct 2024 13:43:29 -0400 Subject: [PATCH 144/155] nit --- vms/platformvm/state/state.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 7f136ea49bef..a5d39ff0079d 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2387,8 +2387,8 @@ func (s *state) updateValidatorManager(updateValidators bool) error { } } - // Now the removed SoV validators have been deleted, perform additions and - 
// modifications. + // Now that the removed SoV validators have been deleted, perform additions + // and modifications. for validationID, sov := range s.sovDiff.modified { if sov.isDeleted() { continue From 66011f0069ad4df9462e6bb96f14e8325018d8d0 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 1 Nov 2024 13:50:29 -0400 Subject: [PATCH 145/155] Move caching logic --- vms/platformvm/state/state.go | 23 ++-- vms/platformvm/state/subnet_only_validator.go | 45 +++++-- .../state/subnet_only_validator_test.go | 111 ++++++++++++++---- 3 files changed, 135 insertions(+), 44 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index a5d39ff0079d..b7b1341bd23f 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -104,6 +104,8 @@ var ( HeightsIndexedKey = []byte("heights indexed") InitializedKey = []byte("initialized") BlocksReindexedKey = []byte("blocks reindexed") + + emptySoVCache = &cache.Empty[ids.ID, maybe.Maybe[SubnetOnlyValidator]]{} ) // Chain collects all methods to manage the state of the chain for block @@ -862,14 +864,7 @@ func (s *state) getPersistedSubnetOnlyValidator(validationID ids.ID) (SubnetOnly return sov, nil } - if maybeSOV, ok := s.inactiveCache.Get(validationID); ok { - if maybeSOV.IsNothing() { - return SubnetOnlyValidator{}, database.ErrNotFound - } - return maybeSOV.Value(), nil - } - - return getSubnetOnlyValidator(s.inactiveDB, validationID) + return getSubnetOnlyValidator(s.inactiveCache, s.inactiveDB, validationID) } func (s *state) HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool, error) { @@ -1942,7 +1937,7 @@ func (s *state) initValidatorSets() error { } // Load active ACP-77 validators - if err := s.activeSOVs.addStakers(s.validators); err != nil { + if err := s.activeSOVs.addStakersToValidatorManager(s.validators); err != nil { return err } @@ -2745,10 +2740,9 @@ func (s *state) writeSubnetOnlyValidators() error { // Delete the prior validator if it exists var err error if s.activeSOVs.delete(validationID) { - err = deleteSubnetOnlyValidator(s.activeDB, validationID) + err = deleteSubnetOnlyValidator(s.activeDB, emptySoVCache, validationID) } else { - s.inactiveCache.Put(validationID, maybe.Nothing[SubnetOnlyValidator]()) - err = deleteSubnetOnlyValidator(s.inactiveDB, validationID) + err = deleteSubnetOnlyValidator(s.inactiveDB, s.inactiveCache, validationID) } if err != nil { return err @@ -2795,10 +2789,9 @@ func (s *state) writeSubnetOnlyValidators() error { var err error if sov.isActive() { s.activeSOVs.put(sov) - err = putSubnetOnlyValidator(s.activeDB, sov) + err = putSubnetOnlyValidator(s.activeDB, emptySoVCache, sov) } else { - s.inactiveCache.Put(validationID, maybe.Some(sov)) - err = putSubnetOnlyValidator(s.inactiveDB, sov) + err = putSubnetOnlyValidator(s.inactiveDB, s.inactiveCache, sov) } if err != nil { return err diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 642b081800f0..556ba3dd27b9 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -10,6 +10,7 @@ import ( "github.com/google/btree" + "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" @@ -17,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/iterator" "github.com/ava-labs/avalanchego/utils/math" + 
"github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/vms/platformvm/block" ) @@ -184,7 +186,18 @@ func (v SubnetOnlyValidator) effectivePublicKeyBytes() []byte { return nil } -func getSubnetOnlyValidator(db database.KeyValueReader, validationID ids.ID) (SubnetOnlyValidator, error) { +func getSubnetOnlyValidator( + cache cache.Cacher[ids.ID, maybe.Maybe[SubnetOnlyValidator]], + db database.KeyValueReader, + validationID ids.ID, +) (SubnetOnlyValidator, error) { + if maybeSOV, ok := cache.Get(validationID); ok { + if maybeSOV.IsNothing() { + return SubnetOnlyValidator{}, database.ErrNotFound + } + return maybeSOV.Value(), nil + } + bytes, err := db.Get(validationID[:]) if err != nil { return SubnetOnlyValidator{}, err @@ -199,16 +212,34 @@ func getSubnetOnlyValidator(db database.KeyValueReader, validationID ids.ID) (Su return vdr, nil } -func putSubnetOnlyValidator(db database.KeyValueWriter, vdr SubnetOnlyValidator) error { - bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, vdr) +func putSubnetOnlyValidator( + db database.KeyValueWriter, + cache cache.Cacher[ids.ID, maybe.Maybe[SubnetOnlyValidator]], + sov SubnetOnlyValidator, +) error { + bytes, err := block.GenesisCodec.Marshal(block.CodecVersion, sov) if err != nil { return fmt.Errorf("failed to marshal SubnetOnlyValidator: %w", err) } - return db.Put(vdr.ValidationID[:], bytes) + if err := db.Put(sov.ValidationID[:], bytes); err != nil { + return err + } + + cache.Put(sov.ValidationID, maybe.Some(sov)) + return nil } -func deleteSubnetOnlyValidator(db database.KeyValueDeleter, validationID ids.ID) error { - return db.Delete(validationID[:]) +func deleteSubnetOnlyValidator( + db database.KeyValueDeleter, + cache cache.Cacher[ids.ID, maybe.Maybe[SubnetOnlyValidator]], + validationID ids.ID, +) error { + if err := db.Delete(validationID[:]); err != nil { + return err + } + + cache.Put(validationID, maybe.Nothing[SubnetOnlyValidator]()) + return nil } type subnetOnlyValidatorsDiff struct { @@ -368,7 +399,7 @@ func (a *activeSubnetOnlyValidators) newIterator() iterator.Iterator[SubnetOnlyV return iterator.FromTree(a.tree) } -func (a *activeSubnetOnlyValidators) addStakers(vdrs validators.Manager) error { +func (a *activeSubnetOnlyValidators) addStakersToValidatorManager(vdrs validators.Manager) error { for validationID, sov := range a.lookup { pk := bls.PublicKeyFromValidUncompressedBytes(sov.PublicKey) if err := vdrs.AddStaker(sov.SubnetID, sov.NodeID, pk, validationID, sov.Weight); err != nil { diff --git a/vms/platformvm/state/subnet_only_validator_test.go b/vms/platformvm/state/subnet_only_validator_test.go index 6b6c86520a66..b7399eee195b 100644 --- a/vms/platformvm/state/subnet_only_validator_test.go +++ b/vms/platformvm/state/subnet_only_validator_test.go @@ -9,11 +9,13 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -139,11 +141,8 @@ func TestSubnetOnlyValidator_constantsAreUnmodified(t *testing.T) { } func TestSubnetOnlyValidator_DatabaseHelpers(t *testing.T) { - require := require.New(t) - db := memdb.New() - sk, err := 
bls.NewSecretKey() - require.NoError(err) + require.NoError(t, err) pk := bls.PublicFromSecretKey(sk) pkBytes := bls.PublicKeyToUncompressedBytes(pk) @@ -154,7 +153,7 @@ func TestSubnetOnlyValidator_DatabaseHelpers(t *testing.T) { }, } remainingBalanceOwnerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &remainingBalanceOwner) - require.NoError(err) + require.NoError(t, err) var deactivationOwner fx.Owner = &secp256k1fx.OutputOwners{ Threshold: 1, @@ -163,9 +162,9 @@ func TestSubnetOnlyValidator_DatabaseHelpers(t *testing.T) { }, } deactivationOwnerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &deactivationOwner) - require.NoError(err) + require.NoError(t, err) - vdr := SubnetOnlyValidator{ + sov := SubnetOnlyValidator{ ValidationID: ids.GenerateTestID(), SubnetID: ids.GenerateTestID(), NodeID: ids.GenerateTestNodeID(), @@ -178,24 +177,92 @@ func TestSubnetOnlyValidator_DatabaseHelpers(t *testing.T) { EndAccumulatedFee: rand.Uint64(), // #nosec G404 } - // Validator hasn't been put on disk yet - gotVdr, err := getSubnetOnlyValidator(db, vdr.ValidationID) - require.ErrorIs(err, database.ErrNotFound) - require.Zero(gotVdr) + var ( + addedDB = memdb.New() + removedDB = memdb.New() + addedAndRemovedDB = memdb.New() + addedAndRemovedAndAddedDB = memdb.New() + + addedCache = &cache.LRU[ids.ID, maybe.Maybe[SubnetOnlyValidator]]{Size: 10} + removedCache = &cache.LRU[ids.ID, maybe.Maybe[SubnetOnlyValidator]]{Size: 10} + addedAndRemovedCache = &cache.LRU[ids.ID, maybe.Maybe[SubnetOnlyValidator]]{Size: 10} + addedAndRemovedAndAddedCache = &cache.LRU[ids.ID, maybe.Maybe[SubnetOnlyValidator]]{Size: 10} + ) // Place the validator on disk - require.NoError(putSubnetOnlyValidator(db, vdr)) + require.NoError(t, putSubnetOnlyValidator(addedDB, addedCache, sov)) + require.NoError(t, putSubnetOnlyValidator(addedAndRemovedDB, addedAndRemovedCache, sov)) + require.NoError(t, putSubnetOnlyValidator(addedAndRemovedAndAddedDB, addedAndRemovedAndAddedCache, sov)) - // Verify that the validator can be fetched from disk - gotVdr, err = getSubnetOnlyValidator(db, vdr.ValidationID) - require.NoError(err) - require.Equal(vdr, gotVdr) + // Remove the validator on disk + require.NoError(t, deleteSubnetOnlyValidator(removedDB, removedCache, sov.ValidationID)) + require.NoError(t, deleteSubnetOnlyValidator(addedAndRemovedDB, addedAndRemovedCache, sov.ValidationID)) + require.NoError(t, deleteSubnetOnlyValidator(addedAndRemovedAndAddedDB, addedAndRemovedAndAddedCache, sov.ValidationID)) - // Remove the validator from disk - require.NoError(deleteSubnetOnlyValidator(db, vdr.ValidationID)) + // Reintroduce the validator to disk + require.NoError(t, putSubnetOnlyValidator(addedAndRemovedAndAddedDB, addedAndRemovedAndAddedCache, sov)) - // Verify that the validator has been removed from disk - gotVdr, err = getSubnetOnlyValidator(db, vdr.ValidationID) - require.ErrorIs(err, database.ErrNotFound) - require.Zero(gotVdr) + addedTests := []struct { + name string + db database.Database + cache cache.Cacher[ids.ID, maybe.Maybe[SubnetOnlyValidator]] + }{ + { + name: "added in cache", + db: memdb.New(), + cache: addedCache, + }, + { + name: "added on disk", + db: addedDB, + cache: emptySoVCache, + }, + { + name: "added and removed and added in cache", + db: memdb.New(), + cache: addedAndRemovedAndAddedCache, + }, + { + name: "added and removed and added on disk", + db: addedAndRemovedAndAddedDB, + cache: emptySoVCache, + }, + } + for _, test := range addedTests { + t.Run(test.name, func(t *testing.T) { + 
require := require.New(t) + + gotSOV, err := getSubnetOnlyValidator(test.cache, test.db, sov.ValidationID) + require.NoError(err) + require.Equal(sov, gotSOV) + }) + } + + removedTests := []struct { + name string + db database.Database + cache cache.Cacher[ids.ID, maybe.Maybe[SubnetOnlyValidator]] + }{ + { + name: "empty", + db: memdb.New(), + cache: emptySoVCache, + }, + { + name: "removed from cache", + db: addedDB, + cache: removedCache, + }, + { + name: "removed from disk", + db: removedDB, + cache: emptySoVCache, + }, + } + for _, test := range removedTests { + t.Run(test.name, func(t *testing.T) { + _, err := getSubnetOnlyValidator(test.cache, test.db, sov.ValidationID) + require.ErrorIs(t, err, database.ErrNotFound) + }) + } } From d183148fe949cf9b7cec9e4914f52923964cdee5 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 1 Nov 2024 15:33:57 -0400 Subject: [PATCH 146/155] merged --- vms/platformvm/state/subnet_only_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index 8979da0cc9e3..c0a363d4e4a9 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -290,7 +290,7 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov Subne ) switch priorSOV, err := state.GetSubnetOnlyValidator(sov.ValidationID); err { case nil: - if !priorSOV.constantsAreUnmodified(sov) { + if !priorSOV.immutableFieldsAreUnmodified(sov) { return ErrMutatedSubnetOnlyValidator } From b1bb45872d423ddec5e9493dbcb165b27f885209 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 1 Nov 2024 15:36:20 -0400 Subject: [PATCH 147/155] nit --- vms/platformvm/state/state_test.go | 2 +- vms/platformvm/state/subnet_only_validator.go | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index a1a8feb6676e..2b28dcc0977c 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1997,7 +1997,7 @@ func TestSubnetOnlyValidators(t *testing.T) { } // TestLoadSubnetOnlyValidatorAndLegacy tests that the state can be loaded when -// there is a mix of legacy validators and subnet only validators in the same +// there is a mix of legacy validators and subnet-only validators in the same // subnet. 
func TestLoadSubnetOnlyValidatorAndLegacy(t *testing.T) { var ( diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index c0a363d4e4a9..3f5799e7da28 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -26,22 +26,22 @@ var ( _ btree.LessFunc[SubnetOnlyValidator] = SubnetOnlyValidator.Less _ utils.Sortable[SubnetOnlyValidator] = SubnetOnlyValidator{} - ErrMutatedSubnetOnlyValidator = errors.New("subnet only validator contains mutated constant fields") - ErrConflictingSubnetOnlyValidator = errors.New("subnet only validator contains conflicting subnetID + nodeID pair") - ErrDuplicateSubnetOnlyValidator = errors.New("subnet only validator contains duplicate subnetID + nodeID pair") + ErrMutatedSubnetOnlyValidator = errors.New("subnet-only validator contains mutated constant fields") + ErrConflictingSubnetOnlyValidator = errors.New("subnet-only validator contains conflicting subnetID + nodeID pair") + ErrDuplicateSubnetOnlyValidator = errors.New("subnet-only validator contains duplicate subnetID + nodeID pair") ) type SubnetOnlyValidators interface { // GetActiveSubnetOnlyValidatorsIterator returns an iterator of all the - // active subnet only validators in increasing order of EndAccumulatedFee. + // active subnet-only validators in increasing order of EndAccumulatedFee. GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[SubnetOnlyValidator], error) // NumActiveSubnetOnlyValidators returns the number of currently active - // subnet only validators. + // subnet-only validators. NumActiveSubnetOnlyValidators() int // WeightOfSubnetOnlyValidators returns the total active and inactive weight - // of subnet only validators on [subnetID]. + // of subnet-only validators on [subnetID]. WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) // GetSubnetOnlyValidator returns the validator with [validationID] if it @@ -57,9 +57,9 @@ type SubnetOnlyValidators interface { // validator is 0, the validator is removed. // // If inserting this validator attempts to modify any of the constant fields - // of the subnet only validator struct, an error will be returned. + // of the subnet-only validator struct, an error will be returned. // - // If inserting this validator would cause the total weight of subnet only + // If inserting this validator would cause the total weight of subnet-only // validators on a subnet to overflow MaxUint64, an error will be returned. 
// // If inserting this validator would cause there to be multiple validators From ad31107c4501d4c0a264e87bf31f5c6c81029962 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 1 Nov 2024 15:59:41 -0400 Subject: [PATCH 148/155] Add weight diff helpers --- vms/platformvm/state/stakers.go | 4 +- vms/platformvm/state/state.go | 10 ++- vms/platformvm/state/state_test.go | 111 ++++++++++------------------- 3 files changed, 48 insertions(+), 77 deletions(-) diff --git a/vms/platformvm/state/stakers.go b/vms/platformvm/state/stakers.go index 14e4dcf7b1ef..f076e23ab59b 100644 --- a/vms/platformvm/state/stakers.go +++ b/vms/platformvm/state/stakers.go @@ -283,7 +283,7 @@ func (d *diffValidator) WeightDiff() (ValidatorWeightDiff, error) { } for _, staker := range d.deletedDelegators { - if err := weightDiff.Add(true, staker.Weight); err != nil { + if err := weightDiff.Sub(staker.Weight); err != nil { return ValidatorWeightDiff{}, fmt.Errorf("failed to decrease node weight diff: %w", err) } } @@ -294,7 +294,7 @@ func (d *diffValidator) WeightDiff() (ValidatorWeightDiff, error) { for addedDelegatorIterator.Next() { staker := addedDelegatorIterator.Value() - if err := weightDiff.Add(false, staker.Weight); err != nil { + if err := weightDiff.Add(staker.Weight); err != nil { return ValidatorWeightDiff{}, fmt.Errorf("failed to increase node weight diff: %w", err) } } diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 09a22f8c27de..3368884da910 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -425,7 +425,15 @@ type ValidatorWeightDiff struct { Amount uint64 `serialize:"true"` } -func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error { +func (v *ValidatorWeightDiff) Add(amount uint64) error { + return v.add(false, amount) +} + +func (v *ValidatorWeightDiff) Sub(amount uint64) error { + return v.add(true, amount) +} + +func (v *ValidatorWeightDiff) add(negative bool, amount uint64) error { if v.Decrease == negative { var err error v.Amount, err = safemath.Add(v.Amount, amount) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 143f673b4e0f..f56204d1f208 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -671,29 +671,32 @@ func createPermissionlessDelegatorTx(subnetID ids.ID, delegatorData txs.Validato } func TestValidatorWeightDiff(t *testing.T) { + type op struct { + op func(*ValidatorWeightDiff, uint64) error + amount uint64 + } type test struct { name string - ops []func(*ValidatorWeightDiff) error + ops []op expected *ValidatorWeightDiff expectedErr error } + var ( + add = (*ValidatorWeightDiff).Add + sub = (*ValidatorWeightDiff).Sub + ) tests := []test{ { name: "no ops", - ops: []func(*ValidatorWeightDiff) error{}, expected: &ValidatorWeightDiff{}, expectedErr: nil, }, { name: "simple decrease", - ops: []func(*ValidatorWeightDiff) error{ - func(d *ValidatorWeightDiff) error { - return d.Add(true, 1) - }, - func(d *ValidatorWeightDiff) error { - return d.Add(true, 1) - }, + ops: []op{ + {sub, 1}, + {sub, 1}, }, expected: &ValidatorWeightDiff{ Decrease: true, @@ -703,26 +706,17 @@ func TestValidatorWeightDiff(t *testing.T) { }, { name: "decrease overflow", - ops: []func(*ValidatorWeightDiff) error{ - func(d *ValidatorWeightDiff) error { - return d.Add(true, math.MaxUint64) - }, - func(d *ValidatorWeightDiff) error { - return d.Add(true, 1) - }, + ops: []op{ + {sub, math.MaxUint64}, + {sub, 1}, }, - expected: &ValidatorWeightDiff{}, 
expectedErr: safemath.ErrOverflow, }, { name: "simple increase", - ops: []func(*ValidatorWeightDiff) error{ - func(d *ValidatorWeightDiff) error { - return d.Add(false, 1) - }, - func(d *ValidatorWeightDiff) error { - return d.Add(false, 1) - }, + ops: []op{ + {add, 1}, + {add, 1}, }, expected: &ValidatorWeightDiff{ Decrease: false, @@ -732,58 +726,24 @@ func TestValidatorWeightDiff(t *testing.T) { }, { name: "increase overflow", - ops: []func(*ValidatorWeightDiff) error{ - func(d *ValidatorWeightDiff) error { - return d.Add(false, math.MaxUint64) - }, - func(d *ValidatorWeightDiff) error { - return d.Add(false, 1) - }, + ops: []op{ + {add, math.MaxUint64}, + {add, 1}, }, - expected: &ValidatorWeightDiff{}, expectedErr: safemath.ErrOverflow, }, { name: "varied use", - ops: []func(*ValidatorWeightDiff) error{ - // Add to 0 - func(d *ValidatorWeightDiff) error { - return d.Add(false, 2) // Value 2 - }, - // Subtract from positive number - func(d *ValidatorWeightDiff) error { - return d.Add(true, 1) // Value 1 - }, - // Subtract from positive number - // to make it negative - func(d *ValidatorWeightDiff) error { - return d.Add(true, 3) // Value -2 - }, - // Subtract from a negative number - func(d *ValidatorWeightDiff) error { - return d.Add(true, 3) // Value -5 - }, - // Add to a negative number - func(d *ValidatorWeightDiff) error { - return d.Add(false, 1) // Value -4 - }, - // Add to a negative number - // to make it positive - func(d *ValidatorWeightDiff) error { - return d.Add(false, 5) // Value 1 - }, - // Add to a positive number - func(d *ValidatorWeightDiff) error { - return d.Add(false, 1) // Value 2 - }, - // Get to zero - func(d *ValidatorWeightDiff) error { - return d.Add(true, 2) // Value 0 - }, - // Subtract from zero - func(d *ValidatorWeightDiff) error { - return d.Add(true, 2) // Value -2 - }, + ops: []op{ + {add, 2}, // = 2 + {sub, 1}, // = 1 + {sub, 3}, // = -2 + {sub, 3}, // = -5 + {add, 1}, // = -4 + {add, 5}, // = 1 + {add, 1}, // = 2 + {sub, 2}, // = 0 + {sub, 2}, // = -2 }, expected: &ValidatorWeightDiff{ Decrease: true, @@ -796,10 +756,13 @@ func TestValidatorWeightDiff(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - diff := &ValidatorWeightDiff{} - errs := wrappers.Errs{} + + var ( + diff = &ValidatorWeightDiff{} + errs = wrappers.Errs{} + ) for _, op := range tt.ops { - errs.Add(op(diff)) + errs.Add(op.op(diff, op.amount)) } require.ErrorIs(errs.Err, tt.expectedErr) if tt.expectedErr != nil { From cee236e010c43d4e1ffdfef2552065a55423556a Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 1 Nov 2024 16:47:48 -0400 Subject: [PATCH 149/155] nit --- vms/platformvm/state/state_test.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index f56204d1f208..60b92c58dc44 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -688,9 +688,8 @@ func TestValidatorWeightDiff(t *testing.T) { ) tests := []test{ { - name: "no ops", - expected: &ValidatorWeightDiff{}, - expectedErr: nil, + name: "no ops", + expected: &ValidatorWeightDiff{}, }, { name: "simple decrease", @@ -702,7 +701,6 @@ func TestValidatorWeightDiff(t *testing.T) { Decrease: true, Amount: 2, }, - expectedErr: nil, }, { name: "decrease overflow", @@ -722,7 +720,6 @@ func TestValidatorWeightDiff(t *testing.T) { Decrease: false, Amount: 2, }, - expectedErr: nil, }, { name: "increase overflow", @@ -749,7 +746,6 @@ func 
TestValidatorWeightDiff(t *testing.T) { Decrease: true, Amount: 2, }, - expectedErr: nil, }, } From 900eba34150de4285c59a71c757a13644b2d8e37 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 1 Nov 2024 16:51:57 -0400 Subject: [PATCH 150/155] add -> addOrSub --- vms/platformvm/state/state.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 3368884da910..1e45b6f07826 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -426,15 +426,15 @@ type ValidatorWeightDiff struct { } func (v *ValidatorWeightDiff) Add(amount uint64) error { - return v.add(false, amount) + return v.addOrSub(false, amount) } func (v *ValidatorWeightDiff) Sub(amount uint64) error { - return v.add(true, amount) + return v.addOrSub(true, amount) } -func (v *ValidatorWeightDiff) add(negative bool, amount uint64) error { - if v.Decrease == negative { +func (v *ValidatorWeightDiff) addOrSub(sub bool, amount uint64) error { + if v.Decrease == sub { var err error v.Amount, err = safemath.Add(v.Amount, amount) return err @@ -444,7 +444,7 @@ func (v *ValidatorWeightDiff) add(negative bool, amount uint64) error { v.Amount -= amount } else { v.Amount = safemath.AbsDiff(v.Amount, amount) - v.Decrease = negative + v.Decrease = sub } return nil } From 7cbf31b3f759f20235838146a705a90ae0a3df40 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Fri, 1 Nov 2024 20:00:29 -0400 Subject: [PATCH 151/155] fix merge --- vms/platformvm/state/state.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 07dce8b248a6..ad09f28457b7 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -2496,7 +2496,7 @@ func (s *state) calculateValidatorDiffs() (map[subnetIDNodeID]*validatorDiff, er nodeID: priorSOV.effectiveNodeID(), } diff := getOrDefault(changes, subnetIDNodeID) - if err := diff.weightDiff.Add(true, priorSOV.Weight); err != nil { + if err := diff.weightDiff.Sub(priorSOV.Weight); err != nil { return nil, err } diff.prevPublicKey = priorSOV.effectivePublicKeyBytes() @@ -2516,7 +2516,7 @@ func (s *state) calculateValidatorDiffs() (map[subnetIDNodeID]*validatorDiff, er nodeID: sov.effectiveNodeID(), } diff := getOrDefault(changes, subnetIDNodeID) - if err := diff.weightDiff.Add(false, sov.Weight); err != nil { + if err := diff.weightDiff.Add(sov.Weight); err != nil { return nil, err } diff.newPublicKey = sov.effectivePublicKeyBytes() From 69837c104c0aca3874ed6badc220234eacebceb3 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 4 Nov 2024 14:31:34 -0500 Subject: [PATCH 152/155] improve caching --- vms/platformvm/state/state.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index ad09f28457b7..61a697ad31d0 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -850,7 +850,13 @@ func (s *state) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { return weight, nil } - return database.WithDefault(database.GetUInt64, s.weightsDB, subnetID[:], 0) + weight, err := database.WithDefault(database.GetUInt64, s.weightsDB, subnetID[:], 0) + if err != nil { + return 0, err + } + + s.weightsCache.Put(subnetID, weight) + return weight, nil } func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { @@ -889,7 +895,13 @@ func (s *state) 
HasSubnetOnlyValidator(subnetID ids.ID, nodeID ids.NodeID) (bool } key := subnetIDNodeID.Marshal() - return s.subnetIDNodeIDDB.Has(key) + has, err := s.subnetIDNodeIDDB.Has(key) + if err != nil { + return false, err + } + + s.subnetIDNodeIDCache.Put(subnetIDNodeID, has) + return has, nil } func (s *state) PutSubnetOnlyValidator(sov SubnetOnlyValidator) error { From d37e9f3b116146cf52d4e3f76944e268bdfb52d8 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 4 Nov 2024 19:00:33 -0500 Subject: [PATCH 153/155] nit --- vms/platformvm/state/state.go | 1 - 1 file changed, 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 61a697ad31d0..67be6717fa44 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1670,7 +1670,6 @@ func (s *state) loadExpiry() error { func (s *state) loadActiveSubnetOnlyValidators() error { it := s.activeDB.NewIterator() defer it.Release() - for it.Next() { key := it.Key() validationID, err := ids.ToID(key) From 9dc642a4c832f9a0305f60fcb37b8cf288253c54 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 4 Nov 2024 19:07:06 -0500 Subject: [PATCH 154/155] num -> net for possibly negative value --- vms/platformvm/state/diff.go | 2 +- vms/platformvm/state/state.go | 2 +- vms/platformvm/state/subnet_only_validator.go | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 4b8922f74492..30ee112a41b9 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -213,7 +213,7 @@ func (d *diff) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[Subnet } func (d *diff) NumActiveSubnetOnlyValidators() int { - return d.parentActiveSOVs + d.sovDiff.numAddedActive + return d.parentActiveSOVs + d.sovDiff.netAddedActive } func (d *diff) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 67be6717fa44..3ec8527a258b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -838,7 +838,7 @@ func (s *state) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[Subne } func (s *state) NumActiveSubnetOnlyValidators() int { - return s.activeSOVs.len() + s.sovDiff.numAddedActive + return s.activeSOVs.len() + s.sovDiff.netAddedActive } func (s *state) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { diff --git a/vms/platformvm/state/subnet_only_validator.go b/vms/platformvm/state/subnet_only_validator.go index bb9af0d78ca2..109fd23ea9f2 100644 --- a/vms/platformvm/state/subnet_only_validator.go +++ b/vms/platformvm/state/subnet_only_validator.go @@ -250,7 +250,7 @@ func deleteSubnetOnlyValidator( } type subnetOnlyValidatorsDiff struct { - numAddedActive int // May be negative + netAddedActive int // May be negative modifiedTotalWeight map[ids.ID]uint64 // subnetID -> totalWeight modified map[ids.ID]SubnetOnlyValidator modifiedHasNodeIDs map[subnetIDNodeID]bool @@ -344,9 +344,9 @@ func (d *subnetOnlyValidatorsDiff) putSubnetOnlyValidator(state Chain, sov Subne switch { case prevActive && !newActive: - d.numAddedActive-- + d.netAddedActive-- case !prevActive && newActive: - d.numAddedActive++ + d.netAddedActive++ } if prevSOV, ok := d.modified[sov.ValidationID]; ok { From 22de2b13b1e8bcb9024074250d98f6c8eb0e9c0d Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 5 Nov 2024 10:52:20 -0500 Subject: [PATCH 155/155] Address PR comments --- vms/platformvm/state/diff.go | 
34 +++++++++++++++--------------- vms/platformvm/state/state.go | 10 +++++---- vms/platformvm/state/state_test.go | 24 +++++++++++++++++++++ 3 files changed, 47 insertions(+), 21 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 30ee112a41b9..317e4e210142 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -35,11 +35,11 @@ type diff struct { parentID ids.ID stateVersions Versions - timestamp time.Time - feeState gas.State - sovExcess gas.Gas - accruedFees uint64 - parentActiveSOVs int + timestamp time.Time + feeState gas.State + sovExcess gas.Gas + accruedFees uint64 + parentNumActiveSOVs int // Subnet ID --> supply of native asset of the subnet currentSupply map[ids.ID]uint64 @@ -79,17 +79,17 @@ func NewDiff( return nil, fmt.Errorf("%w: %s", ErrMissingParentState, parentID) } return &diff{ - parentID: parentID, - stateVersions: stateVersions, - timestamp: parentState.GetTimestamp(), - feeState: parentState.GetFeeState(), - sovExcess: parentState.GetSoVExcess(), - accruedFees: parentState.GetAccruedFees(), - parentActiveSOVs: parentState.NumActiveSubnetOnlyValidators(), - expiryDiff: newExpiryDiff(), - sovDiff: newSubnetOnlyValidatorsDiff(), - subnetOwners: make(map[ids.ID]fx.Owner), - subnetConversions: make(map[ids.ID]SubnetConversion), + parentID: parentID, + stateVersions: stateVersions, + timestamp: parentState.GetTimestamp(), + feeState: parentState.GetFeeState(), + sovExcess: parentState.GetSoVExcess(), + accruedFees: parentState.GetAccruedFees(), + parentNumActiveSOVs: parentState.NumActiveSubnetOnlyValidators(), + expiryDiff: newExpiryDiff(), + sovDiff: newSubnetOnlyValidatorsDiff(), + subnetOwners: make(map[ids.ID]fx.Owner), + subnetConversions: make(map[ids.ID]SubnetConversion), }, nil } @@ -213,7 +213,7 @@ func (d *diff) GetActiveSubnetOnlyValidatorsIterator() (iterator.Iterator[Subnet } func (d *diff) NumActiveSubnetOnlyValidators() int { - return d.parentActiveSOVs + d.sovDiff.netAddedActive + return d.parentNumActiveSOVs + d.sovDiff.netAddedActive } func (d *diff) WeightOfSubnetOnlyValidators(subnetID ids.ID) (uint64, error) { diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 3ec8527a258b..12eebd953132 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -872,7 +872,7 @@ func (s *state) GetSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator // getPersistedSubnetOnlyValidator returns the currently persisted // SubnetOnlyValidator with the given validationID. It is guaranteed that any -// returned validator is either active or inactive. +// returned validator is either active or inactive (not deleted). 
func (s *state) getPersistedSubnetOnlyValidator(validationID ids.ID) (SubnetOnlyValidator, error) { if sov, ok := s.activeSOVs.get(validationID); ok { return sov, nil @@ -2506,7 +2506,7 @@ func (s *state) calculateValidatorDiffs() (map[subnetIDNodeID]*validatorDiff, er subnetID: priorSOV.SubnetID, nodeID: priorSOV.effectiveNodeID(), } - diff := getOrDefault(changes, subnetIDNodeID) + diff := getOrSetDefault(changes, subnetIDNodeID) if err := diff.weightDiff.Sub(priorSOV.Weight); err != nil { return nil, err } @@ -2526,7 +2526,7 @@ func (s *state) calculateValidatorDiffs() (map[subnetIDNodeID]*validatorDiff, er subnetID: sov.SubnetID, nodeID: sov.effectiveNodeID(), } - diff := getOrDefault(changes, subnetIDNodeID) + diff := getOrSetDefault(changes, subnetIDNodeID) if err := diff.weightDiff.Add(sov.Weight); err != nil { return nil, err } @@ -2571,7 +2571,9 @@ func (s *state) writeValidatorDiffs(height uint64) error { return nil } -func getOrDefault[K comparable, V any](m map[K]*V, k K) *V { +// getOrSetDefault returns the value at k in m if it exists. If it doesn't +// exist, it sets m[k] to a new value and returns that value. +func getOrSetDefault[K comparable, V any](m map[K]*V, k K) *V { if v, ok := m[k]; ok { return v } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 0a27b92d7dde..342365a6db46 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -1774,6 +1774,27 @@ func TestSubnetOnlyValidators(t *testing.T) { }, }, }, + { + name: "add multiple inactive", + sovs: []SubnetOnlyValidator{ + { + ValidationID: ids.GenerateTestID(), + SubnetID: sov.SubnetID, + NodeID: ids.GenerateTestNodeID(), + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + { + ValidationID: sov.ValidationID, + SubnetID: sov.SubnetID, + NodeID: sov.NodeID, + PublicKey: pkBytes, + Weight: 1, // Not removed + EndAccumulatedFee: 0, // Inactive + }, + }, + }, } for _, test := range tests { @@ -1788,6 +1809,9 @@ func TestSubnetOnlyValidators(t *testing.T) { subnetIDs set.Set[ids.ID] ) for _, sov := range test.initial { + // The codec creates zero length slices rather than leaving them + // as nil, so we need to populate the slices for later reflect + // based equality checks. sov.RemainingBalanceOwner = []byte{} sov.DeactivationOwner = []byte{}
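
The "Move caching logic" and "improve caching" commits above lean on a single read-through idea: a cache of maybe.Maybe values remembers "present with this value" as well as "known to be deleted", so a lookup that follows a removal never has to hit the database, and reads that do hit the database can seed the cache for the next caller. What follows is a minimal standalone sketch of that pattern, not code taken from the patches themselves: the store type is hypothetical, a plain map stands in for cache.LRU, and a reduced Maybe type stands in for utils/maybe.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// Maybe is a reduced stand-in for utils/maybe.Maybe: it either holds a value
// or explicitly holds nothing.
type Maybe[T any] struct {
	value    T
	hasValue bool
}

func Some[T any](v T) Maybe[T] { return Maybe[T]{value: v, hasValue: true} }
func Nothing[T any]() Maybe[T] { return Maybe[T]{} }

func (m Maybe[T]) IsNothing() bool { return !m.hasValue }
func (m Maybe[T]) Value() T        { return m.value }

// store pairs a persistent map (standing in for the database) with a cache of
// Maybe values (standing in for the LRU shared by the get/put/delete helpers).
type store[K comparable, V any] struct {
	db    map[K]V
	cache map[K]Maybe[V]
}

func newStore[K comparable, V any]() *store[K, V] {
	return &store[K, V]{
		db:    make(map[K]V),
		cache: make(map[K]Maybe[V]),
	}
}

// get consults the cache first; a cached Nothing short-circuits to errNotFound
// without a database read. On a cache miss it reads the database and seeds the
// cache, the populate-on-read step the "improve caching" commit adds for the
// weights and subnetID+nodeID caches.
func (s *store[K, V]) get(k K) (V, error) {
	if m, ok := s.cache[k]; ok {
		if m.IsNothing() {
			var zero V
			return zero, errNotFound
		}
		return m.Value(), nil
	}
	v, ok := s.db[k]
	if !ok {
		var zero V
		return zero, errNotFound
	}
	s.cache[k] = Some(v)
	return v, nil
}

// put writes through to the database and records Some(v) in the cache,
// mirroring putSubnetOnlyValidator.
func (s *store[K, V]) put(k K, v V) {
	s.db[k] = v
	s.cache[k] = Some(v)
}

// remove deletes the value and records Nothing in the cache, mirroring
// deleteSubnetOnlyValidator, so later lookups of the removed key are answered
// negatively without touching the database.
func (s *store[K, V]) remove(k K) {
	delete(s.db, k)
	s.cache[k] = Nothing[V]()
}

func main() {
	s := newStore[string, uint64]()
	s.put("exampleSubnetWeight", 7)

	w, err := s.get("exampleSubnetWeight")
	fmt.Println(w, err) // 7 <nil>

	s.remove("exampleSubnetWeight")
	_, err = s.get("exampleSubnetWeight")
	fmt.Println(errors.Is(err, errNotFound)) // true, served by the cached Nothing entry
}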