e2e: Add integration test for network transition from v1 to v2 (#1740)
Closes #1546
ChrisSchinnerl authored Dec 18, 2024
2 parents 9d3a308 + 4e8ab84 commit 693ad3b
Showing 18 changed files with 419 additions and 148 deletions.
16 changes: 11 additions & 5 deletions autopilot/contractor/contractor.go
@@ -463,7 +463,9 @@ func activeContracts(ctx context.Context, bus Bus, logger *zap.SugaredLogger) ([
 	// fetch active contracts
 	logger.Info("fetching active contracts")
 	start := time.Now()
-	metadatas, err := bus.Contracts(ctx, api.ContractsOpts{FilterMode: api.ContractFilterModeActive})
+	metadatas, err := bus.Contracts(ctx, api.ContractsOpts{
+		FilterMode: api.ContractFilterModeActive,
+	})
 	if err != nil {
 		return nil, err
 	}
@@ -907,7 +909,9 @@ func performContractFormations(ctx *mCtx, bus Bus, cr contractReviser, hf hostFi
 	wanted := int(ctx.WantedContracts())
 
 	// fetch all active contracts
-	contracts, err := bus.Contracts(ctx, api.ContractsOpts{})
+	contracts, err := bus.Contracts(ctx, api.ContractsOpts{
+		FilterMode: api.ContractFilterModeActive,
+	})
 	if err != nil {
 		return 0, fmt.Errorf("failed to fetch contracts: %w", err)
 	}
@@ -1057,7 +1061,9 @@ func performHostChecks(ctx *mCtx, bus Bus, logger *zap.SugaredLogger) error {
 
 func performPostMaintenanceTasks(ctx *mCtx, bus Bus, alerter alerts.Alerter, cc contractChecker, rb revisionBroadcaster, logger *zap.SugaredLogger) error {
 	// fetch some contract and host info
-	allContracts, err := bus.Contracts(ctx, api.ContractsOpts{})
+	allContracts, err := bus.Contracts(ctx, api.ContractsOpts{
+		FilterMode: api.ContractFilterModeActive,
+	})
 	if err != nil {
 		return fmt.Errorf("failed to fetch all contracts: %w", err)
 	}
@@ -1122,7 +1128,7 @@ func performV2ContractMigration(ctx *mCtx, bus Bus, cr contractReviser, logger *
 	}
 
 	contracts, err := bus.Contracts(ctx, api.ContractsOpts{
-		FilterMode: api.ContractFilterModeAll, // TODO: change to usable
+		FilterMode: api.ContractFilterModeActive,
 	})
 	if err != nil {
 		logger.With(zap.Error(err)).Error("failed to fetch contracts for migration")
@@ -1154,7 +1160,7 @@ func performV2ContractMigration(ctx *mCtx, bus Bus, cr contractReviser, logger *
 	}
 
 	// form a new contract with the same host
-	contract, _, err := cr.formContract(ctx, bus, host, InitialContractFunding, logger)
+	_, _, err = cr.formContract(ctx, bus, host, InitialContractFunding, logger)
 	if err != nil {
 		logger.Errorf("failed to form a v2 contract with the host")
 		continue
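
A note on the recurring change above: every Bus.Contracts call in the contractor now names its filter explicitly instead of defaulting to the full set. The test at the bottom of this commit exercises all three modes; a minimal sketch of the calling pattern (ContractsOpts and the FilterMode constants come from this repo's api package; the helper name and surrounding code are illustrative):

    // countByFilter is a hypothetical helper showing the three filter
    // modes this commit touches.
    func countByFilter(ctx context.Context, bus Bus) error {
    	for _, mode := range []string{
    		api.ContractFilterModeAll,
    		api.ContractFilterModeActive,
    		api.ContractFilterModeArchived,
    	} {
    		contracts, err := bus.Contracts(ctx, api.ContractsOpts{FilterMode: mode})
    		if err != nil {
    			return err
    		}
    		fmt.Printf("%s: %d contracts\n", mode, len(contracts))
    	}
    	return nil
    }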
7 changes: 7 additions & 0 deletions bus/bus.go
@@ -794,6 +794,13 @@ func (b *Bus) renewContractV1(ctx context.Context, cs consensus.State, gp api.Go
 	// derive the renter key
 	renterKey := b.masterKey.DeriveContractKey(c.HostKey)
 
+	// cap v1 renewals to the v2 require height since the host won't allow us to
+	// form contracts beyond that
+	v2ReqHeight := b.cm.TipState().Network.HardforkV2.RequireHeight
+	if endHeight >= v2ReqHeight {
+		endHeight = v2ReqHeight - 1
+	}
+
 	// fetch the revision
 	rev, err := b.rhp3Client.Revision(ctx, c.ID, c.HostKey, hs.SiamuxAddr())
 	if err != nil {
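
The same clamp is applied to fresh formations in bus/routes.go below. Distilled, the rule both sites implement (a standalone sketch; capEndHeight is a hypothetical name, not a helper in this diff):

    // capEndHeight keeps a v1 contract's end height below the v2 require
    // height, since hosts reject v1 contracts that would end past it.
    func capEndHeight(endHeight, v2RequireHeight uint64) uint64 {
    	if endHeight >= v2RequireHeight {
    		return v2RequireHeight - 1
    	}
    	return endHeight
    }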
7 changes: 7 additions & 0 deletions bus/routes.go
@@ -2323,6 +2323,13 @@ func (b *Bus) contractsFormHandler(jc jape.Context) {
 		return
 	}
 
+	// cap v1 formations to the v2 require height since the host won't allow
+	// us to form contracts beyond that
+	v2ReqHeight := b.cm.TipState().Network.HardforkV2.RequireHeight
+	if rfr.EndHeight >= v2ReqHeight {
+		rfr.EndHeight = v2ReqHeight - 1
+	}
+
 	// check gouging
 	breakdown := gc.CheckSettings(settings)
 	if breakdown.Gouging() {
1 change: 1 addition & 0 deletions cmd/renterd/config.go
@@ -95,6 +95,7 @@ func defaultConfig() config.Config {
 			ID:                       "",
 			AccountsRefillInterval:   defaultAccountRefillInterval,
 			BusFlushInterval:         5 * time.Second,
+			CacheExpiry:              5 * time.Minute,
 
 			DownloadMaxOverdrive:     5,
 			DownloadOverdriveTimeout: 3 * time.Second,
1 change: 1 addition & 0 deletions config/config.go
@@ -124,6 +124,7 @@ type (
 		UploadMaxMemory               uint64        `yaml:"uploadMaxMemory,omitempty"`
 		UploadMaxOverdrive            uint64        `yaml:"uploadMaxOverdrive,omitempty"`
 		AllowUnauthenticatedDownloads bool          `yaml:"allowUnauthenticatedDownloads,omitempty"`
+		CacheExpiry                   time.Duration `yaml:"cacheExpiry,omitempty"`
 	}
 
 	// Autopilot contains the configuration for an autopilot.
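
The field rides the existing yaml tags, so it can be set like any other worker option; a minimal sketch of configuring it programmatically (values illustrative, mirroring the default added in cmd/renterd/config.go above):

    wcfg := config.Worker{
    	ID:               "worker",
    	BusFlushInterval: 5 * time.Second,
    	CacheExpiry:      5 * time.Minute, // assumed semantics: how long the worker serves cached bus data
    }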
6 changes: 3 additions & 3 deletions go.mod
@@ -14,10 +14,10 @@ require (
 	github.com/mattn/go-sqlite3 v1.14.24
 	github.com/montanaflynn/stats v0.7.1
 	github.com/shopspring/decimal v1.4.0
-	go.sia.tech/core v0.8.0
-	go.sia.tech/coreutils v0.8.1-0.20241217101542-5d6fc37cbb94
+	go.sia.tech/core v0.8.1-0.20241217152409-7950a7ca324b
+	go.sia.tech/coreutils v0.8.1-0.20241217153531-b5e84c03d17f
 	go.sia.tech/gofakes3 v0.0.5
-	go.sia.tech/hostd v1.1.3-0.20241217094733-44bba16e129e
+	go.sia.tech/hostd v1.1.3-0.20241218083322-ae9c8a971fe0
 	go.sia.tech/jape v0.12.1
 	go.sia.tech/mux v1.3.0
 	go.sia.tech/web/renterd v0.69.0
12 changes: 6 additions & 6 deletions go.sum
@@ -55,14 +55,14 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
 go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
-go.sia.tech/core v0.8.0 h1:J6vZQlVhpj4bTVeuC2GKkfkGEs8jf0j651Kl1wwOxjg=
-go.sia.tech/core v0.8.0/go.mod h1:Wj1qzvpMM2rqEQjwWJEbCBbe9VWX/mSJUu2Y2ABl1QA=
-go.sia.tech/coreutils v0.8.1-0.20241217101542-5d6fc37cbb94 h1:1fbD59wfyA1+5LmLYNh+ukNpkbtEmQgcXYlRUZTdr+M=
-go.sia.tech/coreutils v0.8.1-0.20241217101542-5d6fc37cbb94/go.mod h1:ml5MefDMWCvPKNeRVIGHmyF5tv27C9h1PiI/iOiTGLg=
+go.sia.tech/core v0.8.1-0.20241217152409-7950a7ca324b h1:VRkb6OOX1KawLQwuqOEHLcjha8gxVX0tAyu2Dyoq8Ek=
+go.sia.tech/core v0.8.1-0.20241217152409-7950a7ca324b/go.mod h1:Wj1qzvpMM2rqEQjwWJEbCBbe9VWX/mSJUu2Y2ABl1QA=
+go.sia.tech/coreutils v0.8.1-0.20241217153531-b5e84c03d17f h1:TafvnqJgx/+0zX/QMSOOkf5HfMqaoe/73eO515fUucI=
+go.sia.tech/coreutils v0.8.1-0.20241217153531-b5e84c03d17f/go.mod h1:xhIbFjjkzmCF8Dt73ZvquaBQCT2Dje7AKYBRAesn93w=
 go.sia.tech/gofakes3 v0.0.5 h1:vFhVBUFbKE9ZplvLE2w4TQxFMQyF8qvgxV4TaTph+Vw=
 go.sia.tech/gofakes3 v0.0.5/go.mod h1:LXEzwGw+OHysWLmagleCttX93cJZlT9rBu/icOZjQ54=
-go.sia.tech/hostd v1.1.3-0.20241217094733-44bba16e129e h1:VWdrQiZKnoWxB3Qtxkolph+SL6/qharIX8dkqZ7i1d0=
-go.sia.tech/hostd v1.1.3-0.20241217094733-44bba16e129e/go.mod h1:9jRImPfriQKypd7O6O46BQzRkyx+0tRabNKxQxJxDR8=
+go.sia.tech/hostd v1.1.3-0.20241218083322-ae9c8a971fe0 h1:QtF8l+pHZq6gPyDyuQoMv8GdwU6lvz39y4I34S3cuvo=
+go.sia.tech/hostd v1.1.3-0.20241218083322-ae9c8a971fe0/go.mod h1:9jRImPfriQKypd7O6O46BQzRkyx+0tRabNKxQxJxDR8=
 go.sia.tech/jape v0.12.1 h1:xr+o9V8FO8ScRqbSaqYf9bjj1UJ2eipZuNcI1nYousU=
 go.sia.tech/jape v0.12.1/go.mod h1:wU+h6Wh5olDjkPXjF0tbZ1GDgoZ6VTi4naFw91yyWC4=
 go.sia.tech/mux v1.3.0 h1:hgR34IEkqvfBKUJkAzGi31OADeW2y7D6Bmy/Jcbop9c=
8 changes: 2 additions & 6 deletions internal/accounts/accounts.go
@@ -448,12 +448,8 @@ func (a *Manager) refillAccount(ctx context.Context, contract api.ContractMetada
 }
 
 func (a *Account) Token() rhpv4.AccountToken {
-	t := rhpv4.AccountToken{
-		Account:    rhpv4.Account(a.key.PublicKey()),
-		ValidUntil: time.Now().Add(5 * time.Minute),
-	}
-	t.Signature = a.key.SignHash(t.SigHash())
-	return t
+	account := rhpv4.Account(a.key.PublicKey())
+	return account.Token(a.key, a.acc.HostKey)
 }
 
 // WithSync syncs an account's balance with the bus. To do so, the account is
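
The hand-rolled token above predates host-key binding; the replacement delegates to the Account.Token helper from core's rhp/v4 package, passing the host key the token is scoped to. Roughly, the helper is expected to do the equivalent of the deleted code plus filling in the host key (a sketch of assumed behavior, not the helper's verbatim source):

    // buildToken sketches the assumed behavior of account.Token(renterKey, hostKey):
    // a short-lived token bound to both the account and the host, then signed.
    func buildToken(renterKey types.PrivateKey, hostKey types.PublicKey) rhpv4.AccountToken {
    	t := rhpv4.AccountToken{
    		Account:    rhpv4.Account(renterKey.PublicKey()),
    		HostKey:    hostKey,
    		ValidUntil: time.Now().Add(5 * time.Minute),
    	}
    	t.Signature = renterKey.SignHash(t.SigHash())
    	return t
    }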
13 changes: 11 additions & 2 deletions internal/bus/chainsubscriber.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"strings"
 	"sync"
 	"time"
 
@@ -71,6 +72,7 @@ type (
 
 	Wallet interface {
 		FundV2Transaction(txn *types.V2Transaction, amount types.Currency, useUnconfirmed bool) (types.ChainIndex, []int, error)
+		ReleaseInputs(txns []types.Transaction, v2txns []types.V2Transaction)
 		SignV2Inputs(txn *types.V2Transaction, toSign []int)
 		UpdateChainState(tx wallet.UpdateTx, reverted []chain.RevertUpdate, applied []chain.ApplyUpdate) error
 	}
@@ -433,8 +435,15 @@ func (s *chainSubscriber) broadcastExpiredFileContractResolutions(tx sql.ChainUp
 
 		// verify txn and broadcast it
 		_, err = s.cm.AddV2PoolTransactions(basis, []types.V2Transaction{txn})
-		if err != nil {
-			s.logger.Errorf("failed to broadcast contract expiration txn: %v", err)
+		if err != nil &&
+			(strings.Contains(err.Error(), "has already been resolved") ||
+				strings.Contains(err.Error(), "not present in the accumulator")) {
+			s.wallet.ReleaseInputs(nil, []types.V2Transaction{txn})
+			s.logger.With(zap.Error(err)).Debug("failed to broadcast contract expiration txn")
+			continue
+		} else if err != nil {
+			s.logger.With(zap.Error(err)).Error("failed to broadcast contract expiration txn")
+			s.wallet.ReleaseInputs(nil, []types.V2Transaction{txn})
 			continue
 		}
 		s.s.BroadcastV2TransactionSet(basis, []types.V2Transaction{txn})
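
Context for the ReleaseInputs addition: funding a transaction locks wallet UTXOs, so a txn that never makes it into the pool must hand its inputs back or they stay unusable until the lock expires. A sketch of the fund/sign/broadcast shape with the cleanup this hunk adds (interface methods as declared above; the helper name and ChainManager/Syncer parameters are illustrative):

    // fundSignBroadcast funds and signs a v2 transaction, releasing its
    // inputs if the pool rejects it so the UTXOs can be reused immediately.
    func fundSignBroadcast(w Wallet, cm ChainManager, s Syncer, txn types.V2Transaction, amount types.Currency) error {
    	basis, toSign, err := w.FundV2Transaction(&txn, amount, true)
    	if err != nil {
    		return err
    	}
    	w.SignV2Inputs(&txn, toSign)
    	if _, err := cm.AddV2PoolTransactions(basis, []types.V2Transaction{txn}); err != nil {
    		w.ReleaseInputs(nil, []types.V2Transaction{txn}) // unlock the funded UTXOs
    		return err
    	}
    	s.BroadcastV2TransactionSet(basis, []types.V2Transaction{txn})
    	return nil
    }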
3 changes: 2 additions & 1 deletion internal/test/e2e/cluster.go
@@ -636,7 +636,7 @@ func announceHosts(hosts []*Host) error {
 	for _, host := range hosts {
 		settings := defaultHostSettings
 		settings.NetAddress = host.rhp4Listener.Addr().(*net.TCPAddr).IP.String()
-		if err := host.settings.UpdateSettings(settings); err != nil {
+		if err := host.UpdateSettings(settings); err != nil {
 			return err
 		}
 		if err := host.settings.Announce(); err != nil {
@@ -1000,6 +1000,7 @@ func testDBCfg() dbConfig {
 func testWorkerCfg() config.Worker {
 	return config.Worker{
 		AccountsRefillInterval:   10 * time.Millisecond,
+		CacheExpiry:              100 * time.Millisecond,
 		ID:                       "worker",
 		BusFlushInterval:         testBusFlushInterval,
 		DownloadOverdriveTimeout: 500 * time.Millisecond,
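
The new test below leans heavily on the harness's tt.Retry to wait for asynchronous convergence (contract formation, migration, health updates). For readers unfamiliar with the harness, a minimal standalone equivalent of that pattern (a hypothetical helper, not the harness's actual implementation):

    // retry runs fn up to n times, sleeping between attempts, and fails
    // the test with the last error if it never succeeds.
    func retry(t *testing.T, n int, sleep time.Duration, fn func() error) {
    	t.Helper()
    	var err error
    	for i := 0; i < n; i++ {
    		if err = fn(); err == nil {
    			return
    		}
    		time.Sleep(sleep)
    	}
    	t.Fatal(err)
    }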
176 changes: 175 additions & 1 deletion internal/test/e2e/cluster_test.go
@@ -2602,7 +2602,7 @@ func TestDownloadAllHosts(t *testing.T) {
 	// block the new host but unblock the old one
 	for _, host := range cluster.hosts {
 		if host.PublicKey() == newHost {
-			toBlock := []string{host.settings.Settings().NetAddress, host.RHPv4Addr()}
+			toBlock := []string{host.RHPv2Addr(), host.RHPv4Addr()}
 			tt.OK(b.UpdateHostBlocklist(context.Background(), toBlock, randomHost, false))
 		}
 	}
@@ -2615,6 +2615,7 @@
 			tt.OK(host.UpdateSettings(settings))
 		}
 	}
+	time.Sleep(testWorkerCfg().CacheExpiry) // expire cache
 
 	// download the object
 	dst = new(bytes.Buffer)
@@ -2833,6 +2834,7 @@ func TestContractFundsReturnWhenHostOffline(t *testing.T) {
 
 	// mine until the contract is expired
 	cluster.mineBlocks(types.VoidAddress, contract.WindowEnd-cs.BlockHeight)
+	cluster.sync()
 
 	expectedBalance := wallet.Confirmed.Add(contract.InitialRenterFunds).Sub(fee.Mul64(ibus.ContractResolutionTxnWeight))
 	cluster.tt.Retry(10, time.Second, func() error {
@@ -2913,3 +2915,175 @@ func TestResyncAccounts(t *testing.T) {
 		return w.DownloadObject(context.Background(), bytes.NewBuffer(nil), testBucket, path, api.DownloadObjectOptions{})
 	})
 }
+
+func TestV1ToV2Transition(t *testing.T) {
+	// create a chain manager with a custom network that starts before the v2
+	// allow height
+	network, genesis := testNetwork()
+	network.HardforkV2.AllowHeight = 100
+	network.HardforkV2.RequireHeight = 200 // 100 blocks after the allow height
+	store, state, err := chain.NewDBStore(chain.NewMemDB(), network, genesis)
+	if err != nil {
+		t.Fatal(err)
+	}
+	cm := chain.NewManager(store, state)
+
+	// custom autopilot config
+	apCfg := test.AutopilotConfig
+	apCfg.Contracts.Amount = 2
+	apCfg.Contracts.Period = 1000 // make sure we handle trying to form contracts with a proof height after the v2 require height
+	apCfg.Contracts.RenewWindow = 50
+
+	// create a test cluster
+	nHosts := 3
+	cluster := newTestCluster(t, testClusterOptions{
+		autopilotConfig: &apCfg,
+		hosts:           0, // add hosts manually later
+		cm:              cm,
+		uploadPacking:   false, // disable to make sure we don't accidentally serve data from disk
+	})
+	defer cluster.Shutdown()
+	tt := cluster.tt
+
+	// add hosts and wait for contracts to form
+	cluster.AddHosts(nHosts)
+
+	// make sure we are still before the v2 allow height
+	if cm.Tip().Height >= network.HardforkV2.AllowHeight {
+		t.Fatal("should be before the v2 allow height")
+	}
+
+	// we should have 2 v1 contracts
+	var contracts []api.ContractMetadata
+	tt.Retry(100, 100*time.Millisecond, func() error {
+		contracts, err = cluster.Bus.Contracts(context.Background(), api.ContractsOpts{FilterMode: api.ContractFilterModeAll})
+		tt.OK(err)
+		if len(contracts) != nHosts-1 {
+			return fmt.Errorf("expected %v contracts, got %v", nHosts-1, len(contracts))
+		}
+		return nil
+	})
+	contractHosts := make(map[types.PublicKey]struct{})
+	for _, c := range contracts {
+		if c.V2 {
+			t.Fatal("should not have formed v2 contracts")
+		} else if c.EndHeight() != network.HardforkV2.RequireHeight-1 {
+			t.Fatalf("expected end height to be %v, got %v", network.HardforkV2.RequireHeight-1, c.EndHeight())
+		}
+		contractHosts[c.HostKey] = struct{}{}
+	}
+
+	// sanity check number of hosts just to be safe
+	if len(contractHosts) != nHosts-1 {
+		t.Fatalf("expected %v unique hosts, got %v", nHosts-1, len(contractHosts))
+	}
+
+	// upload some data
+	data := frand.Bytes(100)
+	tt.OKAll(cluster.Worker.UploadObject(context.Background(), bytes.NewReader(data), testBucket, "foo", api.UploadObjectOptions{
+		MinShards:   1,
+		TotalShards: nHosts - 1,
+	}))
+
+	// mine until we reach the v2 allow height
+	cluster.MineBlocks(network.HardforkV2.AllowHeight - cm.Tip().Height)
+
+	// slowly mine a few more blocks to give the renter time to react
+	for i := 0; i < 5; i++ {
+		cluster.MineBlocks(1)
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	// check that we have 1 archived contract for every contract we had before
+	var archivedContracts []api.ContractMetadata
+	tt.Retry(100, 100*time.Millisecond, func() error {
+		archivedContracts, err = cluster.Bus.Contracts(context.Background(), api.ContractsOpts{FilterMode: api.ContractFilterModeArchived})
+		tt.OK(err)
+		if len(archivedContracts) != nHosts-1 {
+			return fmt.Errorf("expected %v archived contracts, got %v", nHosts-1, len(archivedContracts))
+		}
+		return nil
+	})
+
+	// they should be on nHosts-1 unique hosts
+	usedHosts := make(map[types.PublicKey]struct{})
+	for _, c := range archivedContracts {
+		if c.ArchivalReason != "migrated to v2" {
+			t.Fatalf("expected archival reason to be 'migrated to v2', got %v", c.ArchivalReason)
+		}
+		usedHosts[c.HostKey] = struct{}{}
+	}
+	if len(usedHosts) != nHosts-1 {
+		t.Fatalf("expected %v unique hosts, got %v", nHosts-1, len(usedHosts))
+	}
+
+	// we should have the same number of active contracts
+	activeContracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{FilterMode: api.ContractFilterModeActive})
+	tt.OK(err)
+	if len(activeContracts) != nHosts-1 {
+		t.Fatalf("expected %v active contracts, got %v", nHosts-1, len(activeContracts))
+	}
+
+	// they should be on the same hosts as before
+	for _, c := range activeContracts {
+		if _, ok := usedHosts[c.HostKey]; !ok {
+			t.Fatal("host not found in used hosts")
+		} else if !c.V2 {
+			t.Fatal("expected contract to be v2, got v1", c.ID, c.ArchivalReason)
+		}
+		delete(usedHosts, c.HostKey)
+	}
+
+	tt.Retry(100, 100*time.Millisecond, func() error {
+		// check health is 1
+		object, err := cluster.Bus.Object(context.Background(), testBucket, "foo", api.GetObjectOptions{})
+		tt.OK(err)
+		if object.Health != 1 {
+			return fmt.Errorf("expected health to be 1, got %v", object.Health)
+		}
+		slab := object.Slabs[0]
+
+		// check that the contracts now contain the data
+		activeContracts, err = cluster.Bus.Contracts(context.Background(), api.ContractsOpts{FilterMode: api.ContractFilterModeActive})
+		tt.OK(err)
+		for _, c := range activeContracts {
+			// check revision
+			rev, err := cluster.Bus.ContractRevision(context.Background(), c.ID)
+			tt.OK(err)
+			if rev.Size != rhpv4.SectorSize {
+				return fmt.Errorf("expected revision size to be %v, got %v", rhpv4.SectorSize, rev.Size)
+			}
+			// check local metadata
+			if c.Size != rhpv4.SectorSize {
+				return fmt.Errorf("expected contract size to be %v, got %v", rhpv4.SectorSize, c.Size)
+			}
+			// one of the shards should be on this contract
+			var found bool
+			for _, shard := range slab.Shards {
+				for _, fcid := range shard.Contracts[c.HostKey] {
+					found = found || fcid == c.ID
+				}
+			}
+			if !found {
+				t.Fatal("expected one of the slab's shards to be on this contract")
+			}
+		}
+		return nil
+	})
+
+	// download the file to make sure it's still available
+	// NOTE: the first try fails because the accounts appear unfunded; the test
+	// host keeps separate account managers for rhp3 and rhp4
+	tt.FailAll(cluster.Worker.DownloadObject(context.Background(), bytes.NewBuffer(nil), testBucket, "foo", api.DownloadObjectOptions{}))
+
+	// subsequent tries succeed
+	tt.Retry(100, 100*time.Millisecond, func() error {
+		buf := new(bytes.Buffer)
+		if err := cluster.Worker.DownloadObject(context.Background(), buf, testBucket, "foo", api.DownloadObjectOptions{}); err != nil {
+			return err
+		} else if !bytes.Equal(data, buf.Bytes()) {
+			t.Fatal("data mismatch")
+		}
+		return nil
+	})
+}