testing: add waitForPeers
peterjan committed Apr 22, 2024
1 parent 15c2659 commit de6679e
Showing 2 changed files with 19 additions and 19 deletions.
16 changes: 15 additions & 1 deletion internal/test/e2e/cluster.go
@@ -459,6 +459,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster {

     if nHosts > 0 {
         cluster.AddHostsBlocking(nHosts)
+        cluster.WaitForPeers()
         cluster.WaitForContracts()
         cluster.WaitForContractSet(test.ContractSet, nHosts)
         cluster.WaitForAccounts()
@@ -657,6 +658,19 @@ func (c *TestCluster) WaitForContractSetContracts(set string, n int) {
     })
 }
 
+func (c *TestCluster) WaitForPeers() {
+    c.tt.Helper()
+    c.tt.Retry(300, 100*time.Millisecond, func() error {
+        peers, err := c.Bus.SyncerPeers(context.Background())
+        if err != nil {
+            return err
+        } else if len(peers) == 0 {
+            return errors.New("no peers found")
+        }
+        return nil
+    })
+}
+
 func (c *TestCluster) RemoveHost(host *Host) {
     c.tt.Helper()
     c.tt.OK(host.Close())
@@ -686,7 +700,7 @@ func (c *TestCluster) AddHost(h *Host) {
     // Add the host
     c.hosts = append(c.hosts, h)
 
-    // Fund host with one blockreward
+    // Fund host from bus.
     fundAmt := types.Siacoins(25e3)
     var scos []types.SiacoinOutput
     for i := 0; i < 10; i++ {
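The new WaitForPeers helper polls the bus through the cluster's Retry test utility: 300 attempts spaced 100ms apart give the syncer roughly 30 seconds to report at least one connected peer before the test fails. A minimal sketch of that polling pattern, assuming a plain retry loop (the retry function below is a hypothetical stand-in, not the repository's actual implementation):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // retry polls fn up to tries times, sleeping interval between
    // attempts, and wraps the last error if fn never succeeds.
    func retry(tries int, interval time.Duration, fn func() error) error {
        var err error
        for i := 0; i < tries; i++ {
            if err = fn(); err == nil {
                return nil
            }
            time.Sleep(interval)
        }
        return fmt.Errorf("gave up after %d attempts: %w", tries, err)
    }

    func main() {
        attempts := 0
        err := retry(5, 10*time.Millisecond, func() error {
            attempts++
            if attempts < 3 {
                // same error WaitForPeers returns while the peer list is empty
                return errors.New("no peers found")
            }
            return nil
        })
        fmt.Println(err, attempts) // <nil> 3
    }

Calling WaitForPeers before WaitForContracts presumably reduces flakiness here: contract formation depends on the cluster's syncer actually being connected to the hosts it just added.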
22 changes: 4 additions & 18 deletions internal/test/e2e/pruning_test.go
@@ -20,12 +20,11 @@ func TestHostPruning(t *testing.T) {
     }
 
     // create a new test cluster
-    cluster := newTestCluster(t, clusterOptsDefault)
+    cluster := newTestCluster(t, testClusterOptions{hosts: 1})
     defer cluster.Shutdown()
 
     // convenience variables
     b := cluster.Bus
-    w := cluster.Worker
     a := cluster.Autopilot
     tt := cluster.tt

@@ -45,26 +44,13 @@ func TestHostPruning(t *testing.T) {
         tt.OK(b.RecordHostScans(context.Background(), his))
     }
 
-    // add a host
-    hosts := cluster.AddHosts(1)
-    h1 := hosts[0]
-
-    // fetch the host
-    h, err := b.Host(context.Background(), h1.PublicKey())
-    tt.OK(err)
-
-    // scan the host (lastScan needs to be > 0 for downtime to start counting)
-    tt.OKAll(w.RHPScan(context.Background(), h1.PublicKey(), h.NetAddress, 0))
-
-    // block the host
-    tt.OK(b.UpdateHostBlocklist(context.Background(), []string{h1.PublicKey().String()}, nil, false))
+    // shut down the worker manually, this will flush any interactions
+    cluster.ShutdownWorker(context.Background())
 
     // remove it from the cluster manually
+    h1 := cluster.hosts[0]
     cluster.RemoveHost(h1)
 
-    // shut down the worker manually, this will flush any interactions
-    cluster.ShutdownWorker(context.Background())
-
     // record 9 failed interactions, right before the pruning threshold, and
     // wait for the autopilot loop to finish at least once
     recordFailedInteractions(9, h1.PublicKey())
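The recordFailedInteractions helper referenced above is defined earlier in the test and does not appear in this diff; judging from the visible call to b.RecordHostScans, it builds a batch of failed scan records and submits them through the bus. A hedged sketch of that shape (hostScan and its fields are hypothetical stand-ins for the actual API types):

    package example

    import "time"

    // hostScan is a hypothetical stand-in for the bus API's scan record.
    type hostScan struct {
        HostKey   string
        Timestamp time.Time
        Success   bool
    }

    // recordFailedScans builds n failed scan records for hk, spacing the
    // timestamps an hour apart so the host accrues downtime, and submits
    // them through record (in the real test, a call to RecordHostScans).
    func recordFailedScans(n int, hk string, record func([]hostScan) error) error {
        scans := make([]hostScan, n)
        now := time.Now()
        for i := range scans {
            now = now.Add(time.Hour)
            scans[i] = hostScan{HostKey: hk, Timestamp: now, Success: false}
        }
        return record(scans)
    }

Shutting the worker down before removing the host flushes any buffered interactions first, which presumably keeps the count of recorded failures at exactly 9, just below the pruning threshold.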
