From 931b23edf889110999d1eb2a28a6b2626d584b5c Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 13 Mar 2024 14:09:06 +0100 Subject: [PATCH 01/16] stores: add TestContractMetricsQueryPlan --- stores/metrics.go | 7 +++++- stores/sql_test.go | 53 ++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 8 deletions(-) diff --git a/stores/metrics.go b/stores/metrics.go index 333ed8a42..3f069bd0c 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -555,11 +555,16 @@ func (s *SQLStore) findAggregatedContractPeriods(start time.Time, n uint64, inte return fmt.Errorf("failed to fetch distinct contract ids: %w", err) } + var indexHint string + if !isSQLite(tx) { + indexHint = "USE INDEX (idx_contracts_fcid_timestamp)" + } + for intervalStart := start; intervalStart.Before(end); intervalStart = intervalStart.Add(interval) { intervalEnd := intervalStart.Add(interval) for _, fcid := range fcids { var metrics []dbContractMetric - err := tx.Raw("SELECT * FROM contracts WHERE contracts.timestamp >= ? AND contracts.timestamp < ? AND contracts.fcid = ? LIMIT 1", unixTimeMS(intervalStart), unixTimeMS(intervalEnd), fileContractID(fcid)). + err := tx.Raw(fmt.Sprintf("SELECT * FROM contracts %s WHERE contracts.timestamp >= ? AND contracts.timestamp < ? AND contracts.fcid = ? LIMIT 1", indexHint), unixTimeMS(intervalStart), unixTimeMS(intervalEnd), fileContractID(fcid)). Scan(&metrics).Error if err != nil { return fmt.Errorf("failed to fetch contract metrics: %w", err) diff --git a/stores/sql_test.go b/stores/sql_test.go index 776e3e10e..2a7686cea 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -332,20 +332,27 @@ type sqliteQueryPlan struct { Detail string `json:"detail"` } -func (p sqliteQueryPlan) usesIndex() bool { +func (p sqliteQueryPlan) usesIndex(index string) bool { d := strings.ToLower(p.Detail) - return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") + if index == "" { + return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") + } + return strings.Contains(d, fmt.Sprintf("using index %s", index)) } //nolint:tagliatelle type mysqlQueryPlan struct { Extra string `json:"Extra"` PossibleKeys string `json:"possible_keys"` + Key string `json:"key"` } -func (p mysqlQueryPlan) usesIndex() bool { - d := strings.ToLower(p.Extra) - return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") +func (p mysqlQueryPlan) usesIndex(index string) bool { + if index == "" { + d := strings.ToLower(p.Extra) + return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") + } + return p.Key == index } func TestQueryPlan(t *testing.T) { @@ -385,20 +392,52 @@ func TestQueryPlan(t *testing.T) { var explain sqliteQueryPlan if err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { t.Fatal(err) - } else if !explain.usesIndex() { + } else if !explain.usesIndex("") { t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) } } else { var explain mysqlQueryPlan if err := ss.db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { t.Fatal(err) - } else if !explain.usesIndex() { + } else if !explain.usesIndex("") { t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) } } } } +func TestContractMetricsQueryPlan(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + query := "SELECT * FROM contracts WHERE contracts.timestamp >= 1 
AND contracts.timestamp < 2 AND contracts.fcid = '' LIMIT 1" + queryWithHint := strings.Replace(query, "WHERE", "USE INDEX (idx_contracts_fcid_timestamp) WHERE", 1) + + if isSQLite(ss.dbMetrics) { + // in SQLite the query uses the index we want by default + var explain sqliteQueryPlan + if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("index 'idx_contracts_fcid_timestamp' not used in query '%s', plan %+v", query, explain) + } + } else { + var explain mysqlQueryPlan + if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex("") || explain.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("index 'idx_contracts_fcid_timestamp' not expected to be used in query '%s' although it should use an index, plan %+v", query, explain) + } + + // update query to specify the index + if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN %s;", queryWithHint)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("index 'idx_contracts_fcid_timestamp' should've been used in query '%s', plan %+v", query, explain) + } + } +} + func TestApplyUpdatesErr(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() From f3d76768bcb860d741aad35cbaf6cef3821dbcf5 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 13 Mar 2024 14:24:51 +0100 Subject: [PATCH 02/16] stores: add helpers to clean up TestQueryPlan and TestContractMetricsQueryPlan --- stores/sql_test.go | 74 ++++++++++++++++++++++++++-------------------- 1 file changed, 42 insertions(+), 32 deletions(-) diff --git a/stores/sql_test.go b/stores/sql_test.go index 2a7686cea..170edbf87 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -388,20 +388,11 @@ func TestQueryPlan(t *testing.T) { } for _, query := range queries { - if isSQLite(ss.db) { - var explain sqliteQueryPlan - if err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { - t.Fatal(err) - } else if !explain.usesIndex("") { - t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) - } - } else { - var explain mysqlQueryPlan - if err := ss.db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { - t.Fatal(err) - } else if !explain.usesIndex("") { - t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) - } + plan := queryPlan(ss.db) + if err := explainQuery(ss.db, query, plan); err != nil { + t.Fatal(err) + } else if !plan.usesIndex("") { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, plan) } } } @@ -409,35 +400,54 @@ func TestQueryPlan(t *testing.T) { func TestContractMetricsQueryPlan(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() + db := ss.dbMetrics - query := "SELECT * FROM contracts WHERE contracts.timestamp >= 1 AND contracts.timestamp < 2 AND contracts.fcid = '' LIMIT 1" - queryWithHint := strings.Replace(query, "WHERE", "USE INDEX (idx_contracts_fcid_timestamp) WHERE", 1) + query := "SELECT * FROM contracts c WHERE c.timestamp >= 1 AND c.timestamp < 2 AND c.fcid = '' LIMIT 1" + plan := queryPlan(db) + if err := explainQuery(db, query, plan); err != nil { + t.Fatal(err) + } - if isSQLite(ss.dbMetrics) { - // in SQLite the query uses the index we want by default - var explain 
sqliteQueryPlan - if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { - t.Fatal(err) - } else if !explain.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("index 'idx_contracts_fcid_timestamp' not used in query '%s', plan %+v", query, explain) + if isSQLite(db) { + // SQLite uses the index by default + if !plan.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("unexpected query plan %+v", plan) } } else { - var explain mysqlQueryPlan - if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { - t.Fatal(err) - } else if !explain.usesIndex("") || explain.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("index 'idx_contracts_fcid_timestamp' not expected to be used in query '%s' although it should use an index, plan %+v", query, explain) + // MySQL uses an index, but not 'idx_contracts_fcid_timestamp' + if !plan.usesIndex("") || plan.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("unexpected query plan %+v", plan) } - // update query to specify the index - if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN %s;", queryWithHint)).Scan(&explain).Error; err != nil { + // redo the query with hint + queryWithHint := strings.Replace(query, "WHERE", "USE INDEX (idx_contracts_fcid_timestamp) WHERE", 1) + if err := explainQuery(db, queryWithHint, plan); err != nil { t.Fatal(err) - } else if !explain.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("index 'idx_contracts_fcid_timestamp' should've been used in query '%s', plan %+v", query, explain) + } + + // assert it uses 'idx_contracts_fcid_timestamp' now + if !plan.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("unexpected query plan %+v", plan) } } } +func queryPlan(db *gorm.DB) interface{ usesIndex(index string) bool } { + if isSQLite(db) { + return &sqliteQueryPlan{} + } + return &mysqlQueryPlan{} +} + +func explainQuery(db *gorm.DB, query string, res interface{}) (err error) { + if isSQLite(db) { + err = db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&res).Error + } else { + err = db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&res).Error + } + return +} + func TestApplyUpdatesErr(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() From 8e47893580dae2e8403e6de56e36670bc94359f4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 18 Mar 2024 15:06:25 +0100 Subject: [PATCH 03/16] autopilot: no more gouging on collateral --- api/setting.go | 4 ---- autopilot/autopilot.go | 1 - autopilot/hostscore.go | 2 +- build/env_default.go | 1 - internal/test/config.go | 1 - worker/gouging.go | 12 ------------ 6 files changed, 1 insertion(+), 20 deletions(-) diff --git a/api/setting.go b/api/setting.go index 47785c9aa..86162c0a8 100644 --- a/api/setting.go +++ b/api/setting.go @@ -38,10 +38,6 @@ type ( // GougingSettings contain some price settings used in price gouging. 
GougingSettings struct {
-		// MinMaxCollateral is the minimum value for 'MaxCollateral' in the host's
-		// price settings
-		MinMaxCollateral types.Currency `json:"minMaxCollateral"`
-
 		// MaxRPCPrice is the maximum allowed base price for RPCs
 		MaxRPCPrice types.Currency `json:"maxRPCPrice"`
 
diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go
index c89049286..dc5b84dfc 100644
--- a/autopilot/autopilot.go
+++ b/autopilot/autopilot.go
@@ -804,7 +804,6 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu
 
 		// these are not optimised, so we keep the same values as the user
 		// provided
-		MinMaxCollateral:              gs.MinMaxCollateral,
 		HostBlockHeightLeeway:         gs.HostBlockHeightLeeway,
 		MinPriceTableValidity:         gs.MinPriceTableValidity,
 		MinAccountExpiry:              gs.MinAccountExpiry,
diff --git a/autopilot/hostscore.go b/autopilot/hostscore.go
index e8d9ca9b9..3c26dce42 100644
--- a/autopilot/hostscore.go
+++ b/autopilot/hostscore.go
@@ -161,7 +161,7 @@ func collateralScore(cfg api.AutopilotConfig, pt rhpv3.HostPriceTable, allocatio
 	cutoffMultiplier := uint64(4)
 
 	if expectedCollateral.Cmp(cutoff) < 0 {
-		return 0 // expectedCollateral <= cutoff -> score is 0
+		return math.SmallestNonzeroFloat64 // expectedCollateral < cutoff -> score is basically 0
 	} else if expectedCollateral.Cmp(cutoff.Mul64(cutoffMultiplier)) >= 0 {
 		return 1 // expectedCollateral is 4x cutoff -> score is 1
 	} else {
diff --git a/build/env_default.go b/build/env_default.go
index 8820ec6ae..83003de60 100644
--- a/build/env_default.go
+++ b/build/env_default.go
@@ -22,7 +22,6 @@ var (
 	// configured with on startup. These values can be adjusted using the
 	// settings API.
 	DefaultGougingSettings = api.GougingSettings{
-		MinMaxCollateral: types.Siacoins(10),            // at least up to 10 SC per contract
 		MaxRPCPrice:      types.Siacoins(1).Div64(1000), // 1mS per RPC
 		MaxContractPrice: types.Siacoins(1),             // 1 SC per contract
 		MaxDownloadPrice: types.Siacoins(3000),          // 3000 SC per 1 TiB
diff --git a/internal/test/config.go b/internal/test/config.go
index 7553fa16d..68a5fff5b 100644
--- a/internal/test/config.go
+++ b/internal/test/config.go
@@ -39,7 +39,6 @@ var (
 	}
 
 	GougingSettings = api.GougingSettings{
-		MinMaxCollateral: types.Siacoins(10),            // at least up to 10 SC per contract
 		MaxRPCPrice:      types.Siacoins(1).Div64(1000), // 1mS per RPC
 		MaxContractPrice: types.Siacoins(10),            // 10 SC per contract
 		MaxDownloadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB
diff --git a/worker/gouging.go b/worker/gouging.go
index a7b2078a1..e8e362040 100644
--- a/worker/gouging.go
+++ b/worker/gouging.go
@@ -171,14 +171,6 @@ func checkPriceGougingHS(gs api.GougingSettings, hs *rhpv2.HostSettings) error {
 		return fmt.Errorf("contract price exceeds max: %v > %v", hs.ContractPrice, gs.MaxContractPrice)
 	}
 
-	// check max collateral
-	if hs.MaxCollateral.IsZero() {
-		return errors.New("MaxCollateral of host is 0")
-	}
-	if hs.MaxCollateral.Cmp(gs.MinMaxCollateral) < 0 {
-		return fmt.Errorf("MaxCollateral is below minimum: %v < %v", hs.MaxCollateral, gs.MinMaxCollateral)
-	}
-
 	// check max EA balance
 	if hs.MaxEphemeralAccountBalance.Cmp(gs.MinMaxEphemeralAccountBalance) < 0 {
 		return fmt.Errorf("'MaxEphemeralAccountBalance' is less than the allowed minimum value, %v < %v", hs.MaxEphemeralAccountBalance, gs.MinMaxEphemeralAccountBalance)
@@ -219,10 +211,6 @@ func checkPriceGougingPT(gs api.GougingSettings, cs api.ConsensusState, txnFee t
 	if pt.MaxCollateral.IsZero() {
 		return errors.New("MaxCollateral of host is 0")
 	}
-	if 
pt.MaxCollateral.Cmp(gs.MinMaxCollateral) < 0 { - return fmt.Errorf("MaxCollateral is below minimum: %v < %v", pt.MaxCollateral, gs.MinMaxCollateral) - } - // check ReadLengthCost - should be 1H as it's unused by hosts if types.NewCurrency64(1).Cmp(pt.ReadLengthCost) < 0 { return fmt.Errorf("ReadLengthCost of host is %v but should be %v", pt.ReadLengthCost, types.NewCurrency64(1)) From 82d74327861b6249e68eb4509932094e1d898387 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 10:55:04 +0100 Subject: [PATCH 04/16] bus: add filter mode to HostsOptions --- api/host.go | 12 ++++-- autopilot/autopilot.go | 12 +++--- autopilot/autopilot_test.go | 47 ++++++++++---------- autopilot/contractor.go | 10 ++--- autopilot/hostfilter.go | 6 ++- autopilot/hostinfo.go | 8 ++-- autopilot/scanner.go | 2 +- autopilot/scanner_test.go | 10 +++-- bus/bus.go | 21 +++++++-- bus/client/hosts.go | 4 +- internal/test/e2e/blocklist_test.go | 6 +-- internal/test/e2e/cluster_test.go | 2 +- internal/test/e2e/pruning_test.go | 4 +- stores/hostdb.go | 30 +++++++++---- stores/hostdb_test.go | 67 +++++++++++++++++------------ 15 files changed, 146 insertions(+), 95 deletions(-) diff --git a/api/host.go b/api/host.go index aea80a9fe..ba66ffd58 100644 --- a/api/host.go +++ b/api/host.go @@ -70,9 +70,10 @@ type ( // Option types. type ( - GetHostsOptions struct { - Offset int - Limit int + HostsOptions struct { + Offset int + Limit int + FilterMode string } HostsForScanningOptions struct { MaxLastScan TimeRFC3339 @@ -95,13 +96,16 @@ func DefaultSearchHostOptions() SearchHostOptions { } } -func (opts GetHostsOptions) Apply(values url.Values) { +func (opts HostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) } if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } + if opts.FilterMode != "" { + values.Set("filterMode", opts.FilterMode) + } } func (opts HostsForScanningOptions) Apply(values url.Values) { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index c89049286..d8a760265 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -54,10 +54,10 @@ type Bus interface { // hostdb Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb.Host, error) + Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.Host, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) // metrics RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error @@ -196,7 +196,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { state := ap.State() // fetch hosts - hosts, err := ap.bus.Hosts(ctx, api.GetHostsOptions{}) + hosts, err := ap.bus.Hosts(ctx, api.HostsOptions{}) if jc.Check("failed to get hosts", err) != nil { return } @@ -735,7 +735,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { jc.Encode(hosts) } -func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { +func countUsableHosts(cfg api.AutopilotConfig, cs 
api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) @@ -749,7 +749,7 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. // evaluateConfig evaluates the given configuration and if the gouging settings // are too strict for the number of contracts required by 'cfg', it will provide // a recommendation on how to loosen it. -func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (resp api.ConfigEvaluationResponse) { +func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (resp api.ConfigEvaluationResponse) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) resp.Hosts = uint64(len(hosts)) @@ -865,7 +865,7 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu // optimiseGougingSetting tries to optimise one field of the gouging settings to // try and hit the target number of contracts. -func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.Host) bool { +func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.HostInfo) bool { if cfg.Contracts.Amount == 0 { return true // nothing to do } diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go index f818c312b..9ebafe675 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/autopilot_test.go @@ -14,31 +14,34 @@ import ( func TestOptimiseGougingSetting(t *testing.T) { // create 10 hosts that should all be usable - var hosts []hostdb.Host + var hosts []hostdb.HostInfo for i := 0; i < 10; i++ { - hosts = append(hosts, hostdb.Host{ - KnownSince: time.Unix(0, 0), - PriceTable: hostdb.HostPriceTable{ - HostPriceTable: rhpv3.HostPriceTable{ - CollateralCost: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), + hosts = append(hosts, hostdb.HostInfo{ + Host: hostdb.Host{ + KnownSince: time.Unix(0, 0), + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{ + CollateralCost: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + }, }, + Settings: rhpv2.HostSettings{ + AcceptingContracts: true, + Collateral: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + Version: "1.6.0", + }, + Interactions: hostdb.Interactions{ + Uptime: time.Hour * 1000, + LastScan: time.Now(), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + TotalScans: 100, + }, + LastAnnouncement: time.Unix(0, 0), + Scanned: true, }, - Settings: rhpv2.HostSettings{ - AcceptingContracts: true, - Collateral: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), - Version: "1.6.0", - }, - Interactions: hostdb.Interactions{ - Uptime: time.Hour * 1000, - LastScan: time.Now(), - LastScanSuccess: true, - SecondToLastScanSuccess: true, - TotalScans: 100, - }, - LastAnnouncement: time.Unix(0, 0), - 
Scanned: true, + Blocked: false, }) } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 83e12a206..47a03480f 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -249,7 +249,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.Hosts(ctx, api.GetHostsOptions{}) + hosts, err := c.ap.bus.Hosts(ctx, api.HostsOptions{}) if err != nil { return false, err } @@ -777,7 +777,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts host.PriceTable.HostBlockHeight = cs.BlockHeight // decide whether the host is still good - usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host.Host, minScore, contract.FileSize()) + usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, contract.FileSize()) if !usable { reasons := unusableResult.reasons() toStopUsing[fcid] = strings.Join(reasons, ",") @@ -1297,7 +1297,7 @@ func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uin return minScore } -func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { +func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { start := time.Now() // fetch consensus state @@ -1311,7 +1311,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) // select unused hosts that passed a scan - var unused []hostdb.Host + var unused []hostdb.HostInfo var excluded, notcompletedscan int for _, h := range hosts { // filter out used hosts @@ -1348,7 +1348,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us h.PriceTable.HostBlockHeight = cs.BlockHeight usable, result := isUsableHost(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) if usable { - candidates = append(candidates, scoredHost{h, result.scoreBreakdown.Score()}) + candidates = append(candidates, scoredHost{h.Host, result.scoreBreakdown.Score()}) continue } diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 574862a97..6f8e4f747 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -176,7 +176,7 @@ func (u *unusableHostResult) keysAndValues() []interface{} { // isUsableHost returns whether the given host is usable along with a list of // reasons why it was deemed unusable. -func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.Host, minScore float64, storedData uint64) (bool, unusableHostResult) { +func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.HostInfo, minScore float64, storedData uint64) (bool, unusableHostResult) { if rs.Validate() != nil { panic("invalid redundancy settings were supplied - developer error") } @@ -187,6 +187,8 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. 
if !h.IsAnnounced() { errs = append(errs, errHostNotAnnounced) + } else if h.Blocked { + errs = append(errs, errHostBlocked) } else if !h.Scanned { errs = append(errs, errHostNotCompletingScan) } else { @@ -211,7 +213,7 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. // not gouging, this because the core package does not have overflow // checks in its cost calculations needed to calculate the period // cost - scoreBreakdown = hostScore(cfg, h, storedData, rs.Redundancy()) + scoreBreakdown = hostScore(cfg, h.Host, storedData, rs.Redundancy()) if scoreBreakdown.Score() < minScore { errs = append(errs, fmt.Errorf("%w: (%s): %v < %v", errLowScore, scoreBreakdown.String(), scoreBreakdown.Score(), minScore)) } diff --git a/autopilot/hostinfo.go b/autopilot/hostinfo.go index 82efa1d61..e0cbecadc 100644 --- a/autopilot/hostinfo.go +++ b/autopilot/hostinfo.go @@ -53,7 +53,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api // ignore the pricetable's HostBlockHeight by setting it to our own blockheight host.Host.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host.Host, minScore, storedData) + isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host, minScore, storedData) return api.HostHandlerResponse{ Host: host.Host, Checks: &api.HostHandlerResponseChecks{ @@ -67,7 +67,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api }, nil } -func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.Host) (hi hostInfo, found bool) { +func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.HostInfo) (hi hostInfo, found bool) { // grab host details from cache c.mu.Lock() hi, found = c.cachedHostInfo[host.PublicKey] @@ -157,7 +157,7 @@ func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, a // set IsChecked = false. if usabilityMode == api.UsabilityFilterModeAll { hostInfos = append(hostInfos, api.HostHandlerResponse{ - Host: host, + Host: host.Host, }) if wanted > 0 && len(hostInfos) == wanted { return hostInfos, nil // we're done. 
@@ -170,7 +170,7 @@ func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, a continue } hostInfos = append(hostInfos, api.HostHandlerResponse{ - Host: host, + Host: host.Host, Checks: &api.HostHandlerResponseChecks{ Gouging: hi.UnusableResult.gougingBreakdown.Gouging(), GougingBreakdown: hi.UnusableResult.gougingBreakdown, diff --git a/autopilot/scanner.go b/autopilot/scanner.go index bb21e5022..85301822c 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -31,7 +31,7 @@ type ( // a bit, we currently use inline interfaces to avoid having to update the // scanner tests with every interface change bus interface { - Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb.Host, error) + Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 6214ec4a1..481b78046 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -19,7 +19,7 @@ type mockBus struct { reqs []string } -func (b *mockBus) Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb.Host, error) { +func (b *mockBus) Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) start := opts.Offset @@ -32,11 +32,15 @@ func (b *mockBus) Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb end = len(b.hosts) } - return b.hosts[start:end], nil + his := make([]hostdb.HostInfo, len(b.hosts[start:end])) + for i, h := range b.hosts[start:end] { + his[i] = hostdb.HostInfo{Host: h} + } + return his, nil } func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) { - hosts, err := b.Hosts(ctx, api.GetHostsOptions{ + hosts, err := b.Hosts(ctx, api.HostsOptions{ Offset: opts.Offset, Limit: opts.Limit, }) diff --git a/bus/bus.go b/bus/bus.go index d8b3fdfc5..3838a1877 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -92,13 +92,13 @@ type ( // A HostDB stores information about hosts. 
HostDB interface { Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, offset, limit int) ([]hostdb.Host, error) + Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) ResetLostSectors(ctx context.Context, hk types.PublicKey) error - SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.Host, error) + SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.HostInfo, error) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) HostBlocklist(ctx context.Context) ([]string, error) @@ -758,10 +758,23 @@ func (b *bus) walletPendingHandler(jc jape.Context) { func (b *bus) hostsHandlerGET(jc jape.Context) { offset := 0 limit := -1 - if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil { + filterMode := api.HostFilterModeAllowed + if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil || jc.DecodeForm("filterMode", &filterMode) != nil { return } - hosts, err := b.hdb.Hosts(jc.Request.Context(), offset, limit) + + // validate filterMode + switch filterMode { + case api.HostFilterModeAllowed: + case api.HostFilterModeBlocked: + case api.HostFilterModeAll: + default: + jc.Error(errors.New("invalid filter mode"), http.StatusBadRequest) + return + } + + // fetch hosts + hosts, err := b.hdb.Hosts(jc.Request.Context(), filterMode, offset, limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", offset, offset+limit), err) != nil { return } diff --git a/bus/client/hosts.go b/bus/client/hosts.go index ecf44e52b..70c8b3431 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -30,7 +30,7 @@ func (c *Client) HostBlocklist(ctx context.Context) (blocklist []string, err err } // Hosts returns 'limit' hosts at given 'offset'. -func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []hostdb.Host, err error) { +func (c *Client) Hosts(ctx context.Context, opts api.HostsOptions) (hosts []hostdb.HostInfo, err error) { values := url.Values{} opts.Apply(values) err = c.c.WithContext(ctx).GET("/hosts?"+values.Encode(), &hosts) @@ -78,7 +78,7 @@ func (c *Client) ResetLostSectors(ctx context.Context, hostKey types.PublicKey) } // SearchHosts returns all hosts that match certain search criteria. 
-func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []hostdb.Host, err error) { +func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []hostdb.HostInfo, err error) { err = c.c.WithContext(ctx).POST("/search/hosts", api.SearchHostsRequest{ Offset: opts.Offset, Limit: opts.Limit, diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 64acc2fba..e371f01d4 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -117,7 +117,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -142,7 +142,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -152,7 +152,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 2346f7019..65febbbf7 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -146,7 +146,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. 
- hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := cluster.Bus.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) for _, host := range hosts { hi, err := cluster.Autopilot.HostInfo(host.PublicKey) diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index de948c970..b5f6cccd0 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -84,7 +84,7 @@ func TestHostPruning(t *testing.T) { } // assert the host was not pruned - hostss, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err := b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -96,7 +96,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err = b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hostss) != 0 { return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions) diff --git a/stores/hostdb.go b/stores/hostdb.go index fd23abf4a..101ee298d 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -461,23 +461,25 @@ func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, return hostAddresses, err } -func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.Host, error) { +func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.HostInfo, error) { if offset < 0 { return nil, ErrNegativeOffset } - var hosts []hostdb.Host - var fullHosts []dbHost - // Apply filter mode. + var blocked bool query := ss.db switch filterMode { case api.HostFilterModeAllowed: query = query.Scopes(ss.excludeBlocked) case api.HostFilterModeBlocked: query = query.Scopes(ss.excludeAllowed) + blocked = true case api.HostFilterModeAll: - // nothing to do + // preload allowlist and blocklist + query = query. + Preload("Allowlist"). + Preload("Blocklist") default: return nil, fmt.Errorf("invalid filter mode: %v", filterMode) } @@ -500,12 +502,24 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains }) } + var hosts []hostdb.HostInfo + var fullHosts []dbHost err := query. Offset(offset). Limit(limit). FindInBatches(&fullHosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { for _, fh := range fullHosts { - hosts = append(hosts, fh.convert()) + if filterMode == api.HostFilterModeAll { + hosts = append(hosts, hostdb.HostInfo{ + Host: fh.convert(), + Blocked: ss.isBlocked(fh), + }) + } else { + hosts = append(hosts, hostdb.HostInfo{ + Host: fh.convert(), + Blocked: blocked, + }) + } } return nil }). @@ -517,8 +531,8 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains } // Hosts returns non-blocked hosts at given offset and limit. 
-func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]hostdb.Host, error) { - return ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, offset, limit) +func (ss *SQLStore) Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) { + return ss.SearchHosts(ctx, filterMode, "", nil, offset, limit) } func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 35872ea2d..528700502 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -53,7 +53,7 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.Hosts(ctx, 0, -1) + allHosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) if err != nil { t.Fatal(err) } @@ -171,27 +171,45 @@ func TestSQLHosts(t *testing.T) { hk1, hk2, hk3 := hks[0], hks[1], hks[2] // assert the hosts method returns the expected hosts - if hosts, err := ss.Hosts(ctx, 0, -1); err != nil || len(hosts) != 3 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.Hosts(ctx, 0, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk1 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, 1, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 1, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk2 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, 3, 1); err != nil || len(hosts) != 0 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.Hosts(ctx, -1, -1); err != ErrNegativeOffset { + if _, err := ss.Hosts(ctx, api.HostFilterModeAllowed, -1, -1); err != ErrNegativeOffset { t.Fatal("unexpected error", err) } - // Add a scan for each host. + // add a custom host and block it + hk4 := types.PublicKey{4} + if err := ss.addCustomTestHost(hk4, "host4.com"); err != nil { + t.Fatal("unexpected", err) + } + if err := ss.UpdateHostBlocklistEntries(context.Background(), []string{"host4.com"}, nil, false); err != nil { + t.Fatal("unexpected", err) + } + + // assert host filter mode is applied + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAll, 0, -1); err != nil || len(hosts) != 4 { + t.Fatal("unexpected", len(hosts), err) + } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeBlocked, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { + t.Fatal("unexpected", len(hosts), err) + } + + // add a scan for every non-blocked host n := time.Now() if err := ss.addTestScan(hk1, n.Add(-time.Minute), nil, rhpv2.HostSettings{}); err != nil { t.Fatal(err) @@ -203,39 +221,32 @@ func TestSQLHosts(t *testing.T) { t.Fatal(err) } - // Fetch all hosts using the HostsForScanning method. 
- hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 3) + // fetch all hosts using the HostsForScanning method + hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 4) if err != nil { t.Fatal(err) - } - if len(hostAddresses) != 3 { + } else if len(hostAddresses) != 4 { t.Fatal("wrong number of addresses") - } - if hostAddresses[0].PublicKey != hk3 { - t.Fatal("wrong key") - } - if hostAddresses[1].PublicKey != hk2 { - t.Fatal("wrong key") - } - if hostAddresses[2].PublicKey != hk1 { + } else if hostAddresses[0].PublicKey != hk4 || + hostAddresses[1].PublicKey != hk3 || + hostAddresses[2].PublicKey != hk2 || + hostAddresses[3].PublicKey != hk1 { t.Fatal("wrong key") } - // Fetch one host by setting the cutoff exactly to hk2. - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, 3) + // fetch one host by setting the cutoff exactly to hk3 + hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-3*time.Minute), 0, -1) if err != nil { t.Fatal(err) - } - if len(hostAddresses) != 1 { + } else if len(hostAddresses) != 1 { t.Fatal("wrong number of addresses") } - // Fetch no hosts. + // fetch no hosts hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) if err != nil { t.Fatal(err) - } - if len(hostAddresses) != 0 { + } else if len(hostAddresses) != 0 { t.Fatal("wrong number of addresses") } } @@ -595,7 +606,7 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, 0, -1) + hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) if err != nil { t.Fatal(err) } @@ -767,7 +778,7 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, 0, -1) + hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) if err != nil { t.Fatal(err) } From dabe9838bf48351cdbfe376d2ce370fe1ed4db0e Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 11:21:01 +0100 Subject: [PATCH 05/16] autopilot: update host filter --- autopilot/hostfilter.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 6f8e4f747..8de37221a 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -182,13 +182,14 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. 
} var errs []error + if h.Blocked { + errs = append(errs, errHostBlocked) + } + var gougingBreakdown api.HostGougingBreakdown var scoreBreakdown api.HostScoreBreakdown - if !h.IsAnnounced() { errs = append(errs, errHostNotAnnounced) - } else if h.Blocked { - errs = append(errs, errHostBlocked) } else if !h.Scanned { errs = append(errs, errHostNotCompletingScan) } else { From 041eb4247593be7808335ecff7cfc6db849996a9 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 14:23:47 +0100 Subject: [PATCH 06/16] build: remove MinMaxCollateral --- README.md | 1 - build/env_testnet.go | 1 - 2 files changed, 2 deletions(-) diff --git a/README.md b/README.md index a4ccc8681..1585854f6 100644 --- a/README.md +++ b/README.md @@ -250,7 +250,6 @@ updated using the settings API: "maxUploadPrice": "3000000000000000000000000000", // 3000 SC per 1 TiB "migrationSurchargeMultiplier": 10, // overpay up to 10x for sectors migrations on critical slabs "minAccountExpiry": 86400000000000, // 1 day - "minMaxCollateral": "10000000000000000000000000", // at least up to 10 SC per contract "minMaxEphemeralAccountBalance": "1000000000000000000000000", // 1 SC "minPriceTableValidity": 300000000000 // 5 minutes } diff --git a/build/env_testnet.go b/build/env_testnet.go index 1bc40d287..0bdef28f2 100644 --- a/build/env_testnet.go +++ b/build/env_testnet.go @@ -24,7 +24,6 @@ var ( // // NOTE: default gouging settings for testnet are identical to mainnet. DefaultGougingSettings = api.GougingSettings{ - MinMaxCollateral: types.Siacoins(10), // at least up to 10 SC per contract MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC MaxContractPrice: types.Siacoins(15), // 15 SC per contract MaxDownloadPrice: types.Siacoins(3000), // 3000 SC per 1 TiB From 90f454808b8ad411b30b3d55d11d57616d2eb835 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 14:37:07 +0100 Subject: [PATCH 07/16] autopilot: return affected object ids in migration alerts --- autopilot/alerts.go | 50 +++++++++------ autopilot/autopilot.go | 3 + autopilot/migrator.go | 45 +++++++++++++- internal/test/e2e/migrations_test.go | 93 +++++++++++++++++++++------- 4 files changed, 147 insertions(+), 44 deletions(-) diff --git a/autopilot/alerts.go b/autopilot/alerts.go index f4762c4d4..7a98f8918 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -194,22 +194,37 @@ func newCriticalMigrationSucceededAlert(slabKey object.EncryptionKey) alerts.Ale } } -func newCriticalMigrationFailedAlert(slabKey object.EncryptionKey, health float64, err error) alerts.Alert { +func newCriticalMigrationFailedAlert(slabKey object.EncryptionKey, health float64, objectIds map[string][]string, err error) alerts.Alert { + data := map[string]interface{}{ + "error": err.Error(), + "health": health, + "slabKey": slabKey.String(), + "hint": "If migrations of low-health slabs fail, it might be necessary to increase the MigrationSurchargeMultiplier in the gouging settings to ensure it has every chance of succeeding.", + } + if objectIds != nil { + data["objectIDs"] = objectIds + } + return alerts.Alert{ - ID: alertIDForSlab(alertMigrationID, slabKey), - Severity: alerts.SeverityCritical, - Message: "Critical migration failed", - Data: map[string]interface{}{ - "error": err.Error(), - "health": health, - "slabKey": slabKey.String(), - "hint": "If migrations of low-health slabs fail, it might be necessary to increase the MigrationSurchargeMultiplier in the gouging settings to ensure it has every chance of succeeding.", - }, + ID: alertIDForSlab(alertMigrationID, 
slabKey),
+		Severity:  alerts.SeverityCritical,
+		Message:   "Critical migration failed",
+		Data:      data,
 		Timestamp: time.Now(),
 	}
 }
 
-func newMigrationFailedAlert(slabKey object.EncryptionKey, health float64, err error) alerts.Alert {
+func newMigrationFailedAlert(slabKey object.EncryptionKey, health float64, objectIds map[string][]string, err error) alerts.Alert {
+	data := map[string]interface{}{
+		"error":   err.Error(),
+		"health":  health,
+		"slabKey": slabKey.String(),
+		"hint":    "Migration failures can be temporary, but if they persist it can eventually lead to data loss and should therefore be taken very seriously.",
+	}
+	if objectIds != nil {
+		data["objectIDs"] = objectIds
+	}
+
 	severity := alerts.SeverityError
 	if health < 0.25 {
 		severity = alerts.SeverityCritical
@@ -218,15 +233,10 @@ func newMigrationFailedAlert(slabKey object.EncryptionKey, health float64, err e
 	}
 
 	return alerts.Alert{
-		ID:       alertIDForSlab(alertMigrationID, slabKey),
-		Severity: severity,
-		Message:  "Slab migration failed",
-		Data: map[string]interface{}{
-			"error":   err.Error(),
-			"health":  health,
-			"slabKey": slabKey.String(),
-			"hint":    "Migration failures can be temporary, but if they persist it can eventually lead to data loss and should therefor be taken very seriously.",
-		},
+		ID:        alertIDForSlab(alertMigrationID, slabKey),
+		Severity:  severity,
+		Message:   "Slab migration failed",
+		Data:      data,
 		Timestamp: time.Now(),
 	}
 }
diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go
index c89049286..69b7bc458 100644
--- a/autopilot/autopilot.go
+++ b/autopilot/autopilot.go
@@ -63,6 +63,9 @@ type Bus interface {
 	RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error
 	RecordContractPruneMetric(ctx context.Context, metrics ...api.ContractPruneMetric) error
 
+	// buckets
+	ListBuckets(ctx context.Context) ([]api.Bucket, error)
+
 	// objects
 	ObjectsBySlabKey(ctx context.Context, bucket string, key object.EncryptionKey) (objects []api.ObjectMetadata, err error)
 	RefreshHealth(ctx context.Context) error
diff --git a/autopilot/migrator.go b/autopilot/migrator.go
index 89ab16a28..d389a051b 100644
--- a/autopilot/migrator.go
+++ b/autopilot/migrator.go
@@ -154,15 +154,23 @@ func (m *migrator) performMigrations(p *workerPool) {
 			start := time.Now()
 			res, err := j.execute(ctx, w)
 			m.statsSlabMigrationSpeedMS.Track(float64(time.Since(start).Milliseconds()))
-
 			if err != nil {
 				m.logger.Errorf("%v: migration %d/%d failed, key: %v, health: %v, overpaid: %v, err: %v", id, j.slabIdx+1, j.batchSize, j.Key, j.Health, res.SurchargeApplied, err)
 				skipAlert := utils.IsErr(err, api.ErrSlabNotFound)
 				if !skipAlert {
+					// fetch all object IDs for the slab we failed to migrate
+					var objectIds map[string][]string
+					if res, err := m.objectIDsForSlabKey(ctx, j.Key); err != nil {
+						m.logger.Errorf("failed to fetch object ids for slab key; %v", err)
+					} else {
+						objectIds = res
+					}
+
+					// register the alert
 					if res.SurchargeApplied {
-						m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.Key, j.Health, err))
+						m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.Key, j.Health, objectIds, err))
 					} else {
-						m.ap.RegisterAlert(ctx, newMigrationFailedAlert(j.Key, j.Health, err))
+						m.ap.RegisterAlert(ctx, newMigrationFailedAlert(j.Key, j.Health, objectIds, err))
 					}
 				}
 			} else {
@@ -274,3 +282,34 @@ OUTER:
 		return
 	}
 }
+
+func (m *migrator) objectIDsForSlabKey(ctx context.Context, key object.EncryptionKey) (map[string][]string, error) {
+	// fetch all buckets
+	//
+	// NOTE: at the time of writing the bus does not support 
fetching objects by
+	// slab key across all buckets at once, therefore we have to list all buckets
+	// and loop over them, revisit on the next major release
+	buckets, err := m.ap.bus.ListBuckets(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list buckets: %w", err)
+	}
+
+	// fetch all objects per bucket
+	idsPerBucket := make(map[string][]string)
+	for _, bucket := range buckets {
+		objects, err := m.ap.bus.ObjectsBySlabKey(ctx, bucket.Name, key)
+		if err != nil {
+			m.logger.Errorf("failed to fetch objects for slab key in bucket %v; %v", bucket.Name, err)
+			continue
+		} else if len(objects) == 0 {
+			continue
+		}
+
+		idsPerBucket[bucket.Name] = make([]string, len(objects))
+		for i, object := range objects {
+			idsPerBucket[bucket.Name][i] = object.Name
+		}
+	}
+
+	return idsPerBucket, nil
+}
diff --git a/internal/test/e2e/migrations_test.go b/internal/test/e2e/migrations_test.go
index 91bcc20b7..7b8e7b072 100644
--- a/internal/test/e2e/migrations_test.go
+++ b/internal/test/e2e/migrations_test.go
@@ -4,11 +4,14 @@ import (
 	"bytes"
 	"context"
 	"errors"
+	"fmt"
+	"reflect"
 	"testing"
 	"time"
 
 	rhpv2 "go.sia.tech/core/rhp/v2"
 	"go.sia.tech/core/types"
+	"go.sia.tech/renterd/alerts"
 	"go.sia.tech/renterd/api"
 	"go.sia.tech/renterd/internal/test"
 	"lukechampine.com/frand"
@@ -19,27 +22,29 @@ func TestMigrations(t *testing.T) {
 		t.SkipNow()
 	}
 
-	// create a new test cluster
+	// configure the cluster to use one extra host
+	rs := test.RedundancySettings
 	cfg := test.AutopilotConfig
-	cfg.Contracts.Amount = uint64(test.RedundancySettings.TotalShards) + 1
+	cfg.Contracts.Amount = uint64(rs.TotalShards) + 1
+
+	// create a new test cluster
 	cluster := newTestCluster(t, testClusterOptions{
-		// configure the cluster to use 1 more host than the total shards in the
-		// redundancy settings.
autopilotSettings: &cfg, - hosts: int(test.RedundancySettings.TotalShards) + 1, + hosts: int(cfg.Contracts.Amount), }) defer cluster.Shutdown() + // convenience variables + b := cluster.Bus + w := cluster.Worker + tt := cluster.tt + // create a helper to fetch used hosts usedHosts := func(path string) map[types.PublicKey]struct{} { - // fetch used hosts - res, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{}) - if err != nil { - t.Fatal(err) - } else if res.Object == nil { + res, _ := b.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{}) + if res.Object == nil { t.Fatal("object not found") } - used := make(map[types.PublicKey]struct{}) for _, slab := range res.Object.Slabs { for _, sector := range slab.Shards { @@ -49,18 +54,13 @@ func TestMigrations(t *testing.T) { return used } - // convenience variables - w := cluster.Worker - tt := cluster.tt - // add an object data := make([]byte, rhpv2.SectorSize) frand.Read(data) - path := "foo" - tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{})) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, t.Name(), api.UploadObjectOptions{})) // assert amount of hosts used - used := usedHosts(path) + used := usedHosts(t.Name()) if len(used) != test.RedundancySettings.TotalShards { t.Fatal("unexpected amount of hosts used", len(used), test.RedundancySettings.TotalShards) } @@ -77,13 +77,12 @@ func TestMigrations(t *testing.T) { // assert we migrated away from the bad host tt.Retry(300, 100*time.Millisecond, func() error { - if _, used := usedHosts(path)[removed]; used { + if _, used := usedHosts(t.Name())[removed]; used { return errors.New("host is still used") } return nil }) - - res, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{}) + res, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, t.Name(), api.GetObjectOptions{}) tt.OK(err) // check slabs @@ -109,8 +108,60 @@ func TestMigrations(t *testing.T) { shardHosts += len(shard.Contracts) } } + // all shards should have 1 host except for 1. So we end up with 4 in total. 
if shardHosts != 4 {
 		t.Fatalf("expected 4 shard hosts, got %v", shardHosts)
 	}
+
+	// create another bucket and upload an object into it
+	tt.OK(b.CreateBucket(context.Background(), "newbucket", api.CreateBucketOptions{}))
+	tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), "newbucket", t.Name(), api.UploadObjectOptions{}))
+
+	// assert we currently don't have any error/crit alerts
+	ress, _ := b.Alerts(context.Background(), alerts.AlertsOpts{})
+	if ress.Totals.Error+ress.Totals.Critical != 0 {
+		t.Fatal("unexpected", ress)
+	}
+
+	// remove all hosts to ensure migrations fail
+	for _, h := range cluster.hosts {
+		cluster.RemoveHost(h)
+	}
+
+	// fetch alerts and collect object ids until we find two
+	seen := make(map[types.Hash256]struct{})
+	got := make(map[string][]string)
+	tt.Retry(100, 100*time.Millisecond, func() error {
+		ress, _ := b.Alerts(context.Background(), alerts.AlertsOpts{})
+		if ress.Totals.Error+ress.Totals.Critical == 0 {
+			return errors.New("no migration alerts")
+		}
+		for _, alert := range ress.Alerts {
+			if _, skip := seen[alert.ID]; !skip {
+				seen[alert.ID] = struct{}{}
+				if data, ok := alert.Data["objectIDs"]; ok {
+					if data, ok := data.(map[string]interface{}); ok {
+						for bucket, ids := range data {
+							if objectIDs, ok := ids.([]interface{}); ok {
+								for _, id := range objectIDs {
+									got[bucket] = append(got[bucket], id.(string))
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+		if len(got) < 2 {
+			return errors.New("not enough object ids")
+		}
+		return nil
+	})
+	if !reflect.DeepEqual(map[string][]string{
+		api.DefaultBucketName: {fmt.Sprintf("/%s", t.Name())},
+		"newbucket":           {fmt.Sprintf("/%s", t.Name())},
+	}, got) {
+		t.Fatal("unexpected", got)
+	}
 }

From def6135059770c731149e9db9e7b04c6ad8a085a Mon Sep 17 00:00:00 2001
From: PJ
Date: Tue, 19 Mar 2024 14:48:47 +0100
Subject: [PATCH 08/16] testing: cleanup TestMigrations

---
 internal/test/e2e/migrations_test.go | 49 +++++++++++++++++-----------
 1 file changed, 30 insertions(+), 19 deletions(-)

diff --git a/internal/test/e2e/migrations_test.go b/internal/test/e2e/migrations_test.go
index 7b8e7b072..3e99f30a7 100644
--- a/internal/test/e2e/migrations_test.go
+++ b/internal/test/e2e/migrations_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
 	rhpv2 "go.sia.tech/core/rhp/v2"
 	"go.sia.tech/core/types"
 	"go.sia.tech/renterd/alerts"
 	"go.sia.tech/renterd/api"
 	"go.sia.tech/renterd/internal/test"
 	"lukechampine.com/frand"
 
-	// create another bucket and upload an object into it
+	// create another bucket and add an object
 	tt.OK(b.CreateBucket(context.Background(), "newbucket", api.CreateBucketOptions{}))
 	tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), "newbucket", t.Name(), api.UploadObjectOptions{}))
 
-	// assert we currently don't have any error/crit alerts
+	// assert we currently don't have any alerts
 	ress, _ := b.Alerts(context.Background(), alerts.AlertsOpts{})
 	if ress.Totals.Error+ress.Totals.Critical != 0 {
 		t.Fatal("unexpected", ress)
 	}
 
 	// remove all hosts to ensure migrations fail
 	for _, h := range cluster.hosts {
 		cluster.RemoveHost(h)
 	}
 
 	// fetch alerts and collect object ids until we find two
 	seen := make(map[types.Hash256]struct{})
 	got := make(map[string][]string)
 	tt.Retry(100, 100*time.Millisecond, func() error {
 		ress, _ := b.Alerts(context.Background(), alerts.AlertsOpts{})
 		if ress.Totals.Error+ress.Totals.Critical == 0 {
 			return errors.New("no migration alerts")
 		}
 		for _, alert := range ress.Alerts {
-			if _, skip := seen[alert.ID]; !skip {
-				seen[alert.ID] = struct{}{}
-				if data, ok := alert.Data["objectIDs"]; ok {
-					if data, ok := data.(map[string]interface{}); ok {
-						for bucket, ids := range data {
-							if objectIDs, ok := ids.([]interface{}); ok {
-								for 
_, id := range objectIDs {
-									got[bucket] = append(got[bucket], id.(string))
-								}
-							}
+			// skip if already seen
+			if _, skip := seen[alert.ID]; skip {
+				continue
+			}
+			seen[alert.ID] = struct{}{}
+
+			// skip if not a migration alert
+			data, ok := alert.Data["objectIDs"].(map[string]interface{})
+			if !ok {
+				continue
+			}
+
+			// collect all object ids per bucket
+			for bucket, ids := range data {
+				if objectIDs, ok := ids.([]interface{}); ok {
+					for _, id := range objectIDs {
+						got[bucket] = append(got[bucket], id.(string))
+						if len(got) == 2 {
+							return nil
 						}
 					}
 				}
 			}
 		}
-		if len(got) < 2 {
-			return errors.New("not enough object ids")
-		}
-		return nil
+		return errors.New("haven't found two migration alerts yet")
 	})
-	if !reflect.DeepEqual(map[string][]string{
+
+	// assert we found our two objects across two buckets
+	if want := map[string][]string{
 		api.DefaultBucketName: {fmt.Sprintf("/%s", t.Name())},
 		"newbucket":           {fmt.Sprintf("/%s", t.Name())},
-	}, got) {
-		t.Fatal("unexpected", got)
+	}; !reflect.DeepEqual(want, got) {
+		t.Fatal("unexpected", cmp.Diff(want, got))
 	}
 }

From 8da8b4f48b7859c570247fe0928ccb31e07ec690 Mon Sep 17 00:00:00 2001
From: PJ
Date: Tue, 19 Mar 2024 14:50:39 +0100
Subject: [PATCH 09/16] testing: remove TestContractMetricsQueryPlan

---
 stores/sql_test.go | 93 +++++++++++-----------------------------------
 1 file changed, 22 insertions(+), 71 deletions(-)

diff --git a/stores/sql_test.go b/stores/sql_test.go
index 170edbf87..842f3c9df 100644
--- a/stores/sql_test.go
+++ b/stores/sql_test.go
@@ -107,8 +107,8 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore {
 		conn = NewMySQLConnection(dbUser, dbPassword, dbURI, dbName)
 		connMetrics = NewMySQLConnection(dbUser, dbPassword, dbURI, dbMetricsName)
 	} else if cfg.persistent {
-		conn = NewSQLiteConnection(filepath.Join(cfg.dir, "db.sqlite"))
-		connMetrics = NewSQLiteConnection(filepath.Join(cfg.dir, "metrics.sqlite"))
+		conn = NewSQLiteConnection(filepath.Join(dir, "db.sqlite"))
+		connMetrics = NewSQLiteConnection(filepath.Join(dir, "metrics.sqlite"))
 	} else {
 		conn = NewEphemeralSQLiteConnection(dbName)
 		connMetrics = NewEphemeralSQLiteConnection(dbMetricsName)
@@ -292,7 +292,7 @@ func TestConsensusReset(t *testing.T) {
 	})
 
 	// Reset the consensus.
- if err := ss.ResetConsensusSubscription(); err != nil { + if err := ss.ResetConsensusSubscription(context.Background()); err != nil { t.Fatal(err) } @@ -332,27 +332,20 @@ type sqliteQueryPlan struct { Detail string `json:"detail"` } -func (p sqliteQueryPlan) usesIndex(index string) bool { +func (p sqliteQueryPlan) usesIndex() bool { d := strings.ToLower(p.Detail) - if index == "" { - return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") - } - return strings.Contains(d, fmt.Sprintf("using index %s", index)) + return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") } //nolint:tagliatelle type mysqlQueryPlan struct { Extra string `json:"Extra"` PossibleKeys string `json:"possible_keys"` - Key string `json:"key"` } -func (p mysqlQueryPlan) usesIndex(index string) bool { - if index == "" { - d := strings.ToLower(p.Extra) - return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") - } - return p.Key == index +func (p mysqlQueryPlan) usesIndex() bool { + d := strings.ToLower(p.Extra) + return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") } func TestQueryPlan(t *testing.T) { @@ -388,66 +381,24 @@ func TestQueryPlan(t *testing.T) { } for _, query := range queries { - plan := queryPlan(ss.db) - if err := explainQuery(ss.db, query, plan); err != nil { - t.Fatal(err) - } else if !plan.usesIndex("") { - t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, plan) - } - } -} - -func TestContractMetricsQueryPlan(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - db := ss.dbMetrics - - query := "SELECT * FROM contracts c WHERE c.timestamp >= 1 AND c.timestamp < 2 AND c.fcid = '' LIMIT 1" - plan := queryPlan(db) - if err := explainQuery(db, query, plan); err != nil { - t.Fatal(err) - } - - if isSQLite(db) { - // SQLite uses the index by default - if !plan.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("unexpected query plan %+v", plan) - } - } else { - // MySQL uses an index, but not 'idx_contracts_fcid_timestamp' - if !plan.usesIndex("") || plan.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("unexpected query plan %+v", plan) - } - - // redo the query with hint - queryWithHint := strings.Replace(query, "WHERE", "USE INDEX (idx_contracts_fcid_timestamp) WHERE", 1) - if err := explainQuery(db, queryWithHint, plan); err != nil { - t.Fatal(err) - } - - // assert it uses 'idx_contracts_fcid_timestamp' now - if !plan.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("unexpected query plan %+v", plan) + if isSQLite(ss.db) { + var explain sqliteQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } + } else { + var explain mysqlQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } } } } -func queryPlan(db *gorm.DB) interface{ usesIndex(index string) bool } { - if isSQLite(db) { - return &sqliteQueryPlan{} - } - return &mysqlQueryPlan{} -} - -func explainQuery(db *gorm.DB, query string, res interface{}) (err error) { - if isSQLite(db) { - err = db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&res).Error - } else { 
- err = db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&res).Error - } - return -} - func TestApplyUpdatesErr(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() From 36021ab5deef1755269106ba6b7ca0a3afb7c695 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 21:28:33 +0100 Subject: [PATCH 10/16] bus: deprecate /hosts endpoint infavour of /search/hosts --- api/host.go | 15 ++-- autopilot/autopilot.go | 3 +- autopilot/contractor.go | 2 +- autopilot/scanner.go | 2 +- autopilot/scanner_test.go | 4 +- bus/bus.go | 24 ++---- bus/client/hosts.go | 2 +- internal/test/e2e/blocklist_test.go | 8 +- internal/test/e2e/cluster_test.go | 2 +- internal/test/e2e/pruning_test.go | 4 +- stores/hostdb.go | 5 -- stores/hostdb_test.go | 125 +++++++++++++++------------- 12 files changed, 93 insertions(+), 103 deletions(-) diff --git a/api/host.go b/api/host.go index ba66ffd58..2a9df5f6b 100644 --- a/api/host.go +++ b/api/host.go @@ -70,16 +70,16 @@ type ( // Option types. type ( - HostsOptions struct { - Offset int - Limit int - FilterMode string + GetHostsOptions struct { + Offset int + Limit int } HostsForScanningOptions struct { MaxLastScan TimeRFC3339 Limit int Offset int } + SearchHostOptions struct { AddressContains string FilterMode string @@ -92,20 +92,17 @@ type ( func DefaultSearchHostOptions() SearchHostOptions { return SearchHostOptions{ Limit: -1, - FilterMode: HostFilterModeAll, + FilterMode: HostFilterModeAllowed, } } -func (opts HostsOptions) Apply(values url.Values) { +func (opts GetHostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) } if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } - if opts.FilterMode != "" { - values.Set("filterMode", opts.FilterMode) - } } func (opts HostsForScanningOptions) Apply(values url.Values) { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index d8a760265..3b90a8329 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -54,7 +54,6 @@ type Bus interface { // hostdb Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) @@ -196,7 +195,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { state := ap.State() // fetch hosts - hosts, err := ap.bus.Hosts(ctx, api.HostsOptions{}) + hosts, err := ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) if jc.Check("failed to get hosts", err) != nil { return } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 47a03480f..ef64f630b 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -249,7 +249,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.Hosts(ctx, api.HostsOptions{}) + hosts, err := c.ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) if err != nil { return false, err } diff --git a/autopilot/scanner.go b/autopilot/scanner.go index 85301822c..76643e5b5 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -31,7 +31,7 @@ type ( // a bit, we currently use inline interfaces to avoid having to update the // scanner tests with every interface change 
bus interface { - Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 481b78046..1cdd096d2 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -19,7 +19,7 @@ type mockBus struct { reqs []string } -func (b *mockBus) Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) { +func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) start := opts.Offset @@ -40,7 +40,7 @@ func (b *mockBus) Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.Ho } func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) { - hosts, err := b.Hosts(ctx, api.HostsOptions{ + hosts, err := b.SearchHosts(ctx, api.SearchHostOptions{ Offset: opts.Offset, Limit: opts.Limit, }) diff --git a/bus/bus.go b/bus/bus.go index 3838a1877..0a0614cbf 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -92,7 +92,6 @@ type ( // A HostDB stores information about hosts. HostDB interface { Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error @@ -285,7 +284,7 @@ func (b *bus) Handler() http.Handler { "GET /contract/:id/roots": b.contractIDRootsHandlerGET, "GET /contract/:id/size": b.contractSizeHandlerGET, - "GET /hosts": b.hostsHandlerGET, + "GET /hosts": b.hostsHandlerGETDeprecated, "GET /hosts/allowlist": b.hostsAllowlistHandlerGET, "PUT /hosts/allowlist": b.hostsAllowlistHandlerPUT, "GET /hosts/blocklist": b.hostsBlocklistHandlerGET, @@ -755,26 +754,15 @@ func (b *bus) walletPendingHandler(jc jape.Context) { jc.Encode(relevant) } -func (b *bus) hostsHandlerGET(jc jape.Context) { +func (b *bus) hostsHandlerGETDeprecated(jc jape.Context) { offset := 0 limit := -1 - filterMode := api.HostFilterModeAllowed - if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil || jc.DecodeForm("filterMode", &filterMode) != nil { - return - } - - // validate filterMode - switch filterMode { - case api.HostFilterModeAllowed: - case api.HostFilterModeBlocked: - case api.HostFilterModeAll: - default: - jc.Error(errors.New("invalid filter mode"), http.StatusBadRequest) + if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil { return } // fetch hosts - hosts, err := b.hdb.Hosts(jc.Request.Context(), filterMode, offset, limit) + hosts, err := b.hdb.SearchHosts(jc.Request.Context(), api.HostFilterModeAllowed, "", nil, offset, limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", offset, offset+limit), err) != nil { return } @@ -786,6 +774,10 @@ func (b *bus) searchHostsHandlerPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } + + // TODO: on the next major release we should: + 
// - remove api.DefaultSearchHostOptions and set defaults in the handler + // - validate the filter mode here and return a 400 hosts, err := b.hdb.SearchHosts(jc.Request.Context(), req.FilterMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return diff --git a/bus/client/hosts.go b/bus/client/hosts.go index 70c8b3431..1ebf14e1f 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -30,7 +30,7 @@ func (c *Client) HostBlocklist(ctx context.Context) (blocklist []string, err err } // Hosts returns 'limit' hosts at given 'offset'. -func (c *Client) Hosts(ctx context.Context, opts api.HostsOptions) (hosts []hostdb.HostInfo, err error) { +func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []hostdb.HostInfo, err error) { values := url.Values{} opts.Apply(values) err = c.c.WithContext(ctx).GET("/hosts?"+values.Encode(), &hosts) diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index e371f01d4..06f7e133d 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -23,6 +23,8 @@ func TestBlocklist(t *testing.T) { hosts: 3, }) defer cluster.Shutdown() + + // convenience variables b := cluster.Bus tt := cluster.tt @@ -117,7 +119,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.Hosts(context.Background(), api.HostsOptions{}) + hosts, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -142,7 +144,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) + hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -152,7 +154,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) + hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 65febbbf7..69ac90391 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -146,7 +146,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. 
-	hosts, err := cluster.Bus.Hosts(context.Background(), api.HostsOptions{})
+	hosts, err := cluster.Bus.SearchHosts(context.Background(), api.DefaultSearchHostOptions())
 	tt.OK(err)
 	for _, host := range hosts {
 		hi, err := cluster.Autopilot.HostInfo(host.PublicKey)
diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go
index b5f6cccd0..060152167 100644
--- a/internal/test/e2e/pruning_test.go
+++ b/internal/test/e2e/pruning_test.go
@@ -84,7 +84,7 @@ func TestHostPruning(t *testing.T) {
 	}
 
 	// assert the host was not pruned
-	hostss, err := b.Hosts(context.Background(), api.HostsOptions{})
+	hostss, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions())
 	tt.OK(err)
 	if len(hostss) != 1 {
 		t.Fatal("host was pruned")
@@ -96,7 +96,7 @@ func TestHostPruning(t *testing.T) {
 
 	// assert the host was pruned
 	tt.Retry(10, time.Second, func() error {
-		hostss, err = b.Hosts(context.Background(), api.HostsOptions{})
+		hostss, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions())
 		tt.OK(err)
 		if len(hostss) != 0 {
 			return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions)
diff --git a/stores/hostdb.go b/stores/hostdb.go
index 101ee298d..0a6fb00f6 100644
--- a/stores/hostdb.go
+++ b/stores/hostdb.go
@@ -530,11 +530,6 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains
 	return hosts, err
 }
 
-// Hosts returns non-blocked hosts at given offset and limit.
-func (ss *SQLStore) Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) {
-	return ss.SearchHosts(ctx, filterMode, "", nil, offset, limit)
-}
-
 func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) {
 	// sanity check 'maxDowntime'
 	if maxDowntime < 0 {
diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go
index 528700502..735ea4190 100644
--- a/stores/hostdb_test.go
+++ b/stores/hostdb_test.go
@@ -53,7 +53,7 @@ func TestSQLHostDB(t *testing.T) {
 	}
 
 	// Assert it's returned
-	allHosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1)
+	allHosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -158,39 +158,65 @@ func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, setti
 	})
 }
 
-// TestSQLHosts tests the Hosts method of the SQLHostDB type.
-func TestSQLHosts(t *testing.T) {
+// TestSearchHosts is a unit test for the SearchHosts method.
+func TestSearchHosts(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() ctx := context.Background() - hks, err := ss.addTestHosts(3) - if err != nil { - t.Fatal(err) + // add 3 hosts + var hks []types.PublicKey + for i := 1; i <= 3; i++ { + if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i)); err != nil { + t.Fatal(err) + } + hks = append(hks, types.PublicKey{byte(i)}) } hk1, hk2, hk3 := hks[0], hks[1], hks[2] - // assert the hosts method returns the expected hosts - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { + // assert defaults return all hosts + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, 1); err != nil || len(hosts) != 1 { + + // assert we can search using offset and limit + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk1 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 1, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 1, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk2 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 3, 1); err != nil || len(hosts) != 0 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.Hosts(ctx, api.HostFilterModeAllowed, -1, -1); err != ErrNegativeOffset { + if _, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, -1, -1); err != ErrNegativeOffset { t.Fatal("unexpected error", err) } + // assert we can search by address + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + + // assert we can search by key + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { + t.Fatal("unexpected", len(hosts), err) + } + + // assert we can search by address and key + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + + // assert we can search by key and limit + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + // add a custom host and block it hk4 := types.PublicKey{4} if err := ss.addCustomTestHost(hk4, "host4.com"); err != nil { @@ -201,13 +227,27 @@ func TestSQLHosts(t *testing.T) { } // assert host filter mode is applied - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAll, 0, -1); err != nil || len(hosts) != 4 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", nil, 0, -1); err != nil || len(hosts) != 4 { t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeBlocked, 0, -1); err != nil || len(hosts) != 1 { + } else if hosts, err := 
ss.SearchHosts(ctx, api.HostFilterModeBlocked, "", nil, 0, -1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { + } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } +} + +// TestHostsForScanning is a unit test for the HostsForScanning method. +func TestHostsForScanning(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + ctx := context.Background() + + // add 3 hosts + hks, err := ss.addTestHosts(3) + if err != nil { + t.Fatal(err) + } + hk1, hk2, hk3 := hks[0], hks[1], hks[2] // add a scan for every non-blocked host n := time.Now() @@ -222,20 +262,19 @@ func TestSQLHosts(t *testing.T) { } // fetch all hosts using the HostsForScanning method - hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 4) + hostAddresses, err := ss.HostsForScanning(ctx, n, 0, -1) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 4 { + } else if len(hostAddresses) != 3 { t.Fatal("wrong number of addresses") - } else if hostAddresses[0].PublicKey != hk4 || - hostAddresses[1].PublicKey != hk3 || - hostAddresses[2].PublicKey != hk2 || - hostAddresses[3].PublicKey != hk1 { + } else if hostAddresses[0].PublicKey != hk3 || + hostAddresses[1].PublicKey != hk2 || + hostAddresses[2].PublicKey != hk1 { t.Fatal("wrong key") } - // fetch one host by setting the cutoff exactly to hk3 - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-3*time.Minute), 0, -1) + // fetch one host by setting the cutoff exactly to hk2 + hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, -1) if err != nil { t.Fatal(err) } else if len(hostAddresses) != 1 { @@ -243,7 +282,7 @@ func TestSQLHosts(t *testing.T) { } // fetch no hosts - hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) + hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, -1) if err != nil { t.Fatal(err) } else if len(hostAddresses) != 0 { @@ -251,40 +290,6 @@ func TestSQLHosts(t *testing.T) { } } -// TestSearchHosts is a unit test for SearchHosts. -func TestSearchHosts(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - ctx := context.Background() - - // add 3 hosts - var hks []types.PublicKey - for i := 0; i < 3; i++ { - if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i+1)); err != nil { - t.Fatal(err) - } - hks = append(hks, types.PublicKey{byte(i)}) - } - hk1, hk2, hk3 := hks[0], hks[1], hks[2] - - // Search by address. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - // Filter by key. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { - t.Fatal("unexpected", len(hosts), err) - } - // Filter by address and key. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - // Filter by key and limit results - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } -} - // TestRecordScan is a test for recording scans. 
func TestRecordScan(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) @@ -606,7 +611,7 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) + hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) if err != nil { t.Fatal(err) } @@ -778,7 +783,7 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) + hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) if err != nil { t.Fatal(err) } From ef357f6e14a7b02c610cfedf5be89caaf041cc5a Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 09:34:57 +0100 Subject: [PATCH 11/16] stores: revert changes --- api/host.go | 7 -- autopilot/autopilot.go | 2 +- autopilot/contractor.go | 2 +- internal/test/e2e/blocklist_test.go | 8 +- internal/test/e2e/cluster_test.go | 2 +- internal/test/e2e/pruning_test.go | 4 +- stores/hostdb.go | 5 + stores/hostdb_test.go | 154 +++++++++++++--------------- 8 files changed, 82 insertions(+), 102 deletions(-) diff --git a/api/host.go b/api/host.go index 2a9df5f6b..5536f755c 100644 --- a/api/host.go +++ b/api/host.go @@ -89,13 +89,6 @@ type ( } ) -func DefaultSearchHostOptions() SearchHostOptions { - return SearchHostOptions{ - Limit: -1, - FilterMode: HostFilterModeAllowed, - } -} - func (opts GetHostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 3b90a8329..111dadb6c 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -195,7 +195,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { state := ap.State() // fetch hosts - hosts, err := ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) + hosts, err := ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) if jc.Check("failed to get hosts", err) != nil { return } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index ef64f630b..43ac5e629 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -249,7 +249,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) + hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) if err != nil { return false, err } diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 06f7e133d..64acc2fba 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -23,8 +23,6 @@ func TestBlocklist(t *testing.T) { hosts: 3, }) defer cluster.Shutdown() - - // convenience variables b := cluster.Bus tt := cluster.tt @@ -119,7 +117,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err := b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -144,7 +142,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ 
-154,7 +152,7 @@ func TestBlocklist(t *testing.T) {
 	tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true))
 
 	// assert no hosts are blocked
-	hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions())
+	hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{})
 	tt.OK(err)
 	if len(hosts) != 5 {
 		t.Fatal("unexpected number of hosts", len(hosts))
diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go
index 69ac90391..2346f7019 100644
--- a/internal/test/e2e/cluster_test.go
+++ b/internal/test/e2e/cluster_test.go
@@ -146,7 +146,7 @@ func TestNewTestCluster(t *testing.T) {
 	})
 
 	// Get host info for every host.
-	hosts, err := cluster.Bus.SearchHosts(context.Background(), api.DefaultSearchHostOptions())
+	hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{})
 	tt.OK(err)
 	for _, host := range hosts {
 		hi, err := cluster.Autopilot.HostInfo(host.PublicKey)
diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go
index 060152167..de948c970 100644
--- a/internal/test/e2e/pruning_test.go
+++ b/internal/test/e2e/pruning_test.go
@@ -84,7 +84,7 @@ func TestHostPruning(t *testing.T) {
 	}
 
 	// assert the host was not pruned
-	hostss, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions())
+	hostss, err := b.Hosts(context.Background(), api.GetHostsOptions{})
 	tt.OK(err)
 	if len(hostss) != 1 {
 		t.Fatal("host was pruned")
@@ -96,7 +96,7 @@ func TestHostPruning(t *testing.T) {
 
 	// assert the host was pruned
 	tt.Retry(10, time.Second, func() error {
-		hostss, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions())
+		hostss, err = b.Hosts(context.Background(), api.GetHostsOptions{})
 		tt.OK(err)
 		if len(hostss) != 0 {
 			return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions)
diff --git a/stores/hostdb.go b/stores/hostdb.go
index 0a6fb00f6..95e37a26c 100644
--- a/stores/hostdb.go
+++ b/stores/hostdb.go
@@ -530,6 +530,11 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains
 	return hosts, err
 }
 
+// Hosts returns non-blocked hosts at given offset and limit.
+func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]hostdb.HostInfo, error) {
+	return ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, offset, limit)
+}
+
 func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) {
 	// sanity check 'maxDowntime'
 	if maxDowntime < 0 {
diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go
index 735ea4190..35872ea2d 100644
--- a/stores/hostdb_test.go
+++ b/stores/hostdb_test.go
@@ -53,7 +53,7 @@ func TestSQLHostDB(t *testing.T) {
 	}
 
 	// Assert it's returned
-	allHosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1)
+	allHosts, err := ss.Hosts(ctx, 0, -1)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -158,98 +158,40 @@ func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, setti
 	})
 }
 
-// TestSearchHosts is a unit test for the SearchHosts method.
-func TestSearchHosts(t *testing.T) {
+// TestSQLHosts tests the Hosts method of the SQLHostDB type.
+func TestSQLHosts(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() ctx := context.Background() - // add 3 hosts - var hks []types.PublicKey - for i := 1; i <= 3; i++ { - if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i)); err != nil { - t.Fatal(err) - } - hks = append(hks, types.PublicKey{byte(i)}) + hks, err := ss.addTestHosts(3) + if err != nil { + t.Fatal(err) } hk1, hk2, hk3 := hks[0], hks[1], hks[2] - // assert defaults return all hosts - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { + // assert the hosts method returns the expected hosts + if hosts, err := ss.Hosts(ctx, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } - - // assert we can search using offset and limit - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, 0, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk1 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 1, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, 1, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk2 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 3, 1); err != nil || len(hosts) != 0 { + if hosts, err := ss.Hosts(ctx, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, -1, -1); err != ErrNegativeOffset { + if _, err := ss.Hosts(ctx, -1, -1); err != ErrNegativeOffset { t.Fatal("unexpected error", err) } - // assert we can search by address - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - - // assert we can search by key - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { - t.Fatal("unexpected", len(hosts), err) - } - - // assert we can search by address and key - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - - // assert we can search by key and limit - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - - // add a custom host and block it - hk4 := types.PublicKey{4} - if err := ss.addCustomTestHost(hk4, "host4.com"); err != nil { - t.Fatal("unexpected", err) - } - if err := ss.UpdateHostBlocklistEntries(context.Background(), []string{"host4.com"}, nil, false); err != nil { - t.Fatal("unexpected", err) - } - - // assert host filter mode is applied - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", nil, 0, -1); err != nil || len(hosts) != 4 { - t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeBlocked, "", nil, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, 
"", nil, 0, -1); err != nil || len(hosts) != 3 { - t.Fatal("unexpected", len(hosts), err) - } -} - -// TestHostsForScanning is a unit test for the HostsForScanning method. -func TestHostsForScanning(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - ctx := context.Background() - - // add 3 hosts - hks, err := ss.addTestHosts(3) - if err != nil { - t.Fatal(err) - } - hk1, hk2, hk3 := hks[0], hks[1], hks[2] - - // add a scan for every non-blocked host + // Add a scan for each host. n := time.Now() if err := ss.addTestScan(hk1, n.Add(-time.Minute), nil, rhpv2.HostSettings{}); err != nil { t.Fatal(err) @@ -261,35 +203,77 @@ func TestHostsForScanning(t *testing.T) { t.Fatal(err) } - // fetch all hosts using the HostsForScanning method - hostAddresses, err := ss.HostsForScanning(ctx, n, 0, -1) + // Fetch all hosts using the HostsForScanning method. + hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 3) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 3 { + } + if len(hostAddresses) != 3 { t.Fatal("wrong number of addresses") - } else if hostAddresses[0].PublicKey != hk3 || - hostAddresses[1].PublicKey != hk2 || - hostAddresses[2].PublicKey != hk1 { + } + if hostAddresses[0].PublicKey != hk3 { + t.Fatal("wrong key") + } + if hostAddresses[1].PublicKey != hk2 { + t.Fatal("wrong key") + } + if hostAddresses[2].PublicKey != hk1 { t.Fatal("wrong key") } - // fetch one host by setting the cutoff exactly to hk2 - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, -1) + // Fetch one host by setting the cutoff exactly to hk2. + hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, 3) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 1 { + } + if len(hostAddresses) != 1 { t.Fatal("wrong number of addresses") } - // fetch no hosts - hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, -1) + // Fetch no hosts. + hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 0 { + } + if len(hostAddresses) != 0 { t.Fatal("wrong number of addresses") } } +// TestSearchHosts is a unit test for SearchHosts. +func TestSearchHosts(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + ctx := context.Background() + + // add 3 hosts + var hks []types.PublicKey + for i := 0; i < 3; i++ { + if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i+1)); err != nil { + t.Fatal(err) + } + hks = append(hks, types.PublicKey{byte(i)}) + } + hk1, hk2, hk3 := hks[0], hks[1], hks[2] + + // Search by address. + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + // Filter by key. + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { + t.Fatal("unexpected", len(hosts), err) + } + // Filter by address and key. + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + // Filter by key and limit results + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } +} + // TestRecordScan is a test for recording scans. 
func TestRecordScan(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) @@ -611,7 +595,7 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) + hosts, err := ss.Hosts(ctx, 0, -1) if err != nil { t.Fatal(err) } @@ -783,7 +767,7 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) + hosts, err := ss.Hosts(ctx, 0, -1) if err != nil { t.Fatal(err) } From e71862eef3798fe646fdb5060d87c3d1b3f17541 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 09:37:36 +0100 Subject: [PATCH 12/16] all: cleanup PR --- bus/bus.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index 0a0614cbf..7d33964be 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -775,9 +775,9 @@ func (b *bus) searchHostsHandlerPOST(jc jape.Context) { return } - // TODO: on the next major release we should: - // - remove api.DefaultSearchHostOptions and set defaults in the handler - // - validate the filter mode here and return a 400 + // TODO: on the next major release + // - set defaults in handler + // - validate request params and return 400 if invalid hosts, err := b.hdb.SearchHosts(jc.Request.Context(), req.FilterMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return From f3e69a5715a73136944165fd6c8c97681e8988a7 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 09:44:57 +0100 Subject: [PATCH 13/16] linter: reconfigure gocritic --- .github/workflows/test.yml | 14 ++--- .golangci.yml | 86 ++++++++++++++++++++++--------- api/object.go | 4 +- autopilot/hosts.go | 2 +- internal/test/e2e/cluster_test.go | 11 +++- stores/metadata_test.go | 2 +- worker/mocks_test.go | 2 +- worker/rhpv2.go | 10 +++- worker/rhpv3.go | 7 ++- worker/worker.go | 3 +- 10 files changed, 97 insertions(+), 44 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bb56bbdb2..cc6c5c1f3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,13 +17,6 @@ jobs: - name: Configure Windows if: matrix.os == 'windows-latest' run: git config --global core.autocrlf false # fixes go lint fmt error - - name: Configure MySQL - if: matrix.os == 'ubuntu-latest' - uses: mirromutth/mysql-action@v1.1 - with: - host port: 3800 - mysql version: '8' - mysql root password: test - name: Checkout uses: actions/checkout@v3 - name: Setup Go @@ -43,6 +36,13 @@ jobs: autopilot bus bus/client worker worker/client + - name: Configure MySQL + if: matrix.os == 'ubuntu-latest' + uses: mirromutth/mysql-action@v1.1 + with: + host port: 3800 + mysql version: '8' + mysql root password: test - name: Test uses: n8maninger/action-golang-test@v1 with: diff --git a/.golangci.yml b/.golangci.yml index ace11db65..ad9bf1f64 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,13 +17,6 @@ run: # list of build tags, all linters use it. Default is empty list. build-tags: [] - # which dirs to skip: issues from them won't be reported; - # can use regexp here: generated.*, regexp is applied on full path; - # default value is empty list, but default dirs are skipped independently - # from this option's value (see skip-dirs-use-default). - skip-dirs: - - cover - # default is true. 
Enables skipping of directories: # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ skip-dirs-use-default: true @@ -37,7 +30,7 @@ run: # output configuration options output: # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - format: colored-line-number + formats: colored-line-number # print lines of code with issue, default is true print-issued-lines: true @@ -61,23 +54,66 @@ linters-settings: # See https://go-critic.github.io/overview#checks-overview # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run` # By default list of stable checks is used. - enabled-checks: - - argOrder # Diagnostic options - - badCond - - caseOrder - - dupArg - - dupBranchBody - - dupCase - - dupSubExpr - - nilValReturn - - offBy1 - - weakCond - - boolExprSimplify # Style options here and below. - - builtinShadow - - emptyFallthrough - - hexLiteral - - underef - - equalFold + enabled-tags: + - diagnostic + - style + disabled-checks: + # diagnostic + - badRegexp + - badSorting + - badSyncOnceFunc + - builtinShadowDecl + - commentedOutCode + - dynamicFmtString + - emptyDecl + - evalOrder + - externalErrorReassign + - filepathJoin + - regexpPattern + - returnAfterHttpError + - sloppyReassign + - sortSlice + - sprintfQuotedString + - sqlQuery + - syncMapLoadAndDelete + - truncateCmp + - uncheckedInlineErr + - unnecessaryDefer + + # style + - commentedOutImport + - deferUnlambda + - docStub + - dupImport + - emptyStringTest + - exitAfterDefer + - exposedSyncMutex + - httpNoBody + - ifElseChain + - importShadow + - initClause + - methodExprCall + - nestingReduce + - octalLiteral + - paramTypeCombine + - preferFilepathJoin + - ptrToRefParam + - redundantSprint + - regexpSimplify + - ruleguard + - stringConcatSimplify + - stringsCompare + - timeExprSimplify + - todoCommentWithoutDetail + - tooManyResultsChecker + - typeAssertChain + - typeDefFirst + - typeUnparen + - unlabelStmt + - unnamedResult + - unnecessaryBlock + - whyNoLint + - yodaStyleExpr revive: ignore-generated-header: true rules: diff --git a/api/object.go b/api/object.go index 36cea9db8..b55191873 100644 --- a/api/object.go +++ b/api/object.go @@ -367,8 +367,8 @@ func (opts SearchObjectOptions) Apply(values url.Values) { } } -func FormatETag(ETag string) string { - return fmt.Sprintf("\"%s\"", ETag) +func FormatETag(eTag string) string { + return fmt.Sprintf("\"%s\"", eTag) } func ObjectPathEscape(path string) string { diff --git a/autopilot/hosts.go b/autopilot/hosts.go index 69fcf9776..aba45ee87 100644 --- a/autopilot/hosts.go +++ b/autopilot/hosts.go @@ -33,7 +33,7 @@ func (hosts scoredHosts) randSelectByScore(n int) (selected []scoredHost) { total += h.score } for i := range candidates { - candidates[i].score = candidates[i].score / total + candidates[i].score /= total } // select diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 2346f7019..e081c2a5d 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -2062,6 +2062,9 @@ func TestMultipartUploads(t *testing.T) { etag1 := putPart(1, 0, data1) etag3 := putPart(3, len(data1)+len(data2), data3) size := int64(len(data1) + len(data2) + len(data3)) + expectedData := data1 + expectedData = append(expectedData, data2...) + expectedData = append(expectedData, data3...) 
// List parts mup, err := b.MultipartUploadParts(context.Background(), api.DefaultBucketName, objPath, mpr.UploadID, 0, 0) @@ -2118,7 +2121,7 @@ func TestMultipartUploads(t *testing.T) { t.Fatal("unexpected size:", gor.Size) } else if data, err := io.ReadAll(gor.Content); err != nil { t.Fatal(err) - } else if expectedData := append(data1, append(data2, data3...)...); !bytes.Equal(data, expectedData) { + } else if !bytes.Equal(data, expectedData) { t.Fatal("unexpected data:", cmp.Diff(data, expectedData)) } @@ -2417,6 +2420,11 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { }) tt.OK(err) + // combine all parts data + expectedData := part1Data + expectedData = append(expectedData, part2Data...) + expectedData = append(expectedData, part3Data...) + // finish the upload tt.OKAll(b.CompleteMultipartUpload(context.Background(), api.DefaultBucketName, objPath, mpr.UploadID, []api.MultipartCompletedPart{ { @@ -2436,7 +2444,6 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { // download the object and verify its integrity dst := new(bytes.Buffer) tt.OK(w.DownloadObject(context.Background(), dst, api.DefaultBucketName, objPath, api.DownloadObjectOptions{})) - expectedData := append(part1Data, append(part2Data, part3Data...)...) receivedData := dst.Bytes() if len(receivedData) != len(expectedData) { t.Fatalf("expected %v bytes, got %v", len(expectedData), len(receivedData)) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index c6ac1cd52..c16f927d1 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4538,7 +4538,7 @@ func TestTypeCurrency(t *testing.T) { var result bool query := fmt.Sprintf("SELECT ? %s ?", test.cmp) if !isSQLite(ss.db) { - query = strings.Replace(query, "?", "HEX(?)", -1) + query = strings.ReplaceAll(query, "?", "HEX(?)") } if err := ss.db.Raw(query, test.a, test.b).Scan(&result).Error; err != nil { t.Fatal(err) diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 4f7c24b8f..7b3609c0b 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -375,7 +375,7 @@ func newObjectStoreMock(bucket string) *objectStoreMock { return os } -func (os *objectStoreMock) AddMultipartPart(ctx context.Context, bucket, path, contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { +func (os *objectStoreMock) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { return nil } diff --git a/worker/rhpv2.go b/worker/rhpv2.go index 9f05904a4..beab25a65 100644 --- a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -217,8 +217,14 @@ func RPCFormContract(ctx context.Context, t *rhpv2.Transport, renterKey types.Pr return rhpv2.ContractRevision{}, nil, err } - txn.Signatures = append(renterContractSignatures, hostSigs.ContractSignatures...) - signedTxnSet := append(resp.Parents, append(parents, txn)...) + txn.Signatures = make([]types.TransactionSignature, 0, len(renterContractSignatures)+len(hostSigs.ContractSignatures)) + txn.Signatures = append(txn.Signatures, renterContractSignatures...) + txn.Signatures = append(txn.Signatures, hostSigs.ContractSignatures...) + + signedTxnSet := make([]types.Transaction, 0, len(resp.Parents)+len(parents)+1) + signedTxnSet = append(signedTxnSet, resp.Parents...) + signedTxnSet = append(signedTxnSet, parents...) 
+ signedTxnSet = append(signedTxnSet, txn) return rhpv2.ContractRevision{ Revision: initRevision, Signatures: [2]types.TransactionSignature{ diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 8db6dc9d5..c0404b128 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -511,7 +511,9 @@ func (a *accounts) deriveAccountKey(hostKey types.PublicKey) types.PrivateKey { // Append the host for which to create it and the index to the // corresponding sub-key. subKey := a.key - data := append(subKey, hostKey[:]...) + data := make([]byte, 0, len(subKey)+len(hostKey)+1) + data = append(data, subKey[:]...) + data = append(data, hostKey[:]...) data = append(data, index) seed := types.HashBytes(data) @@ -1078,7 +1080,8 @@ func RPCRenew(ctx context.Context, rrr api.RHPRenewRequest, bus Bus, t *transpor txn.Signatures = append(txn.Signatures, hostSigs.TransactionSignatures...) // Add the parents to get the full txnSet. - txnSet = append(parents, txn) + txnSet = parents + txnSet = append(txnSet, txn) return rhpv2.ContractRevision{ Revision: noOpRevision, diff --git a/worker/worker.go b/worker/worker.go index 0868c347c..707a6a7f1 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -506,7 +506,8 @@ func (w *worker) rhpBroadcastHandler(jc jape.Context) { return } // Broadcast the txn. - txnSet := append(parents, txn) + txnSet := parents + txnSet = append(txnSet, txn) err = w.bus.BroadcastTransaction(ctx, txnSet) if jc.Check("failed to broadcast transaction", err) != nil { _ = w.bus.WalletDiscard(ctx, txn) From 89850c8cb18cb8bb807e5d19132817c7b1720485 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 11:09:27 +0100 Subject: [PATCH 14/16] testing: cleanup TestMigrations --- internal/test/e2e/migrations_test.go | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/internal/test/e2e/migrations_test.go b/internal/test/e2e/migrations_test.go index 3e99f30a7..b049da908 100644 --- a/internal/test/e2e/migrations_test.go +++ b/internal/test/e2e/migrations_test.go @@ -125,28 +125,18 @@ func TestMigrations(t *testing.T) { t.Fatal("unexpected", ress) } - // prepare - // remove all hosts to ensure migrations fail for _, h := range cluster.hosts { cluster.RemoveHost(h) } // fetch alerts and collect object ids until we found two - seen := make(map[types.Hash256]struct{}) - got := make(map[string][]string) + var got map[string][]string tt.Retry(100, 100*time.Millisecond, func() error { - ress, _ := b.Alerts(context.Background(), alerts.AlertsOpts{}) - if ress.Totals.Error+ress.Totals.Critical == 0 { - return errors.New("no migration alerts") - } + got = make(map[string][]string) + ress, err := b.Alerts(context.Background(), alerts.AlertsOpts{}) + tt.OK(err) for _, alert := range ress.Alerts { - // skip if already seen - if _, skip := seen[alert.ID]; skip { - continue - } - seen[alert.ID] = struct{}{} - // skip if not a migration alert data, ok := alert.Data["objectIDs"].(map[string]interface{}) if !ok { @@ -158,14 +148,14 @@ func TestMigrations(t *testing.T) { if objectIDs, ok := ids.([]interface{}); ok { for _, id := range objectIDs { got[bucket] = append(got[bucket], id.(string)) - if len(got) == 2 { - return nil - } } } } } - return errors.New("haven't found two migration alerts yet") + if len(got) != 2 { + return errors.New("unexpected number of buckets") + } + return nil }) // assert we found our two objects across two buckets From d5ae293472795e65ddfabbcfb036a81850c665e7 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 11:20:06 +0100 Subject: [PATCH 15/16] 
linter: drop default format --- .golangci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ad9bf1f64..017b68431 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -29,9 +29,6 @@ run: # output configuration options output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - formats: colored-line-number - # print lines of code with issue, default is true print-issued-lines: true From 1252ed455b79774b5fd8f7b414af94f5b64c5b97 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 11:31:55 +0100 Subject: [PATCH 16/16] linter: enable all the things --- .golangci.yml | 38 -------------------------------------- api/host.go | 2 +- api/object.go | 2 +- api/param.go | 2 +- api/setting.go | 4 ++-- bus/client/metrics.go | 4 ++-- bus/client/slabs.go | 2 +- bus/client/wallet.go | 2 +- cmd/renterd/config.go | 2 +- cmd/renterd/main.go | 4 ++-- hostdb/hostdb.go | 2 +- worker/client/client.go | 4 ++-- 12 files changed, 15 insertions(+), 53 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 017b68431..d439ef177 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -56,61 +56,23 @@ linters-settings: - style disabled-checks: # diagnostic - - badRegexp - - badSorting - - badSyncOnceFunc - - builtinShadowDecl - commentedOutCode - - dynamicFmtString - - emptyDecl - - evalOrder - - externalErrorReassign - - filepathJoin - - regexpPattern - - returnAfterHttpError - - sloppyReassign - - sortSlice - - sprintfQuotedString - - sqlQuery - - syncMapLoadAndDelete - - truncateCmp - uncheckedInlineErr - - unnecessaryDefer # style - - commentedOutImport - - deferUnlambda - - docStub - - dupImport - - emptyStringTest - exitAfterDefer - - exposedSyncMutex - - httpNoBody - ifElseChain - importShadow - - initClause - - methodExprCall - - nestingReduce - octalLiteral - paramTypeCombine - - preferFilepathJoin - ptrToRefParam - - redundantSprint - - regexpSimplify - - ruleguard - - stringConcatSimplify - stringsCompare - - timeExprSimplify - - todoCommentWithoutDetail - tooManyResultsChecker - - typeAssertChain - typeDefFirst - typeUnparen - unlabelStmt - unnamedResult - - unnecessaryBlock - whyNoLint - - yodaStyleExpr revive: ignore-generated-header: true rules: diff --git a/api/host.go b/api/host.go index aea80a9fe..e1618397a 100644 --- a/api/host.go +++ b/api/host.go @@ -112,6 +112,6 @@ func (opts HostsForScanningOptions) Apply(values url.Values) { values.Set("limit", fmt.Sprint(opts.Limit)) } if !opts.MaxLastScan.IsZero() { - values.Set("lastScan", fmt.Sprint(TimeRFC3339(opts.MaxLastScan))) + values.Set("lastScan", TimeRFC3339(opts.MaxLastScan).String()) } } diff --git a/api/object.go b/api/object.go index b55191873..0382f69a7 100644 --- a/api/object.go +++ b/api/object.go @@ -368,7 +368,7 @@ func (opts SearchObjectOptions) Apply(values url.Values) { } func FormatETag(eTag string) string { - return fmt.Sprintf("\"%s\"", eTag) + return fmt.Sprintf("%q", eTag) } func ObjectPathEscape(path string) string { diff --git a/api/param.go b/api/param.go index 7e9ef6e75..c2268ca30 100644 --- a/api/param.go +++ b/api/param.go @@ -105,7 +105,7 @@ func (t *TimeRFC3339) UnmarshalText(b []byte) error { // MarshalJSON implements json.Marshaler. func (t TimeRFC3339) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, (time.Time)(t).UTC().Format(time.RFC3339Nano))), nil + return []byte(fmt.Sprintf("%q", (time.Time)(t).UTC().Format(time.RFC3339Nano))), nil } // String implements fmt.Stringer. 
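A brief aside on the %q rewrites this patch applies in api/object.go and api/param.go above: %q both quotes and escapes its operand, so it is only a drop-in replacement for the hand-rolled "\"%s\"" quoting when the value contains no quote or control characters. That holds for the values involved here, since ETags and RFC 3339 timestamps are plain ASCII. A minimal sketch of where the two forms diverge, using made-up inputs rather than renterd values:

package main

import "fmt"

func main() {
	// Plain ASCII operand: both forms yield the same quoted string.
	fmt.Println(fmt.Sprintf("\"%s\"", "deadbeef")) // "deadbeef"
	fmt.Println(fmt.Sprintf("%q", "deadbeef"))     // "deadbeef"

	// Operand containing a quote: %q escapes it, the old form does not.
	fmt.Println(fmt.Sprintf("\"%s\"", `e"tag`)) // "e"tag"
	fmt.Println(fmt.Sprintf("%q", `e"tag`))     // "e\"tag"
}
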
diff --git a/api/setting.go b/api/setting.go index 47785c9aa..02efe6b5d 100644 --- a/api/setting.go +++ b/api/setting.go @@ -155,11 +155,11 @@ func (rs RedundancySettings) Validate() error { // valid. func (s3as S3AuthenticationSettings) Validate() error { for accessKeyID, secretAccessKey := range s3as.V4Keypairs { - if len(accessKeyID) == 0 { + if accessKeyID == "" { return fmt.Errorf("AccessKeyID cannot be empty") } else if len(accessKeyID) < S3MinAccessKeyLen || len(accessKeyID) > S3MaxAccessKeyLen { return fmt.Errorf("AccessKeyID must be between %d and %d characters long but was %d", S3MinAccessKeyLen, S3MaxAccessKeyLen, len(accessKeyID)) - } else if len(secretAccessKey) == 0 { + } else if secretAccessKey == "" { return fmt.Errorf("SecretAccessKey cannot be empty") } else if len(secretAccessKey) != S3SecretKeyLen { return fmt.Errorf("SecretAccessKey must be %d characters long but was %d", S3SecretKeyLen, len(secretAccessKey)) diff --git a/bus/client/metrics.go b/bus/client/metrics.go index dce120ca8..10bc2fbca 100644 --- a/bus/client/metrics.go +++ b/bus/client/metrics.go @@ -125,7 +125,7 @@ func (c *Client) PruneMetrics(ctx context.Context, metric string, cutoff time.Ti panic(err) } u.RawQuery = values.Encode() - req, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), nil) + req, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), http.NoBody) if err != nil { panic(err) } @@ -180,7 +180,7 @@ func (c *Client) metric(ctx context.Context, key string, values url.Values, res panic(err) } u.RawQuery = values.Encode() - req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), http.NoBody) if err != nil { panic(err) } diff --git a/bus/client/slabs.go b/bus/client/slabs.go index e407e0360..db5c0023a 100644 --- a/bus/client/slabs.go +++ b/bus/client/slabs.go @@ -63,7 +63,7 @@ func (c *Client) FetchPartialSlab(ctx context.Context, key object.EncryptionKey, panic(err) } u.RawQuery = values.Encode() - req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), http.NoBody) if err != nil { panic(err) } diff --git a/bus/client/wallet.go b/bus/client/wallet.go index 0d4761e51..db9ab4239 100644 --- a/bus/client/wallet.go +++ b/bus/client/wallet.go @@ -149,7 +149,7 @@ func (c *Client) WalletTransactions(ctx context.Context, opts ...api.WalletTrans panic(err) } u.RawQuery = values.Encode() - req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), http.NoBody) if err != nil { panic(err) } diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index f9008a4d5..ec153f452 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -376,7 +376,7 @@ func cmdBuildConfig() { // write the config file configPath := "renterd.yml" - if str := os.Getenv("RENTERD_CONFIG_FILE"); len(str) != 0 { + if str := os.Getenv("RENTERD_CONFIG_FILE"); str != "" { configPath = str } diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 093747796..ab1cc67e0 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -139,7 +139,7 @@ func check(context string, err error) { } func mustLoadAPIPassword() { - if len(cfg.HTTP.Password) != 0 { + if cfg.HTTP.Password != "" { return } @@ -192,7 +192,7 @@ func mustParseWorkers(workers, password string) { // loaded. 
func tryLoadConfig() { configPath := "renterd.yml" - if str := os.Getenv("RENTERD_CONFIG_FILE"); len(str) != 0 { + if str := os.Getenv("RENTERD_CONFIG_FILE"); str != "" { configPath = str } diff --git a/hostdb/hostdb.go b/hostdb/hostdb.go index c1b4769d6..69ed80989 100644 --- a/hostdb/hostdb.go +++ b/hostdb/hostdb.go @@ -38,7 +38,7 @@ func ForEachAnnouncement(b types.Block, height uint64, fn func(types.PublicKey, // verify signature var hostKey types.PublicKey copy(hostKey[:], ha.PublicKey.Key) - annHash := types.Hash256(crypto.HashObject(ha.HostAnnouncement)) // TODO + annHash := types.Hash256(crypto.HashObject(ha.HostAnnouncement)) if !hostKey.VerifyHash(annHash, ha.Signature) { continue } diff --git a/worker/client/client.go b/worker/client/client.go index 6ef70f338..d658ac027 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -88,7 +88,7 @@ func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.H path += "?" + values.Encode() // TODO: support HEAD in jape client - req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), nil) + req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), http.NoBody) if err != nil { panic(err) } @@ -271,7 +271,7 @@ func (c *Client) object(ctx context.Context, bucket, path string, opts api.Downl path += "?" + values.Encode() c.c.Custom("GET", fmt.Sprintf("/objects/%s", path), nil, (*[]api.ObjectMetadata)(nil)) - req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), nil) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), http.NoBody) if err != nil { panic(err) }
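
A closing note on the append rewrites from PATCH 13 (worker/rhpv2.go, worker/rhpv3.go and worker/worker.go): an expression like append(parents, txn) reuses the backing array of parents whenever it has spare capacity, so two appends from the same base slice can silently overwrite each other's results. The make-then-append form now used in RPCFormContract guarantees fresh storage, while the txnSet = parents; txnSet = append(txnSet, txn) variants in RPCRenew and rhpBroadcastHandler keep the original aliasing semantics and only restructure the expression for the linter. A minimal, self-contained sketch of the hazard, using toy int slices rather than the actual transaction types:

package main

import "fmt"

func main() {
	// A base slice with spare capacity, standing in for 'parents'.
	parents := make([]int, 2, 4)
	parents[0], parents[1] = 1, 2

	a := append(parents, 10) // shares parents' backing array
	b := append(parents, 20) // writes over the element a[2] refers to

	fmt.Println(a[2], b[2]) // 20 20: the first result was clobbered

	// The pattern used in RPCFormContract: allocate fresh, then append.
	c := make([]int, 0, len(parents)+1)
	c = append(c, parents...)
	c = append(c, 30)
	fmt.Println(a[2], c[2]) // 20 30: independent storage
}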