From 82d74327861b6249e68eb4509932094e1d898387 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 10:55:04 +0100 Subject: [PATCH 1/5] bus: add filter mode to HostsOptions --- api/host.go | 12 ++++-- autopilot/autopilot.go | 12 +++--- autopilot/autopilot_test.go | 47 ++++++++++---------- autopilot/contractor.go | 10 ++--- autopilot/hostfilter.go | 6 ++- autopilot/hostinfo.go | 8 ++-- autopilot/scanner.go | 2 +- autopilot/scanner_test.go | 10 +++-- bus/bus.go | 21 +++++++-- bus/client/hosts.go | 4 +- internal/test/e2e/blocklist_test.go | 6 +-- internal/test/e2e/cluster_test.go | 2 +- internal/test/e2e/pruning_test.go | 4 +- stores/hostdb.go | 30 +++++++++---- stores/hostdb_test.go | 67 +++++++++++++++++------------ 15 files changed, 146 insertions(+), 95 deletions(-) diff --git a/api/host.go b/api/host.go index aea80a9fe..ba66ffd58 100644 --- a/api/host.go +++ b/api/host.go @@ -70,9 +70,10 @@ type ( // Option types. type ( - GetHostsOptions struct { - Offset int - Limit int + HostsOptions struct { + Offset int + Limit int + FilterMode string } HostsForScanningOptions struct { MaxLastScan TimeRFC3339 @@ -95,13 +96,16 @@ func DefaultSearchHostOptions() SearchHostOptions { } } -func (opts GetHostsOptions) Apply(values url.Values) { +func (opts HostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) } if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } + if opts.FilterMode != "" { + values.Set("filterMode", opts.FilterMode) + } } func (opts HostsForScanningOptions) Apply(values url.Values) { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index c89049286..d8a760265 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -54,10 +54,10 @@ type Bus interface { // hostdb Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb.Host, error) + Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.Host, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) // metrics RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error @@ -196,7 +196,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { state := ap.State() // fetch hosts - hosts, err := ap.bus.Hosts(ctx, api.GetHostsOptions{}) + hosts, err := ap.bus.Hosts(ctx, api.HostsOptions{}) if jc.Check("failed to get hosts", err) != nil { return } @@ -735,7 +735,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { jc.Encode(hosts) } -func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) @@ -749,7 +749,7 @@ func countUsableHosts(cfg 
api.AutopilotConfig, cs api.ConsensusState, fee types. // evaluateConfig evaluates the given configuration and if the gouging settings // are too strict for the number of contracts required by 'cfg', it will provide // a recommendation on how to loosen it. -func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (resp api.ConfigEvaluationResponse) { +func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (resp api.ConfigEvaluationResponse) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) resp.Hosts = uint64(len(hosts)) @@ -865,7 +865,7 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu // optimiseGougingSetting tries to optimise one field of the gouging settings to // try and hit the target number of contracts. -func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.Host) bool { +func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.HostInfo) bool { if cfg.Contracts.Amount == 0 { return true // nothing to do } diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go index f818c312b..9ebafe675 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/autopilot_test.go @@ -14,31 +14,34 @@ import ( func TestOptimiseGougingSetting(t *testing.T) { // create 10 hosts that should all be usable - var hosts []hostdb.Host + var hosts []hostdb.HostInfo for i := 0; i < 10; i++ { - hosts = append(hosts, hostdb.Host{ - KnownSince: time.Unix(0, 0), - PriceTable: hostdb.HostPriceTable{ - HostPriceTable: rhpv3.HostPriceTable{ - CollateralCost: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), + hosts = append(hosts, hostdb.HostInfo{ + Host: hostdb.Host{ + KnownSince: time.Unix(0, 0), + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{ + CollateralCost: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + }, }, + Settings: rhpv2.HostSettings{ + AcceptingContracts: true, + Collateral: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + Version: "1.6.0", + }, + Interactions: hostdb.Interactions{ + Uptime: time.Hour * 1000, + LastScan: time.Now(), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + TotalScans: 100, + }, + LastAnnouncement: time.Unix(0, 0), + Scanned: true, }, - Settings: rhpv2.HostSettings{ - AcceptingContracts: true, - Collateral: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), - Version: "1.6.0", - }, - Interactions: hostdb.Interactions{ - Uptime: time.Hour * 1000, - LastScan: time.Now(), - LastScanSuccess: true, - SecondToLastScanSuccess: true, - TotalScans: 100, - }, - LastAnnouncement: time.Unix(0, 0), - Scanned: true, + Blocked: false, }) } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 83e12a206..47a03480f 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -249,7 +249,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.Hosts(ctx, api.GetHostsOptions{}) + hosts, err 
:= c.ap.bus.Hosts(ctx, api.HostsOptions{}) if err != nil { return false, err } @@ -777,7 +777,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts host.PriceTable.HostBlockHeight = cs.BlockHeight // decide whether the host is still good - usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host.Host, minScore, contract.FileSize()) + usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, contract.FileSize()) if !usable { reasons := unusableResult.reasons() toStopUsing[fcid] = strings.Join(reasons, ",") @@ -1297,7 +1297,7 @@ func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uin return minScore } -func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { +func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { start := time.Now() // fetch consensus state @@ -1311,7 +1311,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) // select unused hosts that passed a scan - var unused []hostdb.Host + var unused []hostdb.HostInfo var excluded, notcompletedscan int for _, h := range hosts { // filter out used hosts @@ -1348,7 +1348,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us h.PriceTable.HostBlockHeight = cs.BlockHeight usable, result := isUsableHost(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) if usable { - candidates = append(candidates, scoredHost{h, result.scoreBreakdown.Score()}) + candidates = append(candidates, scoredHost{h.Host, result.scoreBreakdown.Score()}) continue } diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 574862a97..6f8e4f747 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -176,7 +176,7 @@ func (u *unusableHostResult) keysAndValues() []interface{} { // isUsableHost returns whether the given host is usable along with a list of // reasons why it was deemed unusable. -func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.Host, minScore float64, storedData uint64) (bool, unusableHostResult) { +func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.HostInfo, minScore float64, storedData uint64) (bool, unusableHostResult) { if rs.Validate() != nil { panic("invalid redundancy settings were supplied - developer error") } @@ -187,6 +187,8 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. if !h.IsAnnounced() { errs = append(errs, errHostNotAnnounced) + } else if h.Blocked { + errs = append(errs, errHostBlocked) } else if !h.Scanned { errs = append(errs, errHostNotCompletingScan) } else { @@ -211,7 +213,7 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. 
// not gouging, this because the core package does not have overflow // checks in its cost calculations needed to calculate the period // cost - scoreBreakdown = hostScore(cfg, h, storedData, rs.Redundancy()) + scoreBreakdown = hostScore(cfg, h.Host, storedData, rs.Redundancy()) if scoreBreakdown.Score() < minScore { errs = append(errs, fmt.Errorf("%w: (%s): %v < %v", errLowScore, scoreBreakdown.String(), scoreBreakdown.Score(), minScore)) } diff --git a/autopilot/hostinfo.go b/autopilot/hostinfo.go index 82efa1d61..e0cbecadc 100644 --- a/autopilot/hostinfo.go +++ b/autopilot/hostinfo.go @@ -53,7 +53,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api // ignore the pricetable's HostBlockHeight by setting it to our own blockheight host.Host.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host.Host, minScore, storedData) + isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host, minScore, storedData) return api.HostHandlerResponse{ Host: host.Host, Checks: &api.HostHandlerResponseChecks{ @@ -67,7 +67,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api }, nil } -func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.Host) (hi hostInfo, found bool) { +func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.HostInfo) (hi hostInfo, found bool) { // grab host details from cache c.mu.Lock() hi, found = c.cachedHostInfo[host.PublicKey] @@ -157,7 +157,7 @@ func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, a // set IsChecked = false. if usabilityMode == api.UsabilityFilterModeAll { hostInfos = append(hostInfos, api.HostHandlerResponse{ - Host: host, + Host: host.Host, }) if wanted > 0 && len(hostInfos) == wanted { return hostInfos, nil // we're done. 
@@ -170,7 +170,7 @@ func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, a continue } hostInfos = append(hostInfos, api.HostHandlerResponse{ - Host: host, + Host: host.Host, Checks: &api.HostHandlerResponseChecks{ Gouging: hi.UnusableResult.gougingBreakdown.Gouging(), GougingBreakdown: hi.UnusableResult.gougingBreakdown, diff --git a/autopilot/scanner.go b/autopilot/scanner.go index bb21e5022..85301822c 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -31,7 +31,7 @@ type ( // a bit, we currently use inline interfaces to avoid having to update the // scanner tests with every interface change bus interface { - Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb.Host, error) + Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 6214ec4a1..481b78046 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -19,7 +19,7 @@ type mockBus struct { reqs []string } -func (b *mockBus) Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb.Host, error) { +func (b *mockBus) Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) start := opts.Offset @@ -32,11 +32,15 @@ func (b *mockBus) Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb end = len(b.hosts) } - return b.hosts[start:end], nil + his := make([]hostdb.HostInfo, len(b.hosts[start:end])) + for i, h := range b.hosts[start:end] { + his[i] = hostdb.HostInfo{Host: h} + } + return his, nil } func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) { - hosts, err := b.Hosts(ctx, api.GetHostsOptions{ + hosts, err := b.Hosts(ctx, api.HostsOptions{ Offset: opts.Offset, Limit: opts.Limit, }) diff --git a/bus/bus.go b/bus/bus.go index d8b3fdfc5..3838a1877 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -92,13 +92,13 @@ type ( // A HostDB stores information about hosts. 
HostDB interface { Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, offset, limit int) ([]hostdb.Host, error) + Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) ResetLostSectors(ctx context.Context, hk types.PublicKey) error - SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.Host, error) + SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.HostInfo, error) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) HostBlocklist(ctx context.Context) ([]string, error) @@ -758,10 +758,23 @@ func (b *bus) walletPendingHandler(jc jape.Context) { func (b *bus) hostsHandlerGET(jc jape.Context) { offset := 0 limit := -1 - if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil { + filterMode := api.HostFilterModeAllowed + if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil || jc.DecodeForm("filterMode", &filterMode) != nil { return } - hosts, err := b.hdb.Hosts(jc.Request.Context(), offset, limit) + + // validate filterMode + switch filterMode { + case api.HostFilterModeAllowed: + case api.HostFilterModeBlocked: + case api.HostFilterModeAll: + default: + jc.Error(errors.New("invalid filter mode"), http.StatusBadRequest) + return + } + + // fetch hosts + hosts, err := b.hdb.Hosts(jc.Request.Context(), filterMode, offset, limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", offset, offset+limit), err) != nil { return } diff --git a/bus/client/hosts.go b/bus/client/hosts.go index ecf44e52b..70c8b3431 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -30,7 +30,7 @@ func (c *Client) HostBlocklist(ctx context.Context) (blocklist []string, err err } // Hosts returns 'limit' hosts at given 'offset'. -func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []hostdb.Host, err error) { +func (c *Client) Hosts(ctx context.Context, opts api.HostsOptions) (hosts []hostdb.HostInfo, err error) { values := url.Values{} opts.Apply(values) err = c.c.WithContext(ctx).GET("/hosts?"+values.Encode(), &hosts) @@ -78,7 +78,7 @@ func (c *Client) ResetLostSectors(ctx context.Context, hostKey types.PublicKey) } // SearchHosts returns all hosts that match certain search criteria. 
-func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []hostdb.Host, err error) { +func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []hostdb.HostInfo, err error) { err = c.c.WithContext(ctx).POST("/search/hosts", api.SearchHostsRequest{ Offset: opts.Offset, Limit: opts.Limit, diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 64acc2fba..e371f01d4 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -117,7 +117,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -142,7 +142,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -152,7 +152,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 2346f7019..65febbbf7 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -146,7 +146,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. 
- hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := cluster.Bus.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) for _, host := range hosts { hi, err := cluster.Autopilot.HostInfo(host.PublicKey) diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index de948c970..b5f6cccd0 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -84,7 +84,7 @@ func TestHostPruning(t *testing.T) { } // assert the host was not pruned - hostss, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err := b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -96,7 +96,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err = b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hostss) != 0 { return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions) diff --git a/stores/hostdb.go b/stores/hostdb.go index fd23abf4a..101ee298d 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -461,23 +461,25 @@ func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, return hostAddresses, err } -func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.Host, error) { +func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.HostInfo, error) { if offset < 0 { return nil, ErrNegativeOffset } - var hosts []hostdb.Host - var fullHosts []dbHost - // Apply filter mode. + var blocked bool query := ss.db switch filterMode { case api.HostFilterModeAllowed: query = query.Scopes(ss.excludeBlocked) case api.HostFilterModeBlocked: query = query.Scopes(ss.excludeAllowed) + blocked = true case api.HostFilterModeAll: - // nothing to do + // preload allowlist and blocklist + query = query. + Preload("Allowlist"). + Preload("Blocklist") default: return nil, fmt.Errorf("invalid filter mode: %v", filterMode) } @@ -500,12 +502,24 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains }) } + var hosts []hostdb.HostInfo + var fullHosts []dbHost err := query. Offset(offset). Limit(limit). FindInBatches(&fullHosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { for _, fh := range fullHosts { - hosts = append(hosts, fh.convert()) + if filterMode == api.HostFilterModeAll { + hosts = append(hosts, hostdb.HostInfo{ + Host: fh.convert(), + Blocked: ss.isBlocked(fh), + }) + } else { + hosts = append(hosts, hostdb.HostInfo{ + Host: fh.convert(), + Blocked: blocked, + }) + } } return nil }). @@ -517,8 +531,8 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains } // Hosts returns non-blocked hosts at given offset and limit. 
-func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]hostdb.Host, error) { - return ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, offset, limit) +func (ss *SQLStore) Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) { + return ss.SearchHosts(ctx, filterMode, "", nil, offset, limit) } func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 35872ea2d..528700502 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -53,7 +53,7 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.Hosts(ctx, 0, -1) + allHosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) if err != nil { t.Fatal(err) } @@ -171,27 +171,45 @@ func TestSQLHosts(t *testing.T) { hk1, hk2, hk3 := hks[0], hks[1], hks[2] // assert the hosts method returns the expected hosts - if hosts, err := ss.Hosts(ctx, 0, -1); err != nil || len(hosts) != 3 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.Hosts(ctx, 0, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk1 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, 1, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 1, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk2 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, 3, 1); err != nil || len(hosts) != 0 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.Hosts(ctx, -1, -1); err != ErrNegativeOffset { + if _, err := ss.Hosts(ctx, api.HostFilterModeAllowed, -1, -1); err != ErrNegativeOffset { t.Fatal("unexpected error", err) } - // Add a scan for each host. + // add a custom host and block it + hk4 := types.PublicKey{4} + if err := ss.addCustomTestHost(hk4, "host4.com"); err != nil { + t.Fatal("unexpected", err) + } + if err := ss.UpdateHostBlocklistEntries(context.Background(), []string{"host4.com"}, nil, false); err != nil { + t.Fatal("unexpected", err) + } + + // assert host filter mode is applied + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAll, 0, -1); err != nil || len(hosts) != 4 { + t.Fatal("unexpected", len(hosts), err) + } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeBlocked, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { + t.Fatal("unexpected", len(hosts), err) + } + + // add a scan for every non-blocked host n := time.Now() if err := ss.addTestScan(hk1, n.Add(-time.Minute), nil, rhpv2.HostSettings{}); err != nil { t.Fatal(err) @@ -203,39 +221,32 @@ func TestSQLHosts(t *testing.T) { t.Fatal(err) } - // Fetch all hosts using the HostsForScanning method. 
-	hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 3)
+	// fetch all hosts using the HostsForScanning method
+	hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 4)
 	if err != nil {
 		t.Fatal(err)
-	}
-	if len(hostAddresses) != 3 {
+	} else if len(hostAddresses) != 4 {
 		t.Fatal("wrong number of addresses")
-	}
-	if hostAddresses[0].PublicKey != hk3 {
-		t.Fatal("wrong key")
-	}
-	if hostAddresses[1].PublicKey != hk2 {
-		t.Fatal("wrong key")
-	}
-	if hostAddresses[2].PublicKey != hk1 {
+	} else if hostAddresses[0].PublicKey != hk4 ||
+		hostAddresses[1].PublicKey != hk3 ||
+		hostAddresses[2].PublicKey != hk2 ||
+		hostAddresses[3].PublicKey != hk1 {
 		t.Fatal("wrong key")
 	}
 
-	// Fetch one host by setting the cutoff exactly to hk2.
-	hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, 3)
+	// fetch one host by setting the cutoff exactly to hk3
+	hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-3*time.Minute), 0, -1)
 	if err != nil {
 		t.Fatal(err)
-	}
-	if len(hostAddresses) != 1 {
+	} else if len(hostAddresses) != 1 {
 		t.Fatal("wrong number of addresses")
 	}
 
-	// Fetch no hosts.
+	// fetch no hosts
 	hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3)
 	if err != nil {
 		t.Fatal(err)
-	}
-	if len(hostAddresses) != 0 {
+	} else if len(hostAddresses) != 0 {
 		t.Fatal("wrong number of addresses")
 	}
 }
@@ -595,7 +606,7 @@ func TestSQLHostAllowlist(t *testing.T) {
 
 	numHosts := func() int {
 		t.Helper()
-		hosts, err := ss.Hosts(ctx, 0, -1)
+		hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -767,7 +778,7 @@ func TestSQLHostBlocklist(t *testing.T) {
 
 	numHosts := func() int {
 		t.Helper()
-		hosts, err := ss.Hosts(ctx, 0, -1)
+		hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1)
 		if err != nil {
 			t.Fatal(err)
 		}

From dabe9838bf48351cdbfe376d2ce370fe1ed4db0e Mon Sep 17 00:00:00 2001
From: PJ
Date: Tue, 19 Mar 2024 11:21:01 +0100
Subject: [PATCH 2/5] autopilot: update host filter

---
 autopilot/hostfilter.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go
index 6f8e4f747..8de37221a 100644
--- a/autopilot/hostfilter.go
+++ b/autopilot/hostfilter.go
@@ -182,13 +182,14 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.
 	}
 
 	var errs []error
+	if h.Blocked {
+		errs = append(errs, errHostBlocked)
+	}
+
 	var gougingBreakdown api.HostGougingBreakdown
 	var scoreBreakdown api.HostScoreBreakdown
-
 	if !h.IsAnnounced() {
 		errs = append(errs, errHostNotAnnounced)
-	} else if h.Blocked {
-		errs = append(errs, errHostBlocked)
 	} else if !h.Scanned {
 		errs = append(errs, errHostNotCompletingScan)
 	} else {

From 36021ab5deef1755269106ba6b7ca0a3afb7c695 Mon Sep 17 00:00:00 2001
From: PJ
Date: Tue, 19 Mar 2024 21:28:33 +0100
Subject: [PATCH 3/5] bus: deprecate /hosts endpoint in favour of /search/hosts

---
 api/host.go                         |  15 ++--
 autopilot/autopilot.go              |   3 +-
 autopilot/contractor.go             |   2 +-
 autopilot/scanner.go                |   2 +-
 autopilot/scanner_test.go           |   4 +-
 bus/bus.go                          |  24 ++----
 bus/client/hosts.go                 |   2 +-
 internal/test/e2e/blocklist_test.go |   8 +-
 internal/test/e2e/cluster_test.go   |   2 +-
 internal/test/e2e/pruning_test.go   |   4 +-
 stores/hostdb.go                    |   5 --
 stores/hostdb_test.go               | 125 +++++++++++++++-------------
 12 files changed, 93 insertions(+), 103 deletions(-)

diff --git a/api/host.go b/api/host.go
index ba66ffd58..2a9df5f6b 100644
--- a/api/host.go
+++ b/api/host.go
@@ -70,16 +70,16 @@ type (
 
 // Option types.
type ( - HostsOptions struct { - Offset int - Limit int - FilterMode string + GetHostsOptions struct { + Offset int + Limit int } HostsForScanningOptions struct { MaxLastScan TimeRFC3339 Limit int Offset int } + SearchHostOptions struct { AddressContains string FilterMode string @@ -92,20 +92,17 @@ type ( func DefaultSearchHostOptions() SearchHostOptions { return SearchHostOptions{ Limit: -1, - FilterMode: HostFilterModeAll, + FilterMode: HostFilterModeAllowed, } } -func (opts HostsOptions) Apply(values url.Values) { +func (opts GetHostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) } if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } - if opts.FilterMode != "" { - values.Set("filterMode", opts.FilterMode) - } } func (opts HostsForScanningOptions) Apply(values url.Values) { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index d8a760265..3b90a8329 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -54,7 +54,6 @@ type Bus interface { // hostdb Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) @@ -196,7 +195,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { state := ap.State() // fetch hosts - hosts, err := ap.bus.Hosts(ctx, api.HostsOptions{}) + hosts, err := ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) if jc.Check("failed to get hosts", err) != nil { return } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 47a03480f..ef64f630b 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -249,7 +249,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.Hosts(ctx, api.HostsOptions{}) + hosts, err := c.ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) if err != nil { return false, err } diff --git a/autopilot/scanner.go b/autopilot/scanner.go index 85301822c..76643e5b5 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -31,7 +31,7 @@ type ( // a bit, we currently use inline interfaces to avoid having to update the // scanner tests with every interface change bus interface { - Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 481b78046..1cdd096d2 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -19,7 +19,7 @@ type mockBus struct { reqs []string } -func (b *mockBus) Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) { +func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) start := opts.Offset @@ -40,7 +40,7 @@ func (b *mockBus) Hosts(ctx context.Context, opts 
api.HostsOptions) ([]hostdb.Ho } func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) { - hosts, err := b.Hosts(ctx, api.HostsOptions{ + hosts, err := b.SearchHosts(ctx, api.SearchHostOptions{ Offset: opts.Offset, Limit: opts.Limit, }) diff --git a/bus/bus.go b/bus/bus.go index 3838a1877..0a0614cbf 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -92,7 +92,6 @@ type ( // A HostDB stores information about hosts. HostDB interface { Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error @@ -285,7 +284,7 @@ func (b *bus) Handler() http.Handler { "GET /contract/:id/roots": b.contractIDRootsHandlerGET, "GET /contract/:id/size": b.contractSizeHandlerGET, - "GET /hosts": b.hostsHandlerGET, + "GET /hosts": b.hostsHandlerGETDeprecated, "GET /hosts/allowlist": b.hostsAllowlistHandlerGET, "PUT /hosts/allowlist": b.hostsAllowlistHandlerPUT, "GET /hosts/blocklist": b.hostsBlocklistHandlerGET, @@ -755,26 +754,15 @@ func (b *bus) walletPendingHandler(jc jape.Context) { jc.Encode(relevant) } -func (b *bus) hostsHandlerGET(jc jape.Context) { +func (b *bus) hostsHandlerGETDeprecated(jc jape.Context) { offset := 0 limit := -1 - filterMode := api.HostFilterModeAllowed - if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil || jc.DecodeForm("filterMode", &filterMode) != nil { - return - } - - // validate filterMode - switch filterMode { - case api.HostFilterModeAllowed: - case api.HostFilterModeBlocked: - case api.HostFilterModeAll: - default: - jc.Error(errors.New("invalid filter mode"), http.StatusBadRequest) + if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil { return } // fetch hosts - hosts, err := b.hdb.Hosts(jc.Request.Context(), filterMode, offset, limit) + hosts, err := b.hdb.SearchHosts(jc.Request.Context(), api.HostFilterModeAllowed, "", nil, offset, limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", offset, offset+limit), err) != nil { return } @@ -786,6 +774,10 @@ func (b *bus) searchHostsHandlerPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } + + // TODO: on the next major release we should: + // - remove api.DefaultSearchHostOptions and set defaults in the handler + // - validate the filter mode here and return a 400 hosts, err := b.hdb.SearchHosts(jc.Request.Context(), req.FilterMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return diff --git a/bus/client/hosts.go b/bus/client/hosts.go index 70c8b3431..1ebf14e1f 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -30,7 +30,7 @@ func (c *Client) HostBlocklist(ctx context.Context) (blocklist []string, err err } // Hosts returns 'limit' hosts at given 'offset'. 
-func (c *Client) Hosts(ctx context.Context, opts api.HostsOptions) (hosts []hostdb.HostInfo, err error) { +func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []hostdb.HostInfo, err error) { values := url.Values{} opts.Apply(values) err = c.c.WithContext(ctx).GET("/hosts?"+values.Encode(), &hosts) diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index e371f01d4..06f7e133d 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -23,6 +23,8 @@ func TestBlocklist(t *testing.T) { hosts: 3, }) defer cluster.Shutdown() + + // convenience variables b := cluster.Bus tt := cluster.tt @@ -117,7 +119,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.Hosts(context.Background(), api.HostsOptions{}) + hosts, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -142,7 +144,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) + hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -152,7 +154,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) + hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 65febbbf7..69ac90391 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -146,7 +146,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. - hosts, err := cluster.Bus.Hosts(context.Background(), api.HostsOptions{}) + hosts, err := cluster.Bus.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) for _, host := range hosts { hi, err := cluster.Autopilot.HostInfo(host.PublicKey) diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index b5f6cccd0..060152167 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -84,7 +84,7 @@ func TestHostPruning(t *testing.T) { } // assert the host was not pruned - hostss, err := b.Hosts(context.Background(), api.HostsOptions{}) + hostss, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -96,7 +96,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = b.Hosts(context.Background(), api.HostsOptions{}) + hostss, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hostss) != 0 { return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions) diff --git a/stores/hostdb.go b/stores/hostdb.go index 101ee298d..0a6fb00f6 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -530,11 +530,6 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains return hosts, err } -// Hosts returns non-blocked hosts at given offset and limit. 
-func (ss *SQLStore) Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) { - return ss.SearchHosts(ctx, filterMode, "", nil, offset, limit) -} - func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { // sanity check 'maxDowntime' if maxDowntime < 0 { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 528700502..735ea4190 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -53,7 +53,7 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) + allHosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) if err != nil { t.Fatal(err) } @@ -158,39 +158,65 @@ func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, setti }) } -// TestSQLHosts tests the Hosts method of the SQLHostDB type. -func TestSQLHosts(t *testing.T) { +// TestSearchHosts is a unit tests for the SearchHosts method. +func TestSearchHosts(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() ctx := context.Background() - hks, err := ss.addTestHosts(3) - if err != nil { - t.Fatal(err) + // add 3 hosts + var hks []types.PublicKey + for i := 1; i <= 3; i++ { + if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i)); err != nil { + t.Fatal(err) + } + hks = append(hks, types.PublicKey{byte(i)}) } hk1, hk2, hk3 := hks[0], hks[1], hks[2] - // assert the hosts method returns the expected hosts - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { + // assert defaults return all hosts + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, 1); err != nil || len(hosts) != 1 { + + // assert we can search using offset and limit + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk1 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 1, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 1, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk2 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 3, 1); err != nil || len(hosts) != 0 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.Hosts(ctx, api.HostFilterModeAllowed, -1, -1); err != ErrNegativeOffset { + if _, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, -1, -1); err != ErrNegativeOffset { t.Fatal("unexpected error", err) } + // assert we can search by address + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + + // assert we can search by key + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { + t.Fatal("unexpected", len(hosts), 
err) + } + + // assert we can search by address and key + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + + // assert we can search by key and limit + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + // add a custom host and block it hk4 := types.PublicKey{4} if err := ss.addCustomTestHost(hk4, "host4.com"); err != nil { @@ -201,13 +227,27 @@ func TestSQLHosts(t *testing.T) { } // assert host filter mode is applied - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAll, 0, -1); err != nil || len(hosts) != 4 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", nil, 0, -1); err != nil || len(hosts) != 4 { t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeBlocked, 0, -1); err != nil || len(hosts) != 1 { + } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeBlocked, "", nil, 0, -1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { + } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } +} + +// TestHostsForScanning is a unit test for the HostsForScanning method. +func TestHostsForScanning(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + ctx := context.Background() + + // add 3 hosts + hks, err := ss.addTestHosts(3) + if err != nil { + t.Fatal(err) + } + hk1, hk2, hk3 := hks[0], hks[1], hks[2] // add a scan for every non-blocked host n := time.Now() @@ -222,20 +262,19 @@ func TestSQLHosts(t *testing.T) { } // fetch all hosts using the HostsForScanning method - hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 4) + hostAddresses, err := ss.HostsForScanning(ctx, n, 0, -1) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 4 { + } else if len(hostAddresses) != 3 { t.Fatal("wrong number of addresses") - } else if hostAddresses[0].PublicKey != hk4 || - hostAddresses[1].PublicKey != hk3 || - hostAddresses[2].PublicKey != hk2 || - hostAddresses[3].PublicKey != hk1 { + } else if hostAddresses[0].PublicKey != hk3 || + hostAddresses[1].PublicKey != hk2 || + hostAddresses[2].PublicKey != hk1 { t.Fatal("wrong key") } - // fetch one host by setting the cutoff exactly to hk3 - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-3*time.Minute), 0, -1) + // fetch one host by setting the cutoff exactly to hk2 + hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, -1) if err != nil { t.Fatal(err) } else if len(hostAddresses) != 1 { @@ -243,7 +282,7 @@ func TestSQLHosts(t *testing.T) { } // fetch no hosts - hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) + hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, -1) if err != nil { t.Fatal(err) } else if len(hostAddresses) != 0 { @@ -251,40 +290,6 @@ func TestSQLHosts(t *testing.T) { } } -// TestSearchHosts is a unit test for SearchHosts. 
-func TestSearchHosts(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - ctx := context.Background() - - // add 3 hosts - var hks []types.PublicKey - for i := 0; i < 3; i++ { - if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i+1)); err != nil { - t.Fatal(err) - } - hks = append(hks, types.PublicKey{byte(i)}) - } - hk1, hk2, hk3 := hks[0], hks[1], hks[2] - - // Search by address. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - // Filter by key. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { - t.Fatal("unexpected", len(hosts), err) - } - // Filter by address and key. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - // Filter by key and limit results - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } -} - // TestRecordScan is a test for recording scans. func TestRecordScan(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) @@ -606,7 +611,7 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) + hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) if err != nil { t.Fatal(err) } @@ -778,7 +783,7 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) + hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) if err != nil { t.Fatal(err) } From ef357f6e14a7b02c610cfedf5be89caaf041cc5a Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 09:34:57 +0100 Subject: [PATCH 4/5] stores: revert changes --- api/host.go | 7 -- autopilot/autopilot.go | 2 +- autopilot/contractor.go | 2 +- internal/test/e2e/blocklist_test.go | 8 +- internal/test/e2e/cluster_test.go | 2 +- internal/test/e2e/pruning_test.go | 4 +- stores/hostdb.go | 5 + stores/hostdb_test.go | 154 +++++++++++++--------------- 8 files changed, 82 insertions(+), 102 deletions(-) diff --git a/api/host.go b/api/host.go index 2a9df5f6b..5536f755c 100644 --- a/api/host.go +++ b/api/host.go @@ -89,13 +89,6 @@ type ( } ) -func DefaultSearchHostOptions() SearchHostOptions { - return SearchHostOptions{ - Limit: -1, - FilterMode: HostFilterModeAllowed, - } -} - func (opts GetHostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 3b90a8329..111dadb6c 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -195,7 +195,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { state := ap.State() // fetch hosts - hosts, err := ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) + hosts, err := ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) if jc.Check("failed to get hosts", err) != nil { return } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index ef64f630b..43ac5e629 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -249,7 +249,7 @@ func (c *contractor) 
performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) + hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) if err != nil { return false, err } diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 06f7e133d..64acc2fba 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -23,8 +23,6 @@ func TestBlocklist(t *testing.T) { hosts: 3, }) defer cluster.Shutdown() - - // convenience variables b := cluster.Bus tt := cluster.tt @@ -119,7 +117,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err := b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -144,7 +142,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -154,7 +152,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 69ac90391..2346f7019 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -146,7 +146,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. - hosts, err := cluster.Bus.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) for _, host := range hosts { hi, err := cluster.Autopilot.HostInfo(host.PublicKey) diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index 060152167..de948c970 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -84,7 +84,7 @@ func TestHostPruning(t *testing.T) { } // assert the host was not pruned - hostss, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hostss, err := b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -96,7 +96,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hostss, err = b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hostss) != 0 { return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions) diff --git a/stores/hostdb.go b/stores/hostdb.go index 0a6fb00f6..95e37a26c 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -530,6 +530,11 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains return hosts, err } +// Hosts returns non-blocked hosts at given offset and limit. 
+func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]hostdb.HostInfo, error) { + return ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, offset, limit) +} + func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { // sanity check 'maxDowntime' if maxDowntime < 0 { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 735ea4190..35872ea2d 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -53,7 +53,7 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) + allHosts, err := ss.Hosts(ctx, 0, -1) if err != nil { t.Fatal(err) } @@ -158,98 +158,40 @@ func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, setti }) } -// TestSearchHosts is a unit tests for the SearchHosts method. -func TestSearchHosts(t *testing.T) { +// TestSQLHosts tests the Hosts method of the SQLHostDB type. +func TestSQLHosts(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() ctx := context.Background() - // add 3 hosts - var hks []types.PublicKey - for i := 1; i <= 3; i++ { - if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i)); err != nil { - t.Fatal(err) - } - hks = append(hks, types.PublicKey{byte(i)}) + hks, err := ss.addTestHosts(3) + if err != nil { + t.Fatal(err) } hk1, hk2, hk3 := hks[0], hks[1], hks[2] - // assert defaults return all hosts - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { + // assert the hosts method returns the expected hosts + if hosts, err := ss.Hosts(ctx, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } - - // assert we can search using offset and limit - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, 0, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk1 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 1, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, 1, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk2 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 3, 1); err != nil || len(hosts) != 0 { + if hosts, err := ss.Hosts(ctx, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, -1, -1); err != ErrNegativeOffset { + if _, err := ss.Hosts(ctx, -1, -1); err != ErrNegativeOffset { t.Fatal("unexpected error", err) } - // assert we can search by address - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - - // assert we can search by key - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { - t.Fatal("unexpected", len(hosts), err) - } - - // assert we can search by address and key - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != 
nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - - // assert we can search by key and limit - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - - // add a custom host and block it - hk4 := types.PublicKey{4} - if err := ss.addCustomTestHost(hk4, "host4.com"); err != nil { - t.Fatal("unexpected", err) - } - if err := ss.UpdateHostBlocklistEntries(context.Background(), []string{"host4.com"}, nil, false); err != nil { - t.Fatal("unexpected", err) - } - - // assert host filter mode is applied - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", nil, 0, -1); err != nil || len(hosts) != 4 { - t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeBlocked, "", nil, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { - t.Fatal("unexpected", len(hosts), err) - } -} - -// TestHostsForScanning is a unit test for the HostsForScanning method. -func TestHostsForScanning(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - ctx := context.Background() - - // add 3 hosts - hks, err := ss.addTestHosts(3) - if err != nil { - t.Fatal(err) - } - hk1, hk2, hk3 := hks[0], hks[1], hks[2] - - // add a scan for every non-blocked host + // Add a scan for each host. n := time.Now() if err := ss.addTestScan(hk1, n.Add(-time.Minute), nil, rhpv2.HostSettings{}); err != nil { t.Fatal(err) @@ -261,35 +203,77 @@ func TestHostsForScanning(t *testing.T) { t.Fatal(err) } - // fetch all hosts using the HostsForScanning method - hostAddresses, err := ss.HostsForScanning(ctx, n, 0, -1) + // Fetch all hosts using the HostsForScanning method. + hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 3) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 3 { + } + if len(hostAddresses) != 3 { t.Fatal("wrong number of addresses") - } else if hostAddresses[0].PublicKey != hk3 || - hostAddresses[1].PublicKey != hk2 || - hostAddresses[2].PublicKey != hk1 { + } + if hostAddresses[0].PublicKey != hk3 { + t.Fatal("wrong key") + } + if hostAddresses[1].PublicKey != hk2 { + t.Fatal("wrong key") + } + if hostAddresses[2].PublicKey != hk1 { t.Fatal("wrong key") } - // fetch one host by setting the cutoff exactly to hk2 - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, -1) + // Fetch one host by setting the cutoff exactly to hk2. + hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, 3) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 1 { + } + if len(hostAddresses) != 1 { t.Fatal("wrong number of addresses") } - // fetch no hosts - hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, -1) + // Fetch no hosts. + hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 0 { + } + if len(hostAddresses) != 0 { t.Fatal("wrong number of addresses") } } +// TestSearchHosts is a unit test for SearchHosts. 
+func TestSearchHosts(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + ctx := context.Background() + + // add 3 hosts + var hks []types.PublicKey + for i := 0; i < 3; i++ { + if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i+1)); err != nil { + t.Fatal(err) + } + hks = append(hks, types.PublicKey{byte(i)}) + } + hk1, hk2, hk3 := hks[0], hks[1], hks[2] + + // Search by address. + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + // Filter by key. + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { + t.Fatal("unexpected", len(hosts), err) + } + // Filter by address and key. + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + // Filter by key and limit results + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } +} + // TestRecordScan is a test for recording scans. func TestRecordScan(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) @@ -611,7 +595,7 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) + hosts, err := ss.Hosts(ctx, 0, -1) if err != nil { t.Fatal(err) } @@ -783,7 +767,7 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) + hosts, err := ss.Hosts(ctx, 0, -1) if err != nil { t.Fatal(err) } From e71862eef3798fe646fdb5060d87c3d1b3f17541 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 09:37:36 +0100 Subject: [PATCH 5/5] all: cleanup PR --- bus/bus.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index 0a0614cbf..7d33964be 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -775,9 +775,9 @@ func (b *bus) searchHostsHandlerPOST(jc jape.Context) { return } - // TODO: on the next major release we should: - // - remove api.DefaultSearchHostOptions and set defaults in the handler - // - validate the filter mode here and return a 400 + // TODO: on the next major release + // - set defaults in handler + // - validate request params and return 400 if invalid hosts, err := b.hdb.SearchHosts(jc.Request.Context(), req.FilterMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return