diff --git a/api/autopilot.go b/api/autopilot.go index 6283f64f3..fdd6c4942 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -91,9 +91,37 @@ type ( StartTime TimeRFC3339 `json:"startTime"` BuildState } -) -type ( + ConfigEvaluationRequest struct { + AutopilotConfig AutopilotConfig `json:"autopilotConfig"` + GougingSettings GougingSettings `json:"gougingSettings"` + RedundancySettings RedundancySettings `json:"redundancySettings"` + } + + ConfigRecommendation struct { + GougingSettings GougingSettings `json:"gougingSettings"` + } + + // ConfigEvaluationResponse is the response type for /evaluate + ConfigEvaluationResponse struct { + Hosts uint64 `json:"hosts"` + Usable uint64 `json:"usable"` + Unusable struct { + Blocked uint64 `json:"blocked"` + Gouging struct { + Contract uint64 `json:"contract"` + Download uint64 `json:"download"` + Gouging uint64 `json:"gouging"` + Pruning uint64 `json:"pruning"` + Upload uint64 `json:"upload"` + } `json:"gouging"` + NotAcceptingContracts uint64 `json:"notAcceptingContracts"` + NotScanned uint64 `json:"notScanned"` + Unknown uint64 `json:"unknown"` + } + Recommendation *ConfigRecommendation `json:"recommendation,omitempty"` + } + // HostHandlerResponse is the response type for the /host/:hostkey endpoint. 
HostHandlerResponse struct { Host hostdb.Host `json:"host"` diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index a7cb225ba..7367003e0 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -21,6 +21,7 @@ import ( "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" + "go.sia.tech/renterd/worker" "go.uber.org/zap" ) @@ -166,6 +167,7 @@ func (ap *Autopilot) Handler() http.Handler { return jape.Mux(map[string]jape.Handler{ "GET /config": ap.configHandlerGET, "PUT /config": ap.configHandlerPUT, + "POST /config": ap.configHandlerPOST, "POST /hosts": ap.hostsHandlerPOST, "GET /host/:hostKey": ap.hostHandlerGET, "GET /state": ap.stateHandlerGET, @@ -173,6 +175,35 @@ func (ap *Autopilot) Handler() http.Handler { }) } +func (ap *Autopilot) configHandlerPOST(jc jape.Context) { + ctx := jc.Request.Context() + + // decode request + var req api.ConfigEvaluationRequest + if jc.Decode(&req) != nil { + return + } + + // fetch necessary information + cfg := req.AutopilotConfig + gs := req.GougingSettings + rs := req.RedundancySettings + cs, err := ap.bus.ConsensusState(ctx) + if jc.Check("failed to get consensus state", err) != nil { + return + } + state := ap.State() + + // fetch hosts + hosts, err := ap.bus.Hosts(ctx, api.GetHostsOptions{}) + if jc.Check("failed to get hosts", err) != nil { + return + } + + // evaluate the config + jc.Encode(evaluateConfig(cfg, cs, state.fee, state.period, rs, gs, hosts)) +} + func (ap *Autopilot) Run() error { ap.startStopMu.Lock() if ap.isRunning() { @@ -702,3 +733,172 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { } jc.Encode(hosts) } + +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + for _, host := range hosts { + usable, _ := 
isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) + if usable { + usables++ + } + } + return +} + +// evaluateConfig evaluates the given configuration and if the gouging settings +// are too strict for the number of contracts required by 'cfg', it will provide +// a recommendation on how to loosen it. +func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (resp api.ConfigEvaluationResponse) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + + resp.Hosts = uint64(len(hosts)) + for _, host := range hosts { + usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) + if usable { + resp.Usable++ + continue + } + if usableBreakdown.blocked > 0 { + resp.Unusable.Blocked++ + } + if usableBreakdown.notacceptingcontracts > 0 { + resp.Unusable.NotAcceptingContracts++ + } + if usableBreakdown.notcompletingscan > 0 { + resp.Unusable.NotScanned++ + } + if usableBreakdown.unknown > 0 { + resp.Unusable.Unknown++ + } + if usableBreakdown.gougingBreakdown.ContractErr != "" { + resp.Unusable.Gouging.Contract++ + } + if usableBreakdown.gougingBreakdown.DownloadErr != "" { + resp.Unusable.Gouging.Download++ + } + if usableBreakdown.gougingBreakdown.GougingErr != "" { + resp.Unusable.Gouging.Gouging++ + } + if usableBreakdown.gougingBreakdown.PruneErr != "" { + resp.Unusable.Gouging.Pruning++ + } + if usableBreakdown.gougingBreakdown.UploadErr != "" { + resp.Unusable.Gouging.Upload++ + } + } + + if resp.Usable >= cfg.Contracts.Amount { + return // no recommendation needed + } + + // optimise gouging settings + maxGS := func() api.GougingSettings { + return api.GougingSettings{ + // these are the fields we optimise one-by-one + MaxRPCPrice: types.MaxCurrency, + MaxContractPrice: types.MaxCurrency, + MaxDownloadPrice: types.MaxCurrency, + MaxUploadPrice: types.MaxCurrency, + MaxStoragePrice: 
types.MaxCurrency, + + // these are not optimised, so we keep the same values as the user + // provided + MinMaxCollateral: gs.MinMaxCollateral, + HostBlockHeightLeeway: gs.HostBlockHeightLeeway, + MinPriceTableValidity: gs.MinPriceTableValidity, + MinAccountExpiry: gs.MinAccountExpiry, + MinMaxEphemeralAccountBalance: gs.MinMaxEphemeralAccountBalance, + MigrationSurchargeMultiplier: gs.MigrationSurchargeMultiplier, + } + } + + // use the input gouging settings as the starting point and try to optimise + // each field independent of the other fields we want to optimise + optimisedGS := gs + success := false + + // MaxRPCPrice + tmpGS := maxGS() + tmpGS.MaxRPCPrice = gs.MaxRPCPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxRPCPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxRPCPrice = tmpGS.MaxRPCPrice + success = true + } + // MaxContractPrice + tmpGS = maxGS() + tmpGS.MaxContractPrice = gs.MaxContractPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxContractPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxContractPrice = tmpGS.MaxContractPrice + success = true + } + // MaxDownloadPrice + tmpGS = maxGS() + tmpGS.MaxDownloadPrice = gs.MaxDownloadPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxDownloadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxDownloadPrice = tmpGS.MaxDownloadPrice + success = true + } + // MaxUploadPrice + tmpGS = maxGS() + tmpGS.MaxUploadPrice = gs.MaxUploadPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxUploadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxUploadPrice = tmpGS.MaxUploadPrice + success = true + } + // MaxStoragePrice + tmpGS = maxGS() + tmpGS.MaxStoragePrice = gs.MaxStoragePrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxStoragePrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxStoragePrice = tmpGS.MaxStoragePrice + success = true + } + // If one of the optimisations was successful, we return the optimised + // gouging settings 
+ if success { + resp.Recommendation = &api.ConfigRecommendation{ + GougingSettings: optimisedGS, + } + } + return +} + +// optimiseGougingSetting tries to optimise one field of the gouging settings to +// try and hit the target number of contracts. +func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.Host) bool { + if cfg.Contracts.Amount == 0 { + return true // nothing to do + } + stepSize := []uint64{200, 150, 125, 110, 105} + maxSteps := 12 + + stepIdx := 0 + nSteps := 0 + prevVal := *field // to keep accurate value + for { + nUsable := countUsableHosts(cfg, cs, fee, currentPeriod, rs, *gs, hosts) + targetHit := nUsable >= cfg.Contracts.Amount + + if targetHit && nSteps == 0 { + return true // target already hit without optimising + } else if targetHit && stepIdx == len(stepSize)-1 { + return true // target hit after optimising + } else if targetHit { + // move one step back and decrease step size + stepIdx++ + nSteps-- + *field = prevVal + } else if nSteps >= maxSteps { + return false // ran out of steps + } + + // apply next step + prevVal = *field + newValue, overflow := prevVal.Mul64WithOverflow(stepSize[stepIdx]) + if overflow { + return false + } + newValue = newValue.Div64(100) + *field = newValue + nSteps++ + } +} diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go new file mode 100644 index 000000000..f818c312b --- /dev/null +++ b/autopilot/autopilot_test.go @@ -0,0 +1,115 @@ +package autopilot + +import ( + "math" + "testing" + "time" + + rhpv2 "go.sia.tech/core/rhp/v2" + rhpv3 "go.sia.tech/core/rhp/v3" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/hostdb" +) + +func TestOptimiseGougingSetting(t *testing.T) { + // create 10 hosts that should all be usable + var hosts []hostdb.Host + for i := 0; i < 10; i++ { + hosts = append(hosts, hostdb.Host{ + 
KnownSince: time.Unix(0, 0), + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{ + CollateralCost: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + }, + }, + Settings: rhpv2.HostSettings{ + AcceptingContracts: true, + Collateral: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + Version: "1.6.0", + }, + Interactions: hostdb.Interactions{ + Uptime: time.Hour * 1000, + LastScan: time.Now(), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + TotalScans: 100, + }, + LastAnnouncement: time.Unix(0, 0), + Scanned: true, + }) + } + + // prepare settings that result in all hosts being usable + cfg := api.AutopilotConfig{ + Contracts: api.ContractsConfig{ + Allowance: types.Siacoins(100000), + Amount: 10, + }, + Hosts: api.HostsConfig{}, + } + cs := api.ConsensusState{ + BlockHeight: 100, + LastBlockTime: api.TimeNow(), + Synced: true, + } + fee := types.ZeroCurrency + rs := api.RedundancySettings{MinShards: 10, TotalShards: 30} + gs := api.GougingSettings{ + MaxRPCPrice: types.Siacoins(1), + MaxContractPrice: types.Siacoins(1), + MaxDownloadPrice: types.Siacoins(1), + MaxUploadPrice: types.Siacoins(1), + MaxStoragePrice: types.Siacoins(1), + HostBlockHeightLeeway: math.MaxInt32, + } + + // confirm all hosts are usable + assertUsable := func(n int) { + t.Helper() + nUsable := countUsableHosts(cfg, cs, fee, 0, rs, gs, hosts) + if nUsable != uint64(n) { + t.Fatalf("expected %v usable hosts, got %v", n, nUsable) + } + } + assertUsable(len(hosts)) + + // Case1: test optimising a field which gets us back to a full set of hosts + for i := range hosts { + hosts[i].Settings.StoragePrice = types.Siacoins(uint32(i + 1)) + } + assertUsable(1) + if !optimiseGougingSetting(&gs, &gs.MaxStoragePrice, cfg, cs, fee, 0, rs, hosts) { + t.Fatal("optimising failed") + } + assertUsable(len(hosts)) + if gs.MaxStoragePrice.ExactString() != "10164000000000000000000000" { // 10.164 SC + t.Fatal("unexpected storage price",
gs.MaxStoragePrice.ExactString()) } + + // Case2: test optimising a field where we can't get back to a full set of + // hosts + hosts[0].Settings.StoragePrice = types.Siacoins(100000) + assertUsable(9) + if optimiseGougingSetting(&gs, &gs.MaxStoragePrice, cfg, cs, fee, 0, rs, hosts) { + t.Fatal("optimising succeeded") + } + if gs.MaxStoragePrice.ExactString() != "41631744000000000000000000000" { // ~41.63 KS + t.Fatal("unexpected storage price", gs.MaxStoragePrice.ExactString()) + } + + // Case3: force overflow + for i := range hosts { + hosts[i].Settings.StoragePrice = types.MaxCurrency + } + gs.MaxStoragePrice = types.MaxCurrency.Sub(types.Siacoins(1)) + assertUsable(0) + if optimiseGougingSetting(&gs, &gs.MaxStoragePrice, cfg, cs, fee, 0, rs, hosts) { + t.Fatal("optimising succeeded") + } + if gs.MaxStoragePrice.ExactString() != "340282366920937463463374607431768211455" { // ~340.3 TS + t.Fatal("unexpected storage price", gs.MaxStoragePrice.ExactString()) + } +} diff --git a/autopilot/client.go b/autopilot/client.go index 35e3981aa..ba16754a5 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -64,3 +64,14 @@ func (c *Client) Trigger(forceScan bool) (_ bool, err error) { err = c.c.POST("/trigger", api.AutopilotTriggerRequest{ForceScan: forceScan}, &resp) return resp.Triggered, err } + +// EvaluateConfig evaluates an autopilot config using the given gouging and +// redundancy settings.
+func (c *Client) EvaluateConfig(ctx context.Context, cfg api.AutopilotConfig, gs api.GougingSettings, rs api.RedundancySettings) (resp api.ConfigEvaluationResponse, err error) { + err = c.c.WithContext(ctx).POST("/config", api.ConfigEvaluationRequest{ + AutopilotConfig: cfg, + GougingSettings: gs, + RedundancySettings: rs, + }, &resp) + return +} diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 9e2b52cca..188b55661 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -267,7 +267,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch candidate hosts - candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, math.SmallestNonzeroFloat64) // avoid 0 score hosts + candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, smallestValidScore) // avoid 0 score hosts if err != nil { return false, err } @@ -1249,7 +1249,7 @@ func (c *contractor) calculateMinScore(ctx context.Context, candidates []scoredH // return early if there's no hosts if len(candidates) == 0 { c.logger.Warn("min host score is set to the smallest non-zero float because there are no candidate hosts") - return math.SmallestNonzeroFloat64 + return smallestValidScore } // determine the number of random hosts we fetch per iteration when @@ -1283,7 +1283,7 @@ func (c *contractor) calculateMinScore(ctx context.Context, candidates []scoredH return candidates[i].score > candidates[j].score }) if len(candidates) < int(numContracts) { - return math.SmallestNonzeroFloat64 + return smallestValidScore } else if cutoff := candidates[numContracts-1].score; minScore > cutoff { minScore = cutoff } diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 2ebc81f38..574862a97 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -204,7 +204,7 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. 
gougingBreakdown = gc.Check(&h.Settings, &h.PriceTable.HostPriceTable) if gougingBreakdown.Gouging() { errs = append(errs, fmt.Errorf("%w: %v", errHostPriceGouging, gougingBreakdown)) - } else { + } else if minScore > 0 { // perform scoring checks // // NOTE: only perform these scoring checks if we know the host is diff --git a/autopilot/hostscore.go b/autopilot/hostscore.go index f0f103c6c..b15857d19 100644 --- a/autopilot/hostscore.go +++ b/autopilot/hostscore.go @@ -13,6 +13,8 @@ import ( "go.sia.tech/siad/build" ) +const smallestValidScore = math.SmallestNonzeroFloat64 + func hostScore(cfg api.AutopilotConfig, h hostdb.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown { // idealDataPerHost is the amount of data that we would have to put on each // host assuming that our storage requirements were spread evenly across diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 60e2f9b5e..68dc264eb 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -53,17 +53,21 @@ func TestGouging(t *testing.T) { t.Fatal("unexpected data") } + // update the gouging settings to limit the max storage price to 100H + gs := test.GougingSettings + gs.MaxStoragePrice = types.NewCurrency64(100) + if err := b.UpdateSetting(context.Background(), api.SettingGouging, gs); err != nil { + t.Fatal(err) + } // fetch current contract set contracts, err := b.Contracts(context.Background(), api.ContractsOpts{ContractSet: cfg.Set}) tt.OK(err) - // update the host settings so it's gouging + // update one host's settings so it's gouging hk := contracts[0].HostKey host := hostsMap[hk.String()] settings := host.settings.Settings() - settings.IngressPrice = types.Siacoins(1) - settings.EgressPrice = types.Siacoins(1) - settings.ContractPrice = types.Siacoins(11) + settings.StoragePrice = types.NewCurrency64(101) // gouging tt.OK(host.UpdateSettings(settings)) // make sure the price table expires so the worker 
is forced to fetch it @@ -76,7 +80,7 @@ func TestGouging(t *testing.T) { // update all host settings so they're gouging for _, h := range cluster.hosts { settings := h.settings.Settings() - settings.EgressPrice = types.Siacoins(1) + settings.StoragePrice = types.NewCurrency64(101) if err := h.UpdateSettings(settings); err != nil { t.Fatal(err) } @@ -91,4 +95,20 @@ func TestGouging(t *testing.T) { if err := w.DownloadObject(context.Background(), &buffer, api.DefaultBucketName, path, api.DownloadObjectOptions{}); err == nil { t.Fatal("expected download to fail", err) } + + // try optimising gouging settings + resp, err := cluster.Autopilot.EvaluateConfig(context.Background(), test.AutopilotConfig, gs, test.RedundancySettings) + tt.OK(err) + if resp.Recommendation == nil { + t.Fatal("expected recommendation") + } + + // set optimised settings + tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, resp.Recommendation.GougingSettings)) + + // upload some data - should work now once contract maintenance is done + tt.Retry(30, time.Second, func() error { + _, err := w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{}) + return err + }) }