From 649ac9c5126c1929b3c5e64f38c80db23618624b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 7 Mar 2024 10:39:59 +0100 Subject: [PATCH 001/201] worker: wrap errors returned by ReadResponse, WriteResponse, ReadRequest and WriteRequest --- autopilot/contractor.go | 6 +++-- worker/rhpv3.go | 53 +++++++++++++++++++++++++++++++++++++++++ worker/rhpv3_test.go | 34 ++++++++++++++++++++++++++ 3 files changed, 91 insertions(+), 2 deletions(-) create mode 100644 worker/rhpv3_test.go diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 188b55661..c4b28d9fa 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -1422,7 +1422,8 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf "renterFunds", renterFunds, "expectedNewStorage", expectedNewStorage, ) - if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) { + if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) && + !worker.IsErrHost(err) { return api.ContractMetadata{}, false, err } return api.ContractMetadata{}, true, err @@ -1505,7 +1506,8 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI return api.ContractMetadata{}, true, err } c.logger.Errorw("refresh failed", zap.Error(err), "hk", hk, "fcid", fcid) - if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) { + if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) && + !worker.IsErrHost(err) { return api.ContractMetadata{}, false, err } return api.ContractMetadata{}, true, err diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 9c280f2bd..e6411d83a 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -47,6 +47,12 @@ const ( ) var ( + // errHost is used to wrap rpc errors returned by the host. + errHost = errors.New("host responded with error") + + // errTransport is used to wrap rpc errors caused by the transport. 
+ errTransport = errors.New("transport error") + // errBalanceInsufficient occurs when a withdrawal failed because the // account balance was insufficient. errBalanceInsufficient = errors.New("ephemeral account balance was insufficient") @@ -83,6 +89,14 @@ var ( errWithdrawalExpired = errors.New("withdrawal request expired") ) +// IsErrHost indicates whether an error was returned by a host as part of an RPC. +func IsErrHost(err error) bool { + if err == nil { + return false + } + return errors.Is(err, errHost) || strings.Contains(err.Error(), errHost.Error()) +} + func isBalanceInsufficient(err error) bool { return isError(err, errBalanceInsufficient) } func isBalanceMaxExceeded(err error) bool { return isError(err, errBalanceMaxExceeded) } func isClosedStream(err error) bool { @@ -110,6 +124,24 @@ func isError(err error, target error) bool { return strings.Contains(strings.ToLower(err.Error()), strings.ToLower(target.Error())) } +// wrapRPCErr extracts the innermost error, wraps it in either a errHost or +// errTransport and finally wraps it using the provided fnName. +func wrapRPCErr(err *error, fnName string) { + if *err == nil { + return + } + innerErr := *err + for errors.Unwrap(innerErr) != nil { + innerErr = errors.Unwrap(innerErr) + } + if errors.As(*err, new(*rhpv3.RPCError)) { + *err = fmt.Errorf("%w: '%w'", errHost, innerErr) + } else { + *err = fmt.Errorf("%w: '%w'", errTransport, innerErr) + } + *err = fmt.Errorf("%s: %w", fnName, *err) +} + // transportV3 is a reference-counted wrapper for rhpv3.Transport. 
type transportV3 struct { refCount uint64 // locked by pool @@ -125,6 +157,27 @@ type streamV3 struct { *rhpv3.Stream } +func (s *streamV3) ReadResponse(resp rhpv3.ProtocolObject, maxLen uint64) (err error) { + defer wrapRPCErr(&err, "ReadResponse") + return s.Stream.ReadResponse(resp, maxLen) +} + +func (s *streamV3) WriteResponse(resp rhpv3.ProtocolObject) (err error) { + defer wrapRPCErr(&err, "WriteResponse") + return s.Stream.WriteResponse(resp) +} + +// ReadRequest reads an RPC request using the new loop protocol. +func (s *streamV3) ReadRequest(req rhpv3.ProtocolObject, maxLen uint64) (err error) { + defer wrapRPCErr(&err, "ReadRequest") + return s.Stream.ReadRequest(req, maxLen) +} + +func (s *streamV3) WriteRequest(rpcID types.Specifier, req rhpv3.ProtocolObject) (err error) { + defer wrapRPCErr(&err, "WriteRequest") + return s.Stream.WriteRequest(rpcID, req) +} + // Close closes the stream and cancels the goroutine launched by DialStream. func (s *streamV3) Close() error { s.cancel() diff --git a/worker/rhpv3_test.go b/worker/rhpv3_test.go new file mode 100644 index 000000000..83f605807 --- /dev/null +++ b/worker/rhpv3_test.go @@ -0,0 +1,34 @@ +package worker + +import ( + "errors" + "fmt" + "testing" + + rhpv3 "go.sia.tech/core/rhp/v3" +) + +func TestWrapRPCErr(t *testing.T) { + // host error + err := fmt.Errorf("ReadResponse: %w", &rhpv3.RPCError{ + Description: "some host error", + }) + if err.Error() != "ReadResponse: some host error" { + t.Fatal("unexpected error:", err) + } + wrapRPCErr(&err, "ReadResponse") + if err.Error() != "ReadResponse: host responded with error: 'some host error'" { + t.Fatal("unexpected error:", err) + } else if !errors.Is(err, errHost) { + t.Fatalf("expected error to be wrapped with %v, got %v", errHost, err) + } + + // transport error + err = fmt.Errorf("ReadResponse: %w", errors.New("some transport error")) + wrapRPCErr(&err, "ReadResponse") + if err.Error() != "ReadResponse: transport error: 'some transport error'" { + 
t.Fatal("unexpected error:", err) + } else if !errors.Is(err, errTransport) { + t.Fatalf("expected error to be wrapped with %v, got %v", errHost, err) + } +} From fc7a9b9c075b314336a6af67627da8ea0f5fd63e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 8 Mar 2024 11:25:15 +0100 Subject: [PATCH 002/201] utils: extract IsErr into package --- autopilot/autopilot.go | 11 +++++----- autopilot/contract_pruning.go | 19 +++++++++-------- autopilot/ipfilter.go | 10 ++------- autopilot/migrator.go | 3 ++- autopilot/scanner.go | 3 ++- internal/utils/errors.go | 18 ++++++++++++++++ worker/rhpv3.go | 40 +++++++++++------------------------ worker/uploader.go | 3 ++- worker/worker.go | 3 ++- 9 files changed, 56 insertions(+), 54 deletions(-) create mode 100644 internal/utils/errors.go diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 7367003e0..c53e4ec4c 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -18,6 +18,7 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/build" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" @@ -299,7 +300,7 @@ func (ap *Autopilot) Run() error { // perform maintenance setChanged, err := ap.c.performContractMaintenance(ap.shutdownCtx, w) - if err != nil && isErr(err, context.Canceled) { + if err != nil && utils.IsErr(err, context.Canceled) { return } else if err != nil { ap.logger.Errorf("contract maintenance failed, err: %v", err) @@ -405,9 +406,9 @@ func (ap *Autopilot) blockUntilConfigured(interrupt <-chan time.Time) (configure cancel() // if the config was not found, or we were unable to fetch it, keep blocking - if isErr(err, context.Canceled) { + if utils.IsErr(err, context.Canceled) { return - } else if isErr(err, api.ErrAutopilotNotFound) { + } else if utils.IsErr(err, api.ErrAutopilotNotFound) { once.Do(func() { ap.logger.Info("autopilot is waiting to be configured...") }) } else if err 
!= nil { ap.logger.Errorf("autopilot is unable to fetch its configuration from the bus, err: %v", err) @@ -438,7 +439,7 @@ func (ap *Autopilot) blockUntilOnline() (online bool) { online = len(peers) > 0 cancel() - if isErr(err, context.Canceled) { + if utils.IsErr(err, context.Canceled) { return } else if err != nil { ap.logger.Errorf("failed to get peers, err: %v", err) @@ -472,7 +473,7 @@ func (ap *Autopilot) blockUntilSynced(interrupt <-chan time.Time) (synced, block cancel() // if an error occurred, or if we're not synced, we continue - if isErr(err, context.Canceled) { + if utils.IsErr(err, context.Canceled) { return } else if err != nil { ap.logger.Errorf("failed to get consensus state, err: %v", err) diff --git a/autopilot/contract_pruning.go b/autopilot/contract_pruning.go index e32cd3fa0..aa0eb505f 100644 --- a/autopilot/contract_pruning.go +++ b/autopilot/contract_pruning.go @@ -9,6 +9,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/siad/build" ) @@ -65,14 +66,14 @@ func (pm pruneMetrics) String() string { func (pr pruneResult) toAlert() (id types.Hash256, alert *alerts.Alert) { id = alertIDForContract(alertPruningID, pr.fcid) - if shouldTrigger := pr.err != nil && !((isErr(pr.err, errInvalidMerkleProof) && build.VersionCmp(pr.version, "1.6.0") < 0) || - isErr(pr.err, api.ErrContractNotFound) || // contract got archived - isErr(pr.err, errConnectionRefused) || - isErr(pr.err, errConnectionTimedOut) || - isErr(pr.err, errConnectionResetByPeer) || - isErr(pr.err, errInvalidHandshakeSignature) || - isErr(pr.err, errNoRouteToHost) || - isErr(pr.err, errNoSuchHost)); shouldTrigger { + if shouldTrigger := pr.err != nil && !((utils.IsErr(pr.err, errInvalidMerkleProof) && build.VersionCmp(pr.version, "1.6.0") < 0) || + utils.IsErr(pr.err, api.ErrContractNotFound) || // contract got archived + utils.IsErr(pr.err, errConnectionRefused) || + utils.IsErr(pr.err, 
errConnectionTimedOut) || + utils.IsErr(pr.err, errConnectionResetByPeer) || + utils.IsErr(pr.err, errInvalidHandshakeSignature) || + utils.IsErr(pr.err, errNoRouteToHost) || + utils.IsErr(pr.err, errNoSuchHost)); shouldTrigger { alert = newContractPruningFailedAlert(pr.hk, pr.version, pr.fcid, pr.err) } return @@ -196,7 +197,7 @@ func (c *contractor) pruneContract(w Worker, fcid types.FileContractID) pruneRes pruned, remaining, err := w.RHPPruneContract(ctx, fcid, timeoutPruneContract) if err != nil && pruned == 0 { return pruneResult{fcid: fcid, hk: host.PublicKey, version: host.Settings.Version, err: err} - } else if err != nil && isErr(err, context.DeadlineExceeded) { + } else if err != nil && utils.IsErr(err, context.DeadlineExceeded) { err = nil } diff --git a/autopilot/ipfilter.go b/autopilot/ipfilter.go index 6aa244047..0932d7676 100644 --- a/autopilot/ipfilter.go +++ b/autopilot/ipfilter.go @@ -9,6 +9,7 @@ import ( "time" "go.sia.tech/core/types" + "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -137,7 +138,7 @@ func (r *ipResolver) lookup(hostIP string) ([]string, error) { addrs, err := r.resolver.LookupIPAddr(ctx, host) if err != nil { // check the cache if it's an i/o timeout or server misbehaving error - if isErr(err, errIOTimeout) || isErr(err, errServerMisbehaving) { + if utils.IsErr(err, errIOTimeout) || utils.IsErr(err, errServerMisbehaving) { if entry, found := r.cache[hostIP]; found && time.Since(entry.created) < ipCacheEntryValidity { r.logger.Debugf("using cached IP addresses for %v, err: %v", hostIP, err) return entry.subnets, nil @@ -188,10 +189,3 @@ func parseSubnets(addresses []net.IPAddr) []string { return subnets } - -func isErr(err error, target error) bool { - if errors.Is(err, target) { - return true - } - return err != nil && target != nil && strings.Contains(err.Error(), target.Error()) -} diff --git a/autopilot/migrator.go b/autopilot/migrator.go index 4a4e31de6..c55b9c734 100644 --- a/autopilot/migrator.go +++ 
b/autopilot/migrator.go @@ -10,6 +10,7 @@ import ( "time" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/stats" "go.uber.org/zap" @@ -156,7 +157,7 @@ func (m *migrator) performMigrations(p *workerPool) { if err != nil { m.logger.Errorf("%v: migration %d/%d failed, key: %v, health: %v, overpaid: %v, err: %v", id, j.slabIdx+1, j.batchSize, j.Key, j.Health, res.SurchargeApplied, err) - skipAlert := isErr(err, api.ErrSlabNotFound) + skipAlert := utils.IsErr(err, api.ErrSlabNotFound) if !skipAlert { if res.SurchargeApplied { m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.Key, j.Health, err)) diff --git a/autopilot/scanner.go b/autopilot/scanner.go index e512d1f87..230400619 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -12,6 +12,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -314,7 +315,7 @@ func (s *scanner) launchScanWorkers(ctx context.Context, w scanWorker, reqs chan scan, err := w.RHPScan(ctx, req.hostKey, req.hostIP, s.currentTimeout()) if err != nil { break // abort - } else if !isErr(errors.New(scan.ScanError), errIOTimeout) && scan.Ping > 0 { + } else if !utils.IsErr(errors.New(scan.ScanError), errIOTimeout) && scan.Ping > 0 { s.tracker.addDataPoint(time.Duration(scan.Ping)) } diff --git a/internal/utils/errors.go b/internal/utils/errors.go new file mode 100644 index 000000000..a8c4bbf59 --- /dev/null +++ b/internal/utils/errors.go @@ -0,0 +1,18 @@ +package utils + +import ( + "errors" + "strings" +) + +// IsErr can be used to compare an error to a target and also works when used on +// errors that haven't been wrapped since it will fall back to a string +// comparison. Useful to check errors returned over the network. 
+func IsErr(err error, target error) bool { + if (err == nil) != (target == nil) { + return false + } else if errors.Is(err, target) { + return true + } + return strings.Contains(err.Error(), target.Error()) +} diff --git a/worker/rhpv3.go b/worker/rhpv3.go index e6411d83a..203d2c3da 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -10,7 +10,6 @@ import ( "math" "math/big" "net" - "strings" "sync" "time" @@ -20,6 +19,7 @@ import ( "go.sia.tech/mux/v1" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/siad/crypto" "go.uber.org/zap" ) @@ -91,38 +91,23 @@ var ( // IsErrHost indicates whether an error was returned by a host as part of an RPC. func IsErrHost(err error) bool { - if err == nil { - return false - } - return errors.Is(err, errHost) || strings.Contains(err.Error(), errHost.Error()) + return utils.IsErr(err, errHost) } -func isBalanceInsufficient(err error) bool { return isError(err, errBalanceInsufficient) } -func isBalanceMaxExceeded(err error) bool { return isError(err, errBalanceMaxExceeded) } +func isBalanceInsufficient(err error) bool { return utils.IsErr(err, errBalanceInsufficient) } +func isBalanceMaxExceeded(err error) bool { return utils.IsErr(err, errBalanceMaxExceeded) } func isClosedStream(err error) bool { - return isError(err, mux.ErrClosedStream) || isError(err, net.ErrClosed) + return utils.IsErr(err, mux.ErrClosedStream) || utils.IsErr(err, net.ErrClosed) } -func isInsufficientFunds(err error) bool { return isError(err, ErrInsufficientFunds) } -func isPriceTableExpired(err error) bool { return isError(err, errPriceTableExpired) } -func isPriceTableGouging(err error) bool { return isError(err, errPriceTableGouging) } -func isPriceTableNotFound(err error) bool { return isError(err, errPriceTableNotFound) } +func isInsufficientFunds(err error) bool { return utils.IsErr(err, ErrInsufficientFunds) } +func isPriceTableExpired(err error) bool { return utils.IsErr(err, 
errPriceTableExpired) } +func isPriceTableGouging(err error) bool { return utils.IsErr(err, errPriceTableGouging) } +func isPriceTableNotFound(err error) bool { return utils.IsErr(err, errPriceTableNotFound) } func isSectorNotFound(err error) bool { - return isError(err, errSectorNotFound) || isError(err, errSectorNotFoundOld) -} -func isWithdrawalsInactive(err error) bool { return isError(err, errWithdrawalsInactive) } -func isWithdrawalExpired(err error) bool { return isError(err, errWithdrawalExpired) } - -func isError(err error, target error) bool { - if err == nil { - return err == target - } - // compare error first - if errors.Is(err, target) { - return true - } - // then compare the string in case the error was returned by a host - return strings.Contains(strings.ToLower(err.Error()), strings.ToLower(target.Error())) + return utils.IsErr(err, errSectorNotFound) || utils.IsErr(err, errSectorNotFoundOld) } +func isWithdrawalsInactive(err error) bool { return utils.IsErr(err, errWithdrawalsInactive) } +func isWithdrawalExpired(err error) bool { return utils.IsErr(err, errWithdrawalExpired) } // wrapRPCErr extracts the innermost error, wraps it in either a errHost or // errTransport and finally wraps it using the provided fnName. @@ -167,7 +152,6 @@ func (s *streamV3) WriteResponse(resp rhpv3.ProtocolObject) (err error) { return s.Stream.WriteResponse(resp) } -// ReadRequest reads an RPC request using the new loop protocol. 
func (s *streamV3) ReadRequest(req rhpv3.ProtocolObject, maxLen uint64) (err error) { defer wrapRPCErr(&err, "ReadRequest") return s.Stream.ReadRequest(req, maxLen) diff --git a/worker/uploader.go b/worker/uploader.go index 28b04033d..403accbc8 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -11,6 +11,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/stats" "go.uber.org/zap" ) @@ -298,7 +299,7 @@ func (u *uploader) tryRecomputeStats() { func (u *uploader) tryRefresh(ctx context.Context) bool { // fetch the renewed contract renewed, err := u.cs.RenewedContract(ctx, u.ContractID()) - if isError(err, api.ErrContractNotFound) || isError(err, context.Canceled) { + if utils.IsErr(err, api.ErrContractNotFound) || utils.IsErr(err, context.Canceled) { return false } else if err != nil { u.logger.Errorf("failed to fetch renewed contract %v, err: %v", u.ContractID(), err) diff --git a/worker/worker.go b/worker/worker.go index b335a5f6c..01e3d817e 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -24,6 +24,7 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/build" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/webhooks" "go.sia.tech/renterd/worker/client" @@ -1162,7 +1163,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { // fetch upload from bus upload, err := w.bus.MultipartUpload(ctx, uploadID) - if isError(err, api.ErrMultipartUploadNotFound) { + if utils.IsErr(err, api.ErrMultipartUploadNotFound) { jc.Error(err, http.StatusNotFound) return } else if jc.Check("failed to fetch multipart upload", err) != nil { From 9bc66bfc042362edc934d18895ba7f6a8ed4fa31 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 7 Mar 2024 10:39:59 +0100 Subject: [PATCH 003/201] worker: wrap errors returned by ReadResponse, WriteResponse, ReadRequest and WriteRequest --- 
autopilot/contractor.go | 6 +++-- worker/rhpv3.go | 53 +++++++++++++++++++++++++++++++++++++++++ worker/rhpv3_test.go | 34 ++++++++++++++++++++++++++ 3 files changed, 91 insertions(+), 2 deletions(-) create mode 100644 worker/rhpv3_test.go diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 4e5e8c842..f866d3267 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -1425,7 +1425,8 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf "renterFunds", renterFunds, "expectedNewStorage", expectedNewStorage, ) - if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) { + if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) && + !worker.IsErrHost(err) { return api.ContractMetadata{}, false, err } return api.ContractMetadata{}, true, err @@ -1508,7 +1509,8 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI return api.ContractMetadata{}, true, err } c.logger.Errorw("refresh failed", zap.Error(err), "hk", hk, "fcid", fcid) - if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) { + if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) && + !worker.IsErrHost(err) { return api.ContractMetadata{}, false, err } return api.ContractMetadata{}, true, err diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 9c280f2bd..e6411d83a 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -47,6 +47,12 @@ const ( ) var ( + // errHost is used to wrap rpc errors returned by the host. + errHost = errors.New("host responded with error") + + // errTransport is used to wrap rpc errors caused by the transport. + errTransport = errors.New("transport error") + // errBalanceInsufficient occurs when a withdrawal failed because the // account balance was insufficient. 
errBalanceInsufficient = errors.New("ephemeral account balance was insufficient") @@ -83,6 +89,14 @@ var ( errWithdrawalExpired = errors.New("withdrawal request expired") ) +// IsErrHost indicates whether an error was returned by a host as part of an RPC. +func IsErrHost(err error) bool { + if err == nil { + return false + } + return errors.Is(err, errHost) || strings.Contains(err.Error(), errHost.Error()) +} + func isBalanceInsufficient(err error) bool { return isError(err, errBalanceInsufficient) } func isBalanceMaxExceeded(err error) bool { return isError(err, errBalanceMaxExceeded) } func isClosedStream(err error) bool { @@ -110,6 +124,24 @@ func isError(err error, target error) bool { return strings.Contains(strings.ToLower(err.Error()), strings.ToLower(target.Error())) } +// wrapRPCErr extracts the innermost error, wraps it in either a errHost or +// errTransport and finally wraps it using the provided fnName. +func wrapRPCErr(err *error, fnName string) { + if *err == nil { + return + } + innerErr := *err + for errors.Unwrap(innerErr) != nil { + innerErr = errors.Unwrap(innerErr) + } + if errors.As(*err, new(*rhpv3.RPCError)) { + *err = fmt.Errorf("%w: '%w'", errHost, innerErr) + } else { + *err = fmt.Errorf("%w: '%w'", errTransport, innerErr) + } + *err = fmt.Errorf("%s: %w", fnName, *err) +} + // transportV3 is a reference-counted wrapper for rhpv3.Transport. type transportV3 struct { refCount uint64 // locked by pool @@ -125,6 +157,27 @@ type streamV3 struct { *rhpv3.Stream } +func (s *streamV3) ReadResponse(resp rhpv3.ProtocolObject, maxLen uint64) (err error) { + defer wrapRPCErr(&err, "ReadResponse") + return s.Stream.ReadResponse(resp, maxLen) +} + +func (s *streamV3) WriteResponse(resp rhpv3.ProtocolObject) (err error) { + defer wrapRPCErr(&err, "WriteResponse") + return s.Stream.WriteResponse(resp) +} + +// ReadRequest reads an RPC request using the new loop protocol. 
+func (s *streamV3) ReadRequest(req rhpv3.ProtocolObject, maxLen uint64) (err error) { + defer wrapRPCErr(&err, "ReadRequest") + return s.Stream.ReadRequest(req, maxLen) +} + +func (s *streamV3) WriteRequest(rpcID types.Specifier, req rhpv3.ProtocolObject) (err error) { + defer wrapRPCErr(&err, "WriteRequest") + return s.Stream.WriteRequest(rpcID, req) +} + // Close closes the stream and cancels the goroutine launched by DialStream. func (s *streamV3) Close() error { s.cancel() diff --git a/worker/rhpv3_test.go b/worker/rhpv3_test.go new file mode 100644 index 000000000..83f605807 --- /dev/null +++ b/worker/rhpv3_test.go @@ -0,0 +1,34 @@ +package worker + +import ( + "errors" + "fmt" + "testing" + + rhpv3 "go.sia.tech/core/rhp/v3" +) + +func TestWrapRPCErr(t *testing.T) { + // host error + err := fmt.Errorf("ReadResponse: %w", &rhpv3.RPCError{ + Description: "some host error", + }) + if err.Error() != "ReadResponse: some host error" { + t.Fatal("unexpected error:", err) + } + wrapRPCErr(&err, "ReadResponse") + if err.Error() != "ReadResponse: host responded with error: 'some host error'" { + t.Fatal("unexpected error:", err) + } else if !errors.Is(err, errHost) { + t.Fatalf("expected error to be wrapped with %v, got %v", errHost, err) + } + + // transport error + err = fmt.Errorf("ReadResponse: %w", errors.New("some transport error")) + wrapRPCErr(&err, "ReadResponse") + if err.Error() != "ReadResponse: transport error: 'some transport error'" { + t.Fatal("unexpected error:", err) + } else if !errors.Is(err, errTransport) { + t.Fatalf("expected error to be wrapped with %v, got %v", errHost, err) + } +} From e80f9741f37b4ec39ecbceae2925375f2edf2117 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 8 Mar 2024 11:25:15 +0100 Subject: [PATCH 004/201] utils: extract IsErr into package --- autopilot/autopilot.go | 11 +++++----- autopilot/contract_pruning.go | 19 +++++++++-------- autopilot/ipfilter.go | 10 ++------- autopilot/migrator.go | 3 ++- 
autopilot/scanner.go | 3 ++- internal/utils/errors.go | 18 ++++++++++++++++ worker/rhpv3.go | 40 +++++++++++------------------------ worker/uploader.go | 3 ++- worker/worker.go | 3 ++- 9 files changed, 56 insertions(+), 54 deletions(-) create mode 100644 internal/utils/errors.go diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 7367003e0..c53e4ec4c 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -18,6 +18,7 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/build" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" @@ -299,7 +300,7 @@ func (ap *Autopilot) Run() error { // perform maintenance setChanged, err := ap.c.performContractMaintenance(ap.shutdownCtx, w) - if err != nil && isErr(err, context.Canceled) { + if err != nil && utils.IsErr(err, context.Canceled) { return } else if err != nil { ap.logger.Errorf("contract maintenance failed, err: %v", err) @@ -405,9 +406,9 @@ func (ap *Autopilot) blockUntilConfigured(interrupt <-chan time.Time) (configure cancel() // if the config was not found, or we were unable to fetch it, keep blocking - if isErr(err, context.Canceled) { + if utils.IsErr(err, context.Canceled) { return - } else if isErr(err, api.ErrAutopilotNotFound) { + } else if utils.IsErr(err, api.ErrAutopilotNotFound) { once.Do(func() { ap.logger.Info("autopilot is waiting to be configured...") }) } else if err != nil { ap.logger.Errorf("autopilot is unable to fetch its configuration from the bus, err: %v", err) @@ -438,7 +439,7 @@ func (ap *Autopilot) blockUntilOnline() (online bool) { online = len(peers) > 0 cancel() - if isErr(err, context.Canceled) { + if utils.IsErr(err, context.Canceled) { return } else if err != nil { ap.logger.Errorf("failed to get peers, err: %v", err) @@ -472,7 +473,7 @@ func (ap *Autopilot) blockUntilSynced(interrupt <-chan time.Time) (synced, block cancel() // if an error occurred, 
or if we're not synced, we continue - if isErr(err, context.Canceled) { + if utils.IsErr(err, context.Canceled) { return } else if err != nil { ap.logger.Errorf("failed to get consensus state, err: %v", err) diff --git a/autopilot/contract_pruning.go b/autopilot/contract_pruning.go index e32cd3fa0..aa0eb505f 100644 --- a/autopilot/contract_pruning.go +++ b/autopilot/contract_pruning.go @@ -9,6 +9,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/siad/build" ) @@ -65,14 +66,14 @@ func (pm pruneMetrics) String() string { func (pr pruneResult) toAlert() (id types.Hash256, alert *alerts.Alert) { id = alertIDForContract(alertPruningID, pr.fcid) - if shouldTrigger := pr.err != nil && !((isErr(pr.err, errInvalidMerkleProof) && build.VersionCmp(pr.version, "1.6.0") < 0) || - isErr(pr.err, api.ErrContractNotFound) || // contract got archived - isErr(pr.err, errConnectionRefused) || - isErr(pr.err, errConnectionTimedOut) || - isErr(pr.err, errConnectionResetByPeer) || - isErr(pr.err, errInvalidHandshakeSignature) || - isErr(pr.err, errNoRouteToHost) || - isErr(pr.err, errNoSuchHost)); shouldTrigger { + if shouldTrigger := pr.err != nil && !((utils.IsErr(pr.err, errInvalidMerkleProof) && build.VersionCmp(pr.version, "1.6.0") < 0) || + utils.IsErr(pr.err, api.ErrContractNotFound) || // contract got archived + utils.IsErr(pr.err, errConnectionRefused) || + utils.IsErr(pr.err, errConnectionTimedOut) || + utils.IsErr(pr.err, errConnectionResetByPeer) || + utils.IsErr(pr.err, errInvalidHandshakeSignature) || + utils.IsErr(pr.err, errNoRouteToHost) || + utils.IsErr(pr.err, errNoSuchHost)); shouldTrigger { alert = newContractPruningFailedAlert(pr.hk, pr.version, pr.fcid, pr.err) } return @@ -196,7 +197,7 @@ func (c *contractor) pruneContract(w Worker, fcid types.FileContractID) pruneRes pruned, remaining, err := w.RHPPruneContract(ctx, fcid, timeoutPruneContract) if err != nil && pruned 
== 0 { return pruneResult{fcid: fcid, hk: host.PublicKey, version: host.Settings.Version, err: err} - } else if err != nil && isErr(err, context.DeadlineExceeded) { + } else if err != nil && utils.IsErr(err, context.DeadlineExceeded) { err = nil } diff --git a/autopilot/ipfilter.go b/autopilot/ipfilter.go index 6aa244047..0932d7676 100644 --- a/autopilot/ipfilter.go +++ b/autopilot/ipfilter.go @@ -9,6 +9,7 @@ import ( "time" "go.sia.tech/core/types" + "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -137,7 +138,7 @@ func (r *ipResolver) lookup(hostIP string) ([]string, error) { addrs, err := r.resolver.LookupIPAddr(ctx, host) if err != nil { // check the cache if it's an i/o timeout or server misbehaving error - if isErr(err, errIOTimeout) || isErr(err, errServerMisbehaving) { + if utils.IsErr(err, errIOTimeout) || utils.IsErr(err, errServerMisbehaving) { if entry, found := r.cache[hostIP]; found && time.Since(entry.created) < ipCacheEntryValidity { r.logger.Debugf("using cached IP addresses for %v, err: %v", hostIP, err) return entry.subnets, nil @@ -188,10 +189,3 @@ func parseSubnets(addresses []net.IPAddr) []string { return subnets } - -func isErr(err error, target error) bool { - if errors.Is(err, target) { - return true - } - return err != nil && target != nil && strings.Contains(err.Error(), target.Error()) -} diff --git a/autopilot/migrator.go b/autopilot/migrator.go index 4a4e31de6..c55b9c734 100644 --- a/autopilot/migrator.go +++ b/autopilot/migrator.go @@ -10,6 +10,7 @@ import ( "time" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/stats" "go.uber.org/zap" @@ -156,7 +157,7 @@ func (m *migrator) performMigrations(p *workerPool) { if err != nil { m.logger.Errorf("%v: migration %d/%d failed, key: %v, health: %v, overpaid: %v, err: %v", id, j.slabIdx+1, j.batchSize, j.Key, j.Health, res.SurchargeApplied, err) - skipAlert := isErr(err, api.ErrSlabNotFound) + skipAlert := 
utils.IsErr(err, api.ErrSlabNotFound) if !skipAlert { if res.SurchargeApplied { m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.Key, j.Health, err)) diff --git a/autopilot/scanner.go b/autopilot/scanner.go index e512d1f87..230400619 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -12,6 +12,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -314,7 +315,7 @@ func (s *scanner) launchScanWorkers(ctx context.Context, w scanWorker, reqs chan scan, err := w.RHPScan(ctx, req.hostKey, req.hostIP, s.currentTimeout()) if err != nil { break // abort - } else if !isErr(errors.New(scan.ScanError), errIOTimeout) && scan.Ping > 0 { + } else if !utils.IsErr(errors.New(scan.ScanError), errIOTimeout) && scan.Ping > 0 { s.tracker.addDataPoint(time.Duration(scan.Ping)) } diff --git a/internal/utils/errors.go b/internal/utils/errors.go new file mode 100644 index 000000000..a8c4bbf59 --- /dev/null +++ b/internal/utils/errors.go @@ -0,0 +1,18 @@ +package utils + +import ( + "errors" + "strings" +) + +// IsErr can be used to compare an error to a target and also works when used on +// errors that haven't been wrapped since it will fall back to a string +// comparison. Useful to check errors returned over the network. 
+func IsErr(err error, target error) bool { + if (err == nil) != (target == nil) { + return false + } else if errors.Is(err, target) { + return true + } + return strings.Contains(err.Error(), target.Error()) +} diff --git a/worker/rhpv3.go b/worker/rhpv3.go index e6411d83a..203d2c3da 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -10,7 +10,6 @@ import ( "math" "math/big" "net" - "strings" "sync" "time" @@ -20,6 +19,7 @@ import ( "go.sia.tech/mux/v1" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/siad/crypto" "go.uber.org/zap" ) @@ -91,38 +91,23 @@ var ( // IsErrHost indicates whether an error was returned by a host as part of an RPC. func IsErrHost(err error) bool { - if err == nil { - return false - } - return errors.Is(err, errHost) || strings.Contains(err.Error(), errHost.Error()) + return utils.IsErr(err, errHost) } -func isBalanceInsufficient(err error) bool { return isError(err, errBalanceInsufficient) } -func isBalanceMaxExceeded(err error) bool { return isError(err, errBalanceMaxExceeded) } +func isBalanceInsufficient(err error) bool { return utils.IsErr(err, errBalanceInsufficient) } +func isBalanceMaxExceeded(err error) bool { return utils.IsErr(err, errBalanceMaxExceeded) } func isClosedStream(err error) bool { - return isError(err, mux.ErrClosedStream) || isError(err, net.ErrClosed) + return utils.IsErr(err, mux.ErrClosedStream) || utils.IsErr(err, net.ErrClosed) } -func isInsufficientFunds(err error) bool { return isError(err, ErrInsufficientFunds) } -func isPriceTableExpired(err error) bool { return isError(err, errPriceTableExpired) } -func isPriceTableGouging(err error) bool { return isError(err, errPriceTableGouging) } -func isPriceTableNotFound(err error) bool { return isError(err, errPriceTableNotFound) } +func isInsufficientFunds(err error) bool { return utils.IsErr(err, ErrInsufficientFunds) } +func isPriceTableExpired(err error) bool { return utils.IsErr(err, 
errPriceTableExpired) } +func isPriceTableGouging(err error) bool { return utils.IsErr(err, errPriceTableGouging) } +func isPriceTableNotFound(err error) bool { return utils.IsErr(err, errPriceTableNotFound) } func isSectorNotFound(err error) bool { - return isError(err, errSectorNotFound) || isError(err, errSectorNotFoundOld) -} -func isWithdrawalsInactive(err error) bool { return isError(err, errWithdrawalsInactive) } -func isWithdrawalExpired(err error) bool { return isError(err, errWithdrawalExpired) } - -func isError(err error, target error) bool { - if err == nil { - return err == target - } - // compare error first - if errors.Is(err, target) { - return true - } - // then compare the string in case the error was returned by a host - return strings.Contains(strings.ToLower(err.Error()), strings.ToLower(target.Error())) + return utils.IsErr(err, errSectorNotFound) || utils.IsErr(err, errSectorNotFoundOld) } +func isWithdrawalsInactive(err error) bool { return utils.IsErr(err, errWithdrawalsInactive) } +func isWithdrawalExpired(err error) bool { return utils.IsErr(err, errWithdrawalExpired) } // wrapRPCErr extracts the innermost error, wraps it in either a errHost or // errTransport and finally wraps it using the provided fnName. @@ -167,7 +152,6 @@ func (s *streamV3) WriteResponse(resp rhpv3.ProtocolObject) (err error) { return s.Stream.WriteResponse(resp) } -// ReadRequest reads an RPC request using the new loop protocol. 
func (s *streamV3) ReadRequest(req rhpv3.ProtocolObject, maxLen uint64) (err error) { defer wrapRPCErr(&err, "ReadRequest") return s.Stream.ReadRequest(req, maxLen) diff --git a/worker/uploader.go b/worker/uploader.go index 28b04033d..403accbc8 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -11,6 +11,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/stats" "go.uber.org/zap" ) @@ -298,7 +299,7 @@ func (u *uploader) tryRecomputeStats() { func (u *uploader) tryRefresh(ctx context.Context) bool { // fetch the renewed contract renewed, err := u.cs.RenewedContract(ctx, u.ContractID()) - if isError(err, api.ErrContractNotFound) || isError(err, context.Canceled) { + if utils.IsErr(err, api.ErrContractNotFound) || utils.IsErr(err, context.Canceled) { return false } else if err != nil { u.logger.Errorf("failed to fetch renewed contract %v, err: %v", u.ContractID(), err) diff --git a/worker/worker.go b/worker/worker.go index 9e4dacdd2..b3a650608 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -25,6 +25,7 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/build" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/webhooks" "go.sia.tech/renterd/worker/client" @@ -1197,7 +1198,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { // fetch upload from bus upload, err := w.bus.MultipartUpload(ctx, uploadID) - if isError(err, api.ErrMultipartUploadNotFound) { + if utils.IsErr(err, api.ErrMultipartUploadNotFound) { jc.Error(err, http.StatusNotFound) return } else if jc.Check("failed to fetch multipart upload", err) != nil { From 3da4ffffaace10ea1d539a6e8cc8d0372e8de964 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 8 Mar 2024 11:35:47 +0100 Subject: [PATCH 005/201] worker: fix lint --- autopilot/contractor.go | 7 +++---- worker/worker.go | 16 ++++++++-------- 2 
files changed, 11 insertions(+), 12 deletions(-) diff --git a/autopilot/contractor.go b/autopilot/contractor.go index f866d3267..d69f2a354 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -16,6 +16,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/worker" "go.uber.org/zap" @@ -1425,8 +1426,7 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf "renterFunds", renterFunds, "expectedNewStorage", expectedNewStorage, ) - if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) && - !worker.IsErrHost(err) { + if utils.IsErr(err, wallet.ErrInsufficientBalance) && !worker.IsErrHost(err) { return api.ContractMetadata{}, false, err } return api.ContractMetadata{}, true, err @@ -1509,8 +1509,7 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI return api.ContractMetadata{}, true, err } c.logger.Errorw("refresh failed", zap.Error(err), "hk", hk, "fcid", fcid) - if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) && - !worker.IsErrHost(err) { + if utils.IsErr(err, wallet.ErrInsufficientBalance) && !worker.IsErrHost(err) { return api.ContractMetadata{}, false, err } return api.ContractMetadata{}, true, err diff --git a/worker/worker.go b/worker/worker.go index b3a650608..fb645840d 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1547,14 +1547,14 @@ func discardTxnOnErr(ctx context.Context, bus Bus, l *zap.SugaredLogger, txn typ } func isErrHostUnreachable(err error) bool { - return isError(err, os.ErrDeadlineExceeded) || - isError(err, context.DeadlineExceeded) || - isError(err, api.ErrHostOnPrivateNetwork) || - isError(err, errors.New("no route to host")) || - isError(err, errors.New("no such host")) || - isError(err, errors.New("connection refused")) || - isError(err, errors.New("unknown port")) || - isError(err, 
errors.New("cannot assign requested address")) + return utils.IsErr(err, os.ErrDeadlineExceeded) || + utils.IsErr(err, context.DeadlineExceeded) || + utils.IsErr(err, api.ErrHostOnPrivateNetwork) || + utils.IsErr(err, errors.New("no route to host")) || + utils.IsErr(err, errors.New("no such host")) || + utils.IsErr(err, errors.New("connection refused")) || + utils.IsErr(err, errors.New("unknown port")) || + utils.IsErr(err, errors.New("cannot assign requested address")) } func isErrDuplicateTransactionSet(err error) bool { From 92a24c824b8045d7167a67883d0377249f0bbe2f Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 11 Mar 2024 12:11:50 +0100 Subject: [PATCH 006/201] docs: add backup sections to README.md --- README.md | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/README.md b/README.md index a4ccc8681..10ef2b0cb 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,95 @@ API documentation can be found [here](https://api.sia.tech/renterd).
Setup guides are available on our [website](https://docs.sia.tech/renting/setting-up-renterd).
A project roadmap is available on [GitHub](https://github.com/orgs/SiaFoundation/projects/5). +## Backups + +This section provides instructions on creating backups for `renterd` and +restoring from those backups. Taking backups at regular intervals and testing +whether you can restore from such a backup are crucial for ensuring the +integrity and availability of your data. Make sure to never store your backups +on the same machine `renterd` is running on. + +### Databases + +When uploading data to the Sia Network, `renterd` stores all necessary metadata +to be able to download that data in its SQL databases. `renterd` uses two +databases: + +- **main database**: contains all object, contract and host metadata +- **metrics database**: contains contract spending, performance metrics + +The main database is the most important one because it contains the object +metadata, which is crucial to be able to recover your data. The `metrics` +database is less important but there are various UI features that depend on it. + +--- +**NOTE** + +The names of these databases are configurable (see `RENTERD_DB_NAME` and +`RENTERD_DB_METRICS_NAME`), so make sure to use the configured values; the +following section will assume `renterd` defaults which are `renterd` and +`renterd_metrics` for the main and metrics database respectively. + +--- + +Depending on the user's configuration, these databases are either SQLite or MySQL +databases. The following section outlines how to back up `renterd` in both +scenarios. + +#### SQLite + +Backing up a SQLite database can be done using the following command: + +```bash +sqlite3 db.sqlite ".backup 'db.bkp'" +sqlite3 metrics.sqlite ".backup 'metrics.bkp'" +``` + +There is an alternative `.dump` command, which exports the database into a text +file containing SQL statements. This backup is useful in its own right, because it's +more portable and can be used to import the contents of the database into +another database on another system entirely. 
The `.backup` however yields a +byte-for-byte replica of the original database file; it is usually a lot faster +on large databases and it can be performed on a database that's actively being +read or written to, even though we advise to shut down the renter before taking +a backup. + +Restoring from a backup is as simple as putting the backup in place of the original. +Another useful tool for backing up SQLite databases is https://litestream.io/. + +#### MySQL + +Backing up a MySQL database can be done using the `mysqldump` command. It's a +utility provided by MySQL to back up or transfer a MySQL database. It's usually +installed alongside the MySQL client tools. + +The following command assumes MySQL is being run from within a docker container: + +```bash +docker exec [MYSQL_CONTAINER_NAME] /usr/bin/mysqldump -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd > renterd_bkp.sql + +docker exec [MYSQL_CONTAINER_NAME] /usr/bin/mysqldump -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd_metrics > renterd_metrics_bkp.sql +``` + +Restoring from this backup can be done using: + +```bash +cat renterd_bkp.sql | docker exec -i [MYSQL_CONTAINER_NAME] /usr/bin/mysql -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd + +cat renterd_metrics_bkp.sql | docker exec -i [MYSQL_CONTAINER_NAME] /usr/bin/mysql -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd_metrics +``` + +### Partial Slabs + +For users that have upload packing enabled, it is very important to back up +partial slabs alongside the database backups. These partial slabs are +essentially a sort of buffer that gets uploaded to the network when that buffer +reaches the size of a full slab, drastically speeding up a bunch of small file +uploads. To ensure consistency between the database and these files on disk, it +is recommended to gracefully shut the renter down before taking a backup of its +database. 
These partial slabs are located in a folder called `partial_slabs`, +right in the root folder. + ## Docker Support `renterd` includes a `Dockerfile` which can be used for building and running From 7dd11e3f6f1a5f3ceccd98896dc6d93f88082157 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 11 Mar 2024 14:43:41 +0100 Subject: [PATCH 007/201] stores: create index on created_at column of objects table --- stores/migrations.go | 6 ++++++ .../mysql/main/migration_00006_idx_objects_created_at.sql | 1 + stores/migrations/mysql/main/schema.sql | 1 + .../sqlite/main/migration_00006_idx_objects_created_at.sql | 1 + stores/migrations/sqlite/main/schema.sql | 1 + 5 files changed, 10 insertions(+) create mode 100644 stores/migrations/mysql/main/migration_00006_idx_objects_created_at.sql create mode 100644 stores/migrations/sqlite/main/migration_00006_idx_objects_created_at.sql diff --git a/stores/migrations.go b/stores/migrations.go index cb0a38b18..6395b52bb 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -56,6 +56,12 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { return performMigration(tx, dbIdentifier, "00005_zero_size_object_health", logger) }, }, + { + ID: "00006_idx_objects_created_at", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00006_idx_objects_created_at", logger) + }, + }, } // Create migrator. 
diff --git a/stores/migrations/mysql/main/migration_00006_idx_objects_created_at.sql b/stores/migrations/mysql/main/migration_00006_idx_objects_created_at.sql new file mode 100644 index 000000000..310c9a1c3 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00006_idx_objects_created_at.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_objects_created_at` ON `objects`(`created_at`); diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index a5ed86807..68b42ae47 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -331,6 +331,7 @@ CREATE TABLE `objects` ( KEY `idx_objects_health` (`health`), KEY `idx_objects_etag` (`etag`), KEY `idx_objects_size` (`size`), + KEY `idx_objects_created_at` (`created_at`), CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/sqlite/main/migration_00006_idx_objects_created_at.sql b/stores/migrations/sqlite/main/migration_00006_idx_objects_created_at.sql new file mode 100644 index 000000000..310c9a1c3 --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00006_idx_objects_created_at.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_objects_created_at` ON `objects`(`created_at`); diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 8d7afeaa1..9875e81e3 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -52,6 +52,7 @@ CREATE INDEX `idx_objects_health` ON `objects`(`health`); CREATE INDEX `idx_objects_object_id` ON `objects`(`object_id`); CREATE INDEX `idx_objects_size` ON `objects`(`size`); CREATE UNIQUE INDEX `idx_object_bucket` ON `objects`(`db_bucket_id`,`object_id`); +CREATE INDEX `idx_objects_created_at` ON `objects`(`created_at`); -- dbMultipartUpload CREATE TABLE `multipart_uploads` (`id` integer PRIMARY KEY 
AUTOINCREMENT,`created_at` datetime,`key` blob,`upload_id` text NOT NULL,`object_id` text NOT NULL,`db_bucket_id` integer NOT NULL,`mime_type` text,CONSTRAINT `fk_multipart_uploads_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`) ON DELETE CASCADE); From c087c6b6c383010d996c859ddd27b0b6e6b51939 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 11 Mar 2024 17:35:50 +0100 Subject: [PATCH 008/201] publish.yml: use environment file instead of set-output --- .github/workflows/publish.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 824f69231..73b96965a 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -263,7 +263,7 @@ jobs: steps: - name: Extract Tag Name id: get_tag - run: echo "::set-output name=tag_name::${GITHUB_REF#refs/tags/}" + run: echo "tag_name=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV - name: Repository Dispatch uses: peter-evans/repository-dispatch@v3 @@ -274,7 +274,7 @@ jobs: client-payload: > { "description": "Renterd: The Next-Gen Sia Renter", - "tag": "${{ steps.get_tag.outputs.tag_name }}", + "tag": "${{ env.tag_name }}", "project": "renterd", "workflow_id": "${{ github.run_id }}" } \ No newline at end of file From 2a946cb294a2d04e95f9f880fbafe4d2600a7501 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 11 Mar 2024 13:38:42 +0100 Subject: [PATCH 009/201] stores: persist user metadata in CompleteMultipartUpload --- api/multipart.go | 5 +++++ bus/bus.go | 6 ++++-- bus/client/multipart-upload.go | 3 ++- go.mod | 2 +- go.sum | 4 ++-- internal/test/e2e/cluster_test.go | 4 ++-- internal/test/e2e/s3_test.go | 32 +++++++++++++++++++++++++++++++ s3/authentication.go | 4 ++-- s3/backend.go | 7 +++++-- s3/s3.go | 2 +- stores/metadata.go | 14 ++++++-------- stores/multipart.go | 10 +++++++++- stores/multipart_test.go | 2 +- 13 files changed, 72 insertions(+), 23 deletions(-) diff --git a/api/multipart.go 
b/api/multipart.go index a191b2b13..0654386c0 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -51,6 +51,10 @@ type ( MimeType string Metadata ObjectUserMetadata } + + CompleteMultipartOptions struct { + Metadata ObjectUserMetadata + } ) type ( @@ -76,6 +80,7 @@ type ( MultipartCompleteRequest struct { Bucket string `json:"bucket"` + Metadata ObjectUserMetadata Path string `json:"path"` UploadID string `json:"uploadID"` Parts []MultipartCompletedPart diff --git a/bus/bus.go b/bus/bus.go index 045b8e82a..05770eb96 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -150,7 +150,7 @@ type ( AbortMultipartUpload(ctx context.Context, bucketName, path string, uploadID string) (err error) AddMultipartPart(ctx context.Context, bucketName, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) - CompleteMultipartUpload(ctx context.Context, bucketName, path, uploadID string, parts []api.MultipartCompletedPart) (_ api.MultipartCompleteResponse, err error) + CompleteMultipartUpload(ctx context.Context, bucketName, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) CreateMultipartUpload(ctx context.Context, bucketName, path string, ec object.EncryptionKey, mimeType string, metadata api.ObjectUserMetadata) (api.MultipartCreateResponse, error) MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, _ error) MultipartUploads(ctx context.Context, bucketName, prefix, keyMarker, uploadIDMarker string, maxUploads int) (resp api.MultipartListUploadsResponse, _ error) @@ -2244,7 +2244,9 @@ func (b *bus) multipartHandlerCompletePOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - resp, err := b.ms.CompleteMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, req.UploadID, req.Parts) + resp, err := b.ms.CompleteMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, req.UploadID, req.Parts, 
api.CompleteMultipartOptions{ + Metadata: req.Metadata, + }) if jc.Check("failed to complete multipart upload", err) != nil { return } diff --git a/bus/client/multipart-upload.go b/bus/client/multipart-upload.go index 281019487..6fd06204c 100644 --- a/bus/client/multipart-upload.go +++ b/bus/client/multipart-upload.go @@ -33,10 +33,11 @@ func (c *Client) AddMultipartPart(ctx context.Context, bucket, path, contractSet } // CompleteMultipartUpload completes a multipart upload. -func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart) (resp api.MultipartCompleteResponse, err error) { +func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (resp api.MultipartCompleteResponse, err error) { err = c.c.WithContext(ctx).POST("/multipart/complete", api.MultipartCompleteRequest{ Bucket: bucket, Path: path, + Metadata: opts.Metadata, UploadID: uploadID, Parts: parts, }, &resp) diff --git a/go.mod b/go.mod index 237598d4c..e0933ff50 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe go.sia.tech/core v0.2.1 go.sia.tech/coreutils v0.0.3 - go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 + go.sia.tech/gofakes3 v0.0.0-20240311124002-c206381023db go.sia.tech/hostd v1.0.2 go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 diff --git a/go.sum b/go.sum index 2a3b756ab..f42a8843a 100644 --- a/go.sum +++ b/go.sum @@ -240,8 +240,8 @@ go.sia.tech/core v0.2.1 h1:CqmMd+T5rAhC+Py3NxfvGtvsj/GgwIqQHHVrdts/LqY= go.sia.tech/core v0.2.1/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= -go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 
h1:ulzfJNjxN5DjXHClkW2pTiDk+eJ+0NQhX87lFDZ03t0= -go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= +go.sia.tech/gofakes3 v0.0.0-20240311124002-c206381023db h1:t35K7tD79+ZZPHJ8XPaFopQvhGlQ5r1o9UgZnLOTvmc= +go.sia.tech/gofakes3 v0.0.0-20240311124002-c206381023db/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= go.sia.tech/hostd v1.0.2 h1:GjzNIAlwg3/dViF6258Xn5DI3+otQLRqmkoPDugP+9Y= go.sia.tech/hostd v1.0.2/go.mod h1:zGw+AGVmazAp4ydvo7bZLNKTy1J51RI6Mp/oxRtYT6c= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 5ca7141d5..dd3cd6e31 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -2107,7 +2107,7 @@ func TestMultipartUploads(t *testing.T) { PartNumber: 3, ETag: etag3, }, - }) + }, api.CompleteMultipartOptions{}) tt.OK(err) if ui.ETag == "" { t.Fatal("unexpected response:", ui) @@ -2435,7 +2435,7 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { PartNumber: 3, ETag: resp3.ETag, }, - })) + }, api.CompleteMultipartOptions{})) // download the object and verify its integrity dst := new(bytes.Buffer) diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index b25e11871..c8c6bb334 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -254,6 +254,38 @@ func TestS3ObjectMetadata(t *testing.T) { head, err = s3.StatObject(context.Background(), api.DefaultBucketName, t.Name(), minio.StatObjectOptions{}) tt.OK(err) assertMetadata(metadata, head.UserMetadata) + + // upload a file using multipart upload + core := cluster.S3Core + uid, err := core.NewMultipartUpload(context.Background(), api.DefaultBucketName, "multi", minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "New": "1", + }, + }) + tt.OK(err) + data := frand.Bytes(3) + + part, err := 
core.PutObjectPart(context.Background(), api.DefaultBucketName, "foo", uid, 1, bytes.NewReader(data), int64(len(data)), minio.PutObjectPartOptions{}) + tt.OK(err) + _, err = core.CompleteMultipartUpload(context.Background(), api.DefaultBucketName, "multi", uid, []minio.CompletePart{ + { + PartNumber: part.PartNumber, + ETag: part.ETag, + }, + }, minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Complete": "2", + }, + }) + tt.OK(err) + + // check metadata + head, err = s3.StatObject(context.Background(), api.DefaultBucketName, "multi", minio.StatObjectOptions{}) + tt.OK(err) + assertMetadata(map[string]string{ + "New": "1", + "Complete": "2", + }, head.UserMetadata) } func TestS3Authentication(t *testing.T) { diff --git a/s3/authentication.go b/s3/authentication.go index 9d5da4f1a..215da52b7 100644 --- a/s3/authentication.go +++ b/s3/authentication.go @@ -264,9 +264,9 @@ func (b *authenticatedBackend) AbortMultipartUpload(ctx context.Context, bucket, return b.backend.AbortMultipartUpload(ctx, bucket, object, id) } -func (b *authenticatedBackend) CompleteMultipartUpload(ctx context.Context, bucket, object string, id gofakes3.UploadID, input *gofakes3.CompleteMultipartUploadRequest) (resp *gofakes3.CompleteMultipartUploadResult, err error) { +func (b *authenticatedBackend) CompleteMultipartUpload(ctx context.Context, bucket, object string, id gofakes3.UploadID, meta map[string]string, input *gofakes3.CompleteMultipartUploadRequest) (resp *gofakes3.CompleteMultipartUploadResult, err error) { if !b.permsFromCtx(ctx, bucket).CompleteMultipartUpload { return nil, gofakes3.ErrAccessDenied } - return b.backend.CompleteMultipartUpload(ctx, bucket, object, id, input) + return b.backend.CompleteMultipartUpload(ctx, bucket, object, id, meta, input) } diff --git a/s3/backend.go b/s3/backend.go index c05a3ec98..7b5ea74f9 100644 --- a/s3/backend.go +++ b/s3/backend.go @@ -502,7 +502,8 @@ func (s *s3) AbortMultipartUpload(ctx context.Context, bucket, object string, id 
return nil } -func (s *s3) CompleteMultipartUpload(ctx context.Context, bucket, object string, id gofakes3.UploadID, input *gofakes3.CompleteMultipartUploadRequest) (*gofakes3.CompleteMultipartUploadResult, error) { +func (s *s3) CompleteMultipartUpload(ctx context.Context, bucket, object string, id gofakes3.UploadID, meta map[string]string, input *gofakes3.CompleteMultipartUploadRequest) (*gofakes3.CompleteMultipartUploadResult, error) { + convertToSiaMetadataHeaders(meta) var parts []api.MultipartCompletedPart for _, part := range input.Parts { parts = append(parts, api.MultipartCompletedPart{ @@ -510,7 +511,9 @@ func (s *s3) CompleteMultipartUpload(ctx context.Context, bucket, object string, PartNumber: part.PartNumber, }) } - resp, err := s.b.CompleteMultipartUpload(ctx, bucket, "/"+object, string(id), parts) + resp, err := s.b.CompleteMultipartUpload(ctx, bucket, "/"+object, string(id), parts, api.CompleteMultipartOptions{ + Metadata: api.ExtractObjectUserMetadataFrom(meta), + }) if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } diff --git a/s3/s3.go b/s3/s3.go index dc7ac664b..95c2e98e6 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -36,7 +36,7 @@ type bus interface { Object(ctx context.Context, bucket, path string, opts api.GetObjectOptions) (res api.ObjectsResponse, err error) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) (err error) - CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart) (_ api.MultipartCompleteResponse, err error) + CompleteMultipartUpload(ctx context.Context, bucket, path, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (api.MultipartCreateResponse, error) MultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker 
string, maxUploads int) (resp api.MultipartListUploadsResponse, _ error) MultipartUploadParts(ctx context.Context, bucket, object string, uploadID string, marker int, limit int64) (resp api.MultipartListPartsResponse, _ error) diff --git a/stores/metadata.go b/stores/metadata.go index 529d7ec89..5c35f90ce 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1724,7 +1724,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // NOTE: the metadata is not deleted because this delete will cascade, // if we stop recreating the object we have to make sure to delete the // object's metadata before trying to recreate it - _, err := s.deleteObject(tx, bucket, path) + deleted, err := s.deleteObject(tx, bucket, path) if err != nil { return fmt.Errorf("failed to delete object: %w", err) } @@ -1771,6 +1771,10 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return fmt.Errorf("failed to create user metadata: %w", err) } + // Delete slabs. + if deleted > 0 { + return pruneSlabs(tx) + } return nil }) } @@ -2727,13 +2731,7 @@ func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, if tx.Error != nil { return 0, tx.Error } - numDeleted := tx.RowsAffected - if numDeleted == 0 { - return 0, nil // nothing to prune if no object was deleted - } else if err := pruneSlabs(tx); err != nil { - return numDeleted, err - } - return numDeleted, nil + return tx.RowsAffected, nil } // deleteObjects deletes a batch of objects from the database. 
The order of diff --git a/stores/multipart.go b/stores/multipart.go index 864503455..be3333077 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -313,7 +313,7 @@ func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string }) } -func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path string, uploadID string, parts []api.MultipartCompletedPart) (_ api.MultipartCompleteResponse, err error) { +func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path string, uploadID string, parts []api.MultipartCompletedPart, opts api.CompleteMultipartOptions) (_ api.MultipartCompleteResponse, err error) { // Sanity check input parts. if !sort.SliceIsSorted(parts, func(i, j int) bool { return parts[i].PartNumber < parts[j].PartNumber @@ -434,6 +434,14 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str } } + // Create new metadata. + if len(opts.Metadata) > 0 { + err = s.createUserMetadata(tx, obj.ID, opts.Metadata) + if err != nil { + return fmt.Errorf("failed to create metadata: %w", err) + } + } + // Update user metadata. if err := tx. Model(&dbObjectUserMetadata{}). 
diff --git a/stores/multipart_test.go b/stores/multipart_test.go index 37b294418..50272fcda 100644 --- a/stores/multipart_test.go +++ b/stores/multipart_test.go @@ -91,7 +91,7 @@ func TestMultipartUploadWithUploadPackingRegression(t *testing.T) { t.Fatal(err) } else if nSlicesBefore == 0 { t.Fatal("expected some slices") - } else if _, err = ss.CompleteMultipartUpload(ctx, api.DefaultBucketName, objName, resp.UploadID, parts); err != nil { + } else if _, err = ss.CompleteMultipartUpload(ctx, api.DefaultBucketName, objName, resp.UploadID, parts, api.CompleteMultipartOptions{}); err != nil { t.Fatal(err) } else if err := ss.db.Model(&dbSlice{}).Count(&nSlicesAfter).Error; err != nil { t.Fatal(err) From 894af25816d9a11bcb1384ac1c3068302ad10d01 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 11 Mar 2024 14:22:42 +0100 Subject: [PATCH 010/201] stores: revert some changes --- stores/metadata.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 5c35f90ce..529d7ec89 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1724,7 +1724,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // NOTE: the metadata is not deleted because this delete will cascade, // if we stop recreating the object we have to make sure to delete the // object's metadata before trying to recreate it - deleted, err := s.deleteObject(tx, bucket, path) + _, err := s.deleteObject(tx, bucket, path) if err != nil { return fmt.Errorf("failed to delete object: %w", err) } @@ -1771,10 +1771,6 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return fmt.Errorf("failed to create user metadata: %w", err) } - // Delete slabs. 
- if deleted > 0 { - return pruneSlabs(tx) - } return nil }) } @@ -2731,7 +2727,13 @@ func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, if tx.Error != nil { return 0, tx.Error } - return tx.RowsAffected, nil + numDeleted := tx.RowsAffected + if numDeleted == 0 { + return 0, nil // nothing to prune if no object was deleted + } else if err := pruneSlabs(tx); err != nil { + return numDeleted, err + } + return numDeleted, nil } // deleteObjects deletes a batch of objects from the database. The order of From 4648806009a5ade5da12015fa3895d601e0d6e52 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 11:49:01 +0100 Subject: [PATCH 011/201] api: add missing json tags in MultipartCompleteRequest --- api/multipart.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/multipart.go b/api/multipart.go index 0654386c0..ee26567b1 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -79,11 +79,11 @@ type ( } MultipartCompleteRequest struct { - Bucket string `json:"bucket"` - Metadata ObjectUserMetadata - Path string `json:"path"` - UploadID string `json:"uploadID"` - Parts []MultipartCompletedPart + Bucket string `json:"bucket"` + Metadata ObjectUserMetadata `json:"metadata"` + Path string `json:"path"` + UploadID string `json:"uploadID"` + Parts []MultipartCompletedPart `json:"parts"` } MultipartCreateRequest struct { From 0fd50d4001bd3ebf7d6f38e102157b7b4fee7f83 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 11:27:22 +0100 Subject: [PATCH 012/201] stores: delete objects first --- object/object.go | 17 ----------------- stores/metadata.go | 12 ++++++------ stores/multipart.go | 12 ++++++------ worker/upload.go | 9 ++++++++- 4 files changed, 20 insertions(+), 30 deletions(-) diff --git a/object/object.go b/object/object.go index 965ebce2a..e8243fac1 100644 --- a/object/object.go +++ b/object/object.go @@ -3,7 +3,6 @@ package object import ( "bytes" "crypto/cipher" - 
"crypto/md5" "encoding/binary" "encoding/hex" "fmt" @@ -146,22 +145,6 @@ func (o Object) Contracts() map[types.PublicKey]map[types.FileContractID]struct{ return usedContracts } -func (o *Object) ComputeETag() string { - // calculate the eTag using the precomputed sector roots to avoid having to - // hash the entire object again. - h := md5.New() - b := make([]byte, 8) - for _, slab := range o.Slabs { - binary.LittleEndian.PutUint32(b[:4], slab.Offset) - binary.LittleEndian.PutUint32(b[4:], slab.Length) - h.Write(b) - for _, shard := range slab.Shards { - h.Write(shard.Root[:]) - } - } - return string(hex.EncodeToString(h.Sum(nil))) -} - // TotalSize returns the total size of the object. func (o Object) TotalSize() int64 { var n int64 diff --git a/stores/metadata.go b/stores/metadata.go index 529d7ec89..320084315 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1708,12 +1708,6 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // UpdateObject is ACID. return s.retryTransaction(func(tx *gorm.DB) error { - // Fetch contract set. - var cs dbContractSet - if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { - return fmt.Errorf("contract set %v not found: %w", contractSet, err) - } - // Try to delete. We want to get rid of the object and its slices if it // exists. // @@ -1729,6 +1723,12 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return fmt.Errorf("failed to delete object: %w", err) } + // Fetch contract set. + var cs dbContractSet + if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { + return fmt.Errorf("contract set %v not found: %w", contractSet, err) + } + // Insert a new object. 
objKey, err := o.Key.MarshalBinary() if err != nil { diff --git a/stores/multipart.go b/stores/multipart.go index be3333077..76c30c734 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -327,6 +327,12 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str } var eTag string err = s.retryTransaction(func(tx *gorm.DB) error { + // Delete potentially existing object. + _, err := s.deleteObject(tx, bucket, path) + if err != nil { + return fmt.Errorf("failed to delete object: %w", err) + } + // Find multipart upload. var mu dbMultipartUpload err = tx.Where("upload_id = ?", uploadID). @@ -347,12 +353,6 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str return fmt.Errorf("bucket name mismatch: %v != %v: %w", mu.DBBucket.Name, bucket, api.ErrBucketNotFound) } - // Delete potentially existing object. - _, err := s.deleteObject(tx, bucket, path) - if err != nil { - return fmt.Errorf("failed to delete object: %w", err) - } - // Sort the parts. 
sort.Slice(mu.Parts, func(i, j int) bool { return mu.Parts[i].PartNumber < mu.Parts[j].PartNumber diff --git a/worker/upload.go b/worker/upload.go index c5e86a166..ab84e2b37 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -2,6 +2,8 @@ package worker import ( "context" + "crypto/md5" + "encoding/hex" "errors" "fmt" "io" @@ -390,6 +392,11 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a // create the object o := object.NewObject(up.ec) + // create the md5 hasher for the etag + // NOTE: we use md5 since it's s3 compatible and clients expect it to be md5 + hasher := md5.New() + r = io.TeeReader(r, hasher) + // create the cipher reader cr, err := o.Encrypt(r, up.encryptionOffset) if err != nil { @@ -520,7 +527,7 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a } // compute etag - eTag = o.ComputeETag() + eTag = hex.EncodeToString(hasher.Sum(nil)) // add partial slabs if len(partialSlab) > 0 { From 0f60a167709df7d754b518c555dd19dd517be155 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 13:10:06 +0100 Subject: [PATCH 013/201] stores: add context wo when deleteObject fails --- stores/metadata.go | 20 +++++++++++++------- worker/worker.go | 2 +- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 320084315..be143a3fa 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1474,7 +1474,7 @@ func (s *SQLStore) RenameObject(ctx context.Context, bucket, keyOld, keyNew stri if force { // delete potentially existing object at destination if _, err := s.deleteObject(tx, bucket, keyNew); err != nil { - return err + return fmt.Errorf("RenameObject: failed to delete object: %w", err) } } tx = tx.Exec(`UPDATE objects SET object_id = ? WHERE object_id = ? 
AND ?`, keyNew, keyOld, sqlWhereBucket("objects", bucket)) @@ -1539,6 +1539,13 @@ func (s *SQLStore) AddPartialSlab(ctx context.Context, data []byte, minShards, t func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (om api.ObjectMetadata, err error) { err = s.retryTransaction(func(tx *gorm.DB) error { + if srcBucket != dstBucket || srcPath != dstPath { + _, err = s.deleteObject(tx, dstBucket, dstPath) + if err != nil { + return fmt.Errorf("CopyObject: failed to delete object: %w", err) + } + } + var srcObj dbObject err = tx.Where("objects.object_id = ? AND DBBucket.name = ?", srcPath, srcBucket). Joins("DBBucket"). @@ -1565,10 +1572,6 @@ func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath } return tx.Save(&srcObj).Error } - _, err = s.deleteObject(tx, dstBucket, dstPath) - if err != nil { - return fmt.Errorf("failed to delete object: %w", err) - } var srcSlices []dbSlice err = tx.Where("db_object_id = ?", srcObj.ID). @@ -1720,7 +1723,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // object's metadata before trying to recreate it _, err := s.deleteObject(tx, bucket, path) if err != nil { - return fmt.Errorf("failed to delete object: %w", err) + return fmt.Errorf("UpdateObject: failed to delete object: %w", err) } // Fetch contract set. 
@@ -1780,7 +1783,10 @@ func (s *SQLStore) RemoveObject(ctx context.Context, bucket, key string) error { var err error err = s.retryTransaction(func(tx *gorm.DB) error { rowsAffected, err = s.deleteObject(tx, bucket, key) - return err + if err != nil { + return fmt.Errorf("RemoveObject: failed to delete object: %w", err) + } + return nil }) if err != nil { return err diff --git a/worker/worker.go b/worker/worker.go index 9e4dacdd2..c2e08f915 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1098,7 +1098,7 @@ func (w *worker) objectsHandlerPUT(jc jape.Context) { if err := jc.Check("couldn't upload object", err); err != nil { if err != nil { w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) { + if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) && !errors.Is(err, context.Canceled) { w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, mimeType, rs.MinShards, rs.TotalShards, len(contracts), up.UploadPacking, false, err)) } } From 401be07084112e5d8d9a923ca82d3b3b705beceb Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 15:00:07 +0100 Subject: [PATCH 014/201] stores: TestUpdateObjectParallel --- stores/metadata.go | 56 +++++++++++++------------ stores/metadata_test.go | 91 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+), 26 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index be143a3fa..83825048f 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1726,51 +1726,55 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return fmt.Errorf("UpdateObject: failed to delete object: %w", err) } - // Fetch contract set. - var cs dbContractSet - if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { - return fmt.Errorf("contract set %v not found: %w", contractSet, err) - } - - // Insert a new object. + // Insert a new object. 
objKey, err := o.Key.MarshalBinary() if err != nil { return fmt.Errorf("failed to marshal object key: %w", err) } - var bucketID uint - err = tx.Table("(SELECT id from buckets WHERE buckets.name = ?) bucket_id", bucket). - Take(&bucketID).Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return fmt.Errorf("bucket %v not found: %w", bucket, api.ErrBucketNotFound) - } else if err != nil { - return fmt.Errorf("failed to fetch bucket id: %w", err) - } - obj := dbObject{ - DBBucketID: bucketID, - ObjectID: path, - Key: objKey, - Size: o.TotalSize(), - MimeType: mimeType, - Etag: eTag, - } - err = tx.Create(&obj).Error + err = tx.Model(&dbObject{}). + Create(map[string]any{ + "db_bucket_id": gorm.Expr("(SELECT id from buckets WHERE buckets.name = ?)", bucket), + "object_id": path, + "key": objKey, + "size": o.TotalSize(), + "mime_type": mimeType, + "etag": eTag, + }).Error if err != nil { return fmt.Errorf("failed to create object: %w", err) } + // Get the id of the object + var objID uint + err = tx.Model(&dbObject{}). + Select("id"). + Where("object_id = ?", path). + Where("db_bucket_id = (SELECT id from buckets WHERE buckets.name = ?)", bucket). + Scan(&objID). + Error + if err != nil { + return err + } + // Fetch the used contracts. contracts, err := fetchUsedContracts(tx, usedContracts) if err != nil { return fmt.Errorf("failed to fetch used contracts: %w", err) } + // Fetch contract set. + var cs dbContractSet + if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { + return fmt.Errorf("contract set %v not found: %w", contractSet, err) + } + // Create all slices. This also creates any missing slabs or sectors. - if err := s.createSlices(tx, &obj.ID, nil, cs.ID, contracts, o.Slabs); err != nil { + if err := s.createSlices(tx, &objID, nil, cs.ID, contracts, o.Slabs); err != nil { return fmt.Errorf("failed to create slices: %w", err) } // Create all user metadata. 
- if err := s.createUserMetadata(tx, obj.ID, metadata); err != nil { + if err := s.createUserMetadata(tx, objID, metadata); err != nil { return fmt.Errorf("failed to create user metadata: %w", err) } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index f5461147c..802e6a699 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -10,6 +10,7 @@ import ( "reflect" "sort" "strings" + "sync" "testing" "time" @@ -4545,3 +4546,93 @@ func TestTypeCurrency(t *testing.T) { } } } + +// TestUpdateObjectParallel calls UpdateObject from multiple threads in parallel +// while retries are disabled to make sure calling the same method from multiple +// threads won't cause deadlocks. +func TestUpdateObjectParallel(t *testing.T) { + cfg := defaultTestSQLStoreConfig + cfg.dir = t.TempDir() + cfg.persistent = true + ss := newTestSQLStore(t, cfg) + ss.retryTransactionIntervals = []time.Duration{0} // don't retry + defer ss.Close() + + // create 2 hosts + hks, err := ss.addTestHosts(2) + if err != nil { + t.Fatal(err) + } + hk1, hk2 := hks[0], hks[1] + + // create 2 contracts + fcids, _, err := ss.addTestContracts(hks) + if err != nil { + t.Fatal(err) + } + fcid1, fcid2 := fcids[0], fcids[1] + + c := make(chan string) + ctx, cancel := context.WithCancel(context.Background()) + work := func() { + t.Helper() + defer cancel() + for name := range c { + // create an object + obj := object.Object{ + Key: object.GenerateEncryptionKey(), + Slabs: []object.SlabSlice{ + { + Slab: object.Slab{ + Health: 1.0, + Key: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + }, + Offset: 10, + Length: 100, + }, + { + Slab: object.Slab{ + Health: 1.0, + Key: object.GenerateEncryptionKey(), + MinShards: 2, + Shards: newTestShards(hk2, fcid2, types.Hash256{2}), + }, + Offset: 20, + Length: 200, + }, + }, + } + + // update the object + if err := ss.UpdateObject(context.Background(), api.DefaultBucketName, name, testContractSet, 
testETag, testMimeType, testMetadata, obj); err != nil { + t.Error(err) + return + } + } + } + + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + work() + wg.Done() + }() + } + + // create 1000 objects and then overwrite them + for i := 0; i < 2; i++ { + for j := 0; j < 1000; j++ { + select { + case c <- fmt.Sprintf("object-%d", j): + case <-ctx.Done(): + return + } + } + } + + close(c) + wg.Wait() +} From 47fafa09f6ae74f4b0d8fb54076b2a63536f38f9 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 15:08:47 +0100 Subject: [PATCH 015/201] stores: use dir instead of cfg.dir in newTestSQLStore --- stores/metadata_test.go | 1 - stores/sql_test.go | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 802e6a699..cbecffae2 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4552,7 +4552,6 @@ func TestTypeCurrency(t *testing.T) { // threads won't cause deadlocks. 
func TestUpdateObjectParallel(t *testing.T) { cfg := defaultTestSQLStoreConfig - cfg.dir = t.TempDir() cfg.persistent = true ss := newTestSQLStore(t, cfg) ss.retryTransactionIntervals = []time.Duration{0} // don't retry diff --git a/stores/sql_test.go b/stores/sql_test.go index 776e3e10e..17c296075 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -107,8 +107,8 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { conn = NewMySQLConnection(dbUser, dbPassword, dbURI, dbName) connMetrics = NewMySQLConnection(dbUser, dbPassword, dbURI, dbMetricsName) } else if cfg.persistent { - conn = NewSQLiteConnection(filepath.Join(cfg.dir, "db.sqlite")) - connMetrics = NewSQLiteConnection(filepath.Join(cfg.dir, "metrics.sqlite")) + conn = NewSQLiteConnection(filepath.Join(dir, "db.sqlite")) + connMetrics = NewSQLiteConnection(filepath.Join(dir, "metrics.sqlite")) } else { conn = NewEphemeralSQLiteConnection(dbName) connMetrics = NewEphemeralSQLiteConnection(dbMetricsName) From 83f1e646688bbe4d61d733a8d0ec106d6d92739b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 15:18:27 +0100 Subject: [PATCH 016/201] stores: fix TestBucketObjects --- stores/metadata.go | 18 ++++++++++++++++-- stores/metadata_test.go | 2 +- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 83825048f..f32162bf4 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1706,6 +1706,19 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, } } + // check if bucket exists - doing so before the transaction to avoid + // deadlocks while risking the bucket updating in the meantime + var bucketID uint + if resp := s.db. + Model(&dbBucket{}). + Select("id"). + Where("name", bucket). 
+ Scan(&bucketID); resp.Error != nil { + return resp.Error + } else if resp.RowsAffected == 0 { + return api.ErrBucketNotFound + } + // collect all used contracts usedContracts := o.Contracts() @@ -1733,7 +1746,8 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, } err = tx.Model(&dbObject{}). Create(map[string]any{ - "db_bucket_id": gorm.Expr("(SELECT id from buckets WHERE buckets.name = ?)", bucket), + "created_at": time.Now(), + "db_bucket_id": bucketID, "object_id": path, "key": objKey, "size": o.TotalSize(), @@ -1749,7 +1763,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, err = tx.Model(&dbObject{}). Select("id"). Where("object_id = ?", path). - Where("db_bucket_id = (SELECT id from buckets WHERE buckets.name = ?)", bucket). + Where("db_bucket_id", bucketID). Scan(&objID). Error if err != nil { diff --git a/stores/metadata_test.go b/stores/metadata_test.go index cbecffae2..442860f2f 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4613,7 +4613,7 @@ func TestUpdateObjectParallel(t *testing.T) { } var wg sync.WaitGroup - for i := 0; i < 2; i++ { + for i := 0; i < 4; i++ { wg.Add(1) go func() { work() From d4e4ed8d27cd15cc2c8be355a2d7e78b828698ed Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 15:24:27 +0100 Subject: [PATCH 017/201] stores: fix config for mysql --- stores/metadata_test.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 442860f2f..3ee40b182 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4552,7 +4552,12 @@ func TestTypeCurrency(t *testing.T) { // threads won't cause deadlocks. 
func TestUpdateObjectParallel(t *testing.T) { cfg := defaultTestSQLStoreConfig - cfg.persistent = true + + // check if we are running against mysql and only persist if we aren't + dbURI, _, _, _ := DBConfigFromEnv() + if dbURI == "" { + cfg.persistent = true + } ss := newTestSQLStore(t, cfg) ss.retryTransactionIntervals = []time.Duration{0} // don't retry defer ss.Close() From 2610af228dbaf15c2dcf0fdefa75427a224739ed Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 15:53:51 +0100 Subject: [PATCH 018/201] stores: move pruning of slabs out of transaction in UpdateObject --- stores/metadata.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index f32162bf4..3f9cf22c5 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1723,7 +1723,8 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, usedContracts := o.Contracts() // UpdateObject is ACID. - return s.retryTransaction(func(tx *gorm.DB) error { + var nDeleted int64 + err := s.retryTransaction(func(tx *gorm.DB) error { // Try to delete. We want to get rid of the object and its slices if it // exists. // @@ -1734,10 +1735,12 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // NOTE: the metadata is not deleted because this delete will cascade, // if we stop recreating the object we have to make sure to delete the // object's metadata before trying to recreate it - _, err := s.deleteObject(tx, bucket, path) - if err != nil { - return fmt.Errorf("UpdateObject: failed to delete object: %w", err) + var err error + resp := tx.Exec("DELETE FROM objects WHERE object_id = ? AND db_bucket_id = ?", path, bucketID) + if resp.Error != nil { + return fmt.Errorf("UpdateObject: failed to delete object: %w", resp.Error) } + nDeleted = resp.RowsAffected // Insert a new object. 
objKey, err := o.Key.MarshalBinary() @@ -1791,9 +1794,17 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, if err := s.createUserMetadata(tx, objID, metadata); err != nil { return fmt.Errorf("failed to create user metadata: %w", err) } - return nil }) + if err != nil { + return err + } + if nDeleted > 0 { + if err := s.retryTransaction(pruneSlabs); err != nil { + return fmt.Errorf("UpdateObject: failed to prune slabs: %w", err) + } + } + return pruneSlabs(s.db) } func (s *SQLStore) RemoveObject(ctx context.Context, bucket, key string) error { From 239f01aa2a77ed86ff14be78d30abf0314a13a9d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 16:02:59 +0100 Subject: [PATCH 019/201] stores: fix TestSQLMetadataStore --- stores/metadata_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 3ee40b182..dbb688d9b 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1058,9 +1058,9 @@ func TestSQLMetadataStore(t *testing.T) { // incremented due to the object and slab being overwritten. 
two := uint(2) expectedObj.Slabs[0].DBObjectID = &two - expectedObj.Slabs[0].DBSlabID = 3 + expectedObj.Slabs[0].DBSlabID = 1 expectedObj.Slabs[1].DBObjectID = &two - expectedObj.Slabs[1].DBSlabID = 4 + expectedObj.Slabs[1].DBSlabID = 2 if !reflect.DeepEqual(obj, expectedObj) { t.Fatal("object mismatch", cmp.Diff(obj, expectedObj)) } @@ -1082,7 +1082,7 @@ func TestSQLMetadataStore(t *testing.T) { TotalShards: 1, Shards: []dbSector{ { - DBSlabID: 3, + DBSlabID: 1, SlabIndex: 1, Root: obj1.Slabs[0].Shards[0].Root[:], LatestHost: publicKey(obj1.Slabs[0].Shards[0].LatestHost), @@ -1122,7 +1122,7 @@ func TestSQLMetadataStore(t *testing.T) { TotalShards: 1, Shards: []dbSector{ { - DBSlabID: 4, + DBSlabID: 2, SlabIndex: 1, Root: obj1.Slabs[1].Shards[0].Root[:], LatestHost: publicKey(obj1.Slabs[1].Shards[0].LatestHost), From b784b393572e7e96ca55691d53a91fe49685ee49 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 12 Mar 2024 16:24:10 +0100 Subject: [PATCH 020/201] stores: use given context through WithContext on gorm.DB --- alerts/alerts_test.go | 12 +-- bus/bus.go | 8 +- internal/node/node.go | 5 +- stores/accounts.go | 10 ++- stores/autopilot.go | 12 ++- stores/hostdb.go | 27 ++++--- stores/metadata.go | 159 +++++++++++++++++----------------------- stores/metadata_test.go | 4 +- stores/metrics.go | 22 +++--- stores/multipart.go | 14 ++-- stores/slabbuffer.go | 2 +- stores/sql.go | 11 +-- stores/sql_test.go | 2 +- stores/webhooks.go | 14 ++-- stores/webhooks_test.go | 17 +++-- webhooks/webhooks.go | 24 +++--- 16 files changed, 170 insertions(+), 173 deletions(-) diff --git a/alerts/alerts_test.go b/alerts/alerts_test.go index 2cc20c57b..ff927ccdc 100644 --- a/alerts/alerts_test.go +++ b/alerts/alerts_test.go @@ -22,27 +22,29 @@ type testWebhookStore struct { listed int } -func (s *testWebhookStore) DeleteWebhook(wb webhooks.Webhook) error { +func (s *testWebhookStore) DeleteWebhook(_ context.Context, wb webhooks.Webhook) error { s.mu.Lock() defer s.mu.Unlock() s.deleted++ 
return nil } -func (s *testWebhookStore) AddWebhook(wb webhooks.Webhook) error { +func (s *testWebhookStore) AddWebhook(_ context.Context, wb webhooks.Webhook) error { s.mu.Lock() defer s.mu.Unlock() s.added++ return nil } -func (s *testWebhookStore) Webhooks() ([]webhooks.Webhook, error) { +func (s *testWebhookStore) Webhooks(_ context.Context) ([]webhooks.Webhook, error) { s.mu.Lock() defer s.mu.Unlock() s.listed++ return nil, nil } +var _ webhooks.WebhookStore = (*testWebhookStore)(nil) + func TestWebhooks(t *testing.T) { store := &testWebhookStore{} mgr, err := webhooks.NewManager(zap.NewNop().Sugar(), store) @@ -75,7 +77,7 @@ func TestWebhooks(t *testing.T) { if hookID := wh.String(); hookID != fmt.Sprintf("%v.%v.%v", wh.URL, wh.Module, "") { t.Fatalf("wrong result for wh.String(): %v != %v", wh.String(), hookID) } - err = mgr.Register(wh) + err = mgr.Register(context.Background(), wh) if err != nil { t.Fatal(err) } @@ -110,7 +112,7 @@ func TestWebhooks(t *testing.T) { } // unregister hook - if err := mgr.Delete(webhooks.Webhook{ + if err := mgr.Delete(context.Background(), webhooks.Webhook{ Event: hooks[0].Event, Module: hooks[0].Module, URL: hooks[0].URL, diff --git a/bus/bus.go b/bus/bus.go index 045b8e82a..e63ddb7b4 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -189,7 +189,7 @@ type ( EphemeralAccountStore interface { Accounts(context.Context) ([]api.Account, error) SaveAccounts(context.Context, []api.Account) error - SetUncleanShutdown() error + SetUncleanShutdown(context.Context) error } MetricsStore interface { @@ -2022,7 +2022,7 @@ func (b *bus) webhookHandlerDelete(jc jape.Context) { if jc.Decode(&wh) != nil { return } - err := b.hooks.Delete(wh) + err := b.hooks.Delete(jc.Request.Context(), wh) if errors.Is(err, webhooks.ErrWebhookNotFound) { jc.Error(fmt.Errorf("webhook for URL %v and event %v.%v not found", wh.URL, wh.Module, wh.Event), http.StatusNotFound) return @@ -2044,7 +2044,7 @@ func (b *bus) webhookHandlerPost(jc jape.Context) { if 
jc.Decode(&req) != nil { return } - err := b.hooks.Register(webhooks.Webhook{ + err := b.hooks.Register(jc.Request.Context(), webhooks.Webhook{ Event: req.Event, Module: req.Module, URL: req.URL, @@ -2410,7 +2410,7 @@ func New(s Syncer, am *alerts.Manager, hm *webhooks.Manager, cm ChainManager, tp // mark the shutdown as unclean, this will be overwritten when/if the // accounts are saved on shutdown - if err := eas.SetUncleanShutdown(); err != nil { + if err := eas.SetUncleanShutdown(ctx); err != nil { return nil, fmt.Errorf("failed to mark account shutdown as unclean: %w", err) } return b, nil diff --git a/internal/node/node.go b/internal/node/node.go index e94cfbb4d..40f7f70d8 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -137,11 +137,14 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht cancelSubscribe := make(chan struct{}) go func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + subscribeErr := cs.ConsensusSetSubscribe(sqlStore, ccid, cancelSubscribe) if errors.Is(subscribeErr, modules.ErrInvalidConsensusChangeID) { l.Warn("Invalid consensus change ID detected - resyncing consensus") // Reset the consensus state within the database and rescan. - if err := sqlStore.ResetConsensusSubscription(); err != nil { + if err := sqlStore.ResetConsensusSubscription(ctx); err != nil { l.Fatal(fmt.Sprintf("Failed to reset consensus subscription of SQLStore: %v", err)) return } diff --git a/stores/accounts.go b/stores/accounts.go index d519df9dd..69f4aeff8 100644 --- a/stores/accounts.go +++ b/stores/accounts.go @@ -55,7 +55,7 @@ func (a dbAccount) convert() api.Account { // Accounts returns all accounts from the db. 
func (s *SQLStore) Accounts(ctx context.Context) ([]api.Account, error) { var dbAccounts []dbAccount - if err := s.db.Find(&dbAccounts).Error; err != nil { + if err := s.db.WithContext(ctx).Find(&dbAccounts).Error; err != nil { return nil, err } accounts := make([]api.Account, len(dbAccounts)) @@ -69,8 +69,10 @@ func (s *SQLStore) Accounts(ctx context.Context) ([]api.Account, error) { // also sets the 'requires_sync' flag. That way, the autopilot will know to sync // all accounts after an unclean shutdown and the bus will know not to apply // drift. -func (s *SQLStore) SetUncleanShutdown() error { - return s.db.Model(&dbAccount{}). +func (s *SQLStore) SetUncleanShutdown(ctx context.Context) error { + return s.db. + WithContext(ctx). + Model(&dbAccount{}). Where("TRUE"). Updates(map[string]interface{}{ "clean_shutdown": false, @@ -95,7 +97,7 @@ func (s *SQLStore) SaveAccounts(ctx context.Context, accounts []api.Account) err RequiresSync: acc.RequiresSync, } } - return s.db.Clauses(clause.OnConflict{ + return s.db.WithContext(ctx).Clauses(clause.OnConflict{ Columns: []clause.Column{{Name: "account_id"}}, UpdateAll: true, }).Create(&dbAccounts).Error diff --git a/stores/autopilot.go b/stores/autopilot.go index 6dc88a692..5a5c5ed2d 100644 --- a/stores/autopilot.go +++ b/stores/autopilot.go @@ -34,6 +34,7 @@ func (c dbAutopilot) convert() api.Autopilot { func (s *SQLStore) Autopilots(ctx context.Context) ([]api.Autopilot, error) { var entities []dbAutopilot err := s.db. + WithContext(ctx). Model(&dbAutopilot{}). Find(&entities). Error @@ -51,6 +52,7 @@ func (s *SQLStore) Autopilots(ctx context.Context) ([]api.Autopilot, error) { func (s *SQLStore) Autopilot(ctx context.Context, id string) (api.Autopilot, error) { var entity dbAutopilot err := s.db. + WithContext(ctx). Model(&dbAutopilot{}). Where("identifier = ?", id). First(&entity). 
@@ -73,10 +75,12 @@ func (s *SQLStore) UpdateAutopilot(ctx context.Context, ap api.Autopilot) error } // upsert - return s.db.Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: "identifier"}}, - UpdateAll: true, - }).Create(&dbAutopilot{ + return s.db. + WithContext(ctx). + Clauses(clause.OnConflict{ + Columns: []clause.Column{{Name: "identifier"}}, + UpdateAll: true, + }).Create(&dbAutopilot{ Identifier: ap.ID, Config: ap.Config, CurrentPeriod: ap.CurrentPeriod, diff --git a/stores/hostdb.go b/stores/hostdb.go index 37aa18ee8..0b59789df 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -427,6 +427,7 @@ func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.H var h dbHost tx := ss.db. + WithContext(ctx). Where(&dbHost{PublicKey: publicKey(hostKey)}). Preload("Allowlist"). Preload("Blocklist"). @@ -456,6 +457,7 @@ func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, var hostAddresses []hostdb.HostAddress err := ss.db. + WithContext(ctx). Model(&dbHost{}). Where("last_scan < ?", maxLastScan.UnixNano()). Offset(offset). @@ -546,6 +548,7 @@ func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures ui // fetch all hosts outside of the transaction var hosts []dbHost if err := ss.db. + WithContext(ctx). Model(&dbHost{}). Where("recent_downtime >= ? AND recent_scan_failures >= ?", maxDowntime, minRecentFailures). Find(&hosts). 
@@ -561,7 +564,7 @@ func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures ui // remove every host one by one var errs []error for _, h := range hosts { - if err := ss.retryTransaction(func(tx *gorm.DB) error { + if err := ss.retryTransaction(ctx, func(tx *gorm.DB) error { // fetch host contracts hcs, err := contractsForHost(tx, h) if err != nil { @@ -575,7 +578,7 @@ func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures ui } // archive host contracts - if err := archiveContracts(ctx, tx, hcs, toArchive); err != nil { + if err := archiveContracts(tx, hcs, toArchive); err != nil { return err } @@ -609,7 +612,7 @@ func (ss *SQLStore) UpdateHostAllowlistEntries(ctx context.Context, add, remove // clear allowlist if clear { - return ss.retryTransaction(func(tx *gorm.DB) error { + return ss.retryTransaction(ctx, func(tx *gorm.DB) error { return tx.Where("TRUE").Delete(&dbAllowlistEntry{}).Error }) } @@ -624,7 +627,7 @@ func (ss *SQLStore) UpdateHostAllowlistEntries(ctx context.Context, add, remove toDelete[i] = publicKey(entry) } - return ss.retryTransaction(func(tx *gorm.DB) error { + return ss.retryTransaction(ctx, func(tx *gorm.DB) error { if len(toInsert) > 0 { if err := tx.Create(&toInsert).Error; err != nil { return err @@ -648,7 +651,7 @@ func (ss *SQLStore) UpdateHostBlocklistEntries(ctx context.Context, add, remove // clear blocklist if clear { - return ss.retryTransaction(func(tx *gorm.DB) error { + return ss.retryTransaction(ctx, func(tx *gorm.DB) error { return tx.Where("TRUE").Delete(&dbBlocklistEntry{}).Error }) } @@ -658,7 +661,7 @@ func (ss *SQLStore) UpdateHostBlocklistEntries(ctx context.Context, add, remove toInsert = append(toInsert, dbBlocklistEntry{Entry: entry}) } - return ss.retryTransaction(func(tx *gorm.DB) error { + return ss.retryTransaction(ctx, func(tx *gorm.DB) error { if len(toInsert) > 0 { if err := tx.Create(&toInsert).Error; err != nil { return err @@ -676,6 +679,7 @@ func (ss *SQLStore) 
UpdateHostBlocklistEntries(ctx context.Context, add, remove func (ss *SQLStore) HostAllowlist(ctx context.Context) (allowlist []types.PublicKey, err error) { var pubkeys []publicKey err = ss.db. + WithContext(ctx). Model(&dbAllowlistEntry{}). Pluck("entry", &pubkeys). Error @@ -688,6 +692,7 @@ func (ss *SQLStore) HostAllowlist(ctx context.Context) (allowlist []types.Public func (ss *SQLStore) HostBlocklist(ctx context.Context) (blocklist []string, err error) { err = ss.db. + WithContext(ctx). Model(&dbBlocklistEntry{}). Pluck("entry", &blocklist). Error @@ -719,7 +724,7 @@ func (ss *SQLStore) RecordHostScans(ctx context.Context, scans []hostdb.HostScan end = len(hks) } var batchHosts []dbHost - if err := ss.db.Where("public_key IN (?)", hks[i:end]). + if err := ss.db.WithContext(ctx).Where("public_key IN (?)", hks[i:end]). Find(&batchHosts).Error; err != nil { return err } @@ -732,7 +737,7 @@ func (ss *SQLStore) RecordHostScans(ctx context.Context, scans []hostdb.HostScan // Write the interactions and update to the hosts atomically within a single // transaction. - return ss.retryTransaction(func(tx *gorm.DB) error { + return ss.retryTransaction(ctx, func(tx *gorm.DB) error { // Handle scans for _, scan := range scans { host, exists := hostMap[publicKey(scan.HostKey)] @@ -841,7 +846,7 @@ func (ss *SQLStore) RecordPriceTables(ctx context.Context, priceTableUpdate []ho end = len(hks) } var batchHosts []dbHost - if err := ss.db.Where("public_key IN (?)", hks[i:end]). + if err := ss.db.WithContext(ctx).Where("public_key IN (?)", hks[i:end]). Find(&batchHosts).Error; err != nil { return err } @@ -854,7 +859,7 @@ func (ss *SQLStore) RecordPriceTables(ctx context.Context, priceTableUpdate []ho // Write the interactions and update to the hosts atomically within a single // transaction. 
- return ss.retryTransaction(func(tx *gorm.DB) error { + return ss.retryTransaction(ctx, func(tx *gorm.DB) error { // Handle price table updates for _, ptu := range priceTableUpdate { host, exists := hostMap[publicKey(ptu.HostKey)] @@ -1086,7 +1091,7 @@ func updateBlocklist(tx *gorm.DB, hk types.PublicKey, allowlist []dbAllowlistEnt } func (s *SQLStore) ResetLostSectors(ctx context.Context, hk types.PublicKey) error { - return s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { return tx.Model(&dbHost{}). Where("public_key", publicKey(hk)). Update("lost_sectors", 0). diff --git a/stores/metadata.go b/stores/metadata.go index 529d7ec89..c45422cfa 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -480,6 +480,7 @@ func (raw rawObject) toSlabSlice() (slice object.SlabSlice, _ error) { func (s *SQLStore) Bucket(ctx context.Context, bucket string) (api.Bucket, error) { var b dbBucket err := s.db. + WithContext(ctx). Model(&dbBucket{}). Where("name = ?", bucket). Take(&b). @@ -498,7 +499,7 @@ func (s *SQLStore) Bucket(ctx context.Context, bucket string) (api.Bucket, error func (s *SQLStore) CreateBucket(ctx context.Context, bucket string, policy api.BucketPolicy) error { // Create bucket. - return s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { res := tx.Clauses(clause.OnConflict{ DoNothing: true, }). @@ -520,7 +521,7 @@ func (s *SQLStore) UpdateBucketPolicy(ctx context.Context, bucket string, policy if err != nil { return err } - return s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { return tx. Model(&dbBucket{}). Where("name", bucket). @@ -534,7 +535,7 @@ func (s *SQLStore) UpdateBucketPolicy(ctx context.Context, bucket string, policy func (s *SQLStore) DeleteBucket(ctx context.Context, bucket string) error { // Delete bucket. 
- return s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { var b dbBucket if err := tx.Take(&b, "name = ?", bucket).Error; errors.Is(err, gorm.ErrRecordNotFound) { return api.ErrBucketNotFound @@ -561,6 +562,7 @@ func (s *SQLStore) DeleteBucket(ctx context.Context, bucket string) error { func (s *SQLStore) ListBuckets(ctx context.Context) ([]api.Bucket, error) { var buckets []dbBucket err := s.db. + WithContext(ctx). Model(&dbBucket{}). Find(&buckets). Error @@ -583,10 +585,12 @@ func (s *SQLStore) ListBuckets(ctx context.Context) ([]api.Bucket, error) { // reduce locking and make sure all results are consistent, everything is done // within a single transaction. func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { + db := s.db.WithContext(ctx) + // fetch bucket id if a bucket was specified var bucketID uint if opts.Bucket != "" { - err := s.db.Model(&dbBucket{}).Select("id").Where("name = ?", opts.Bucket).Take(&bucketID).Error + err := db.Model(&dbBucket{}).Select("id").Where("name = ?", opts.Bucket).Take(&bucketID).Error if err != nil { return api.ObjectsStatsResponse{}, err } @@ -598,7 +602,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) MinHealth float64 TotalObjectsSize uint64 } - objInfoQuery := s.db. + objInfoQuery := db. Model(&dbObject{}). Select("COUNT(*) AS NumObjects, COALESCE(MIN(health), 1) as MinHealth, SUM(size) AS TotalObjectsSize") if opts.Bucket != "" { @@ -611,7 +615,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) // number of unfinished objects var unfinishedObjects uint64 - unfinishedObjectsQuery := s.db. + unfinishedObjectsQuery := db. Model(&dbMultipartUpload{}). 
Select("COUNT(*)") if opts.Bucket != "" { @@ -624,7 +628,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) // size of unfinished objects var totalUnfinishedObjectsSize uint64 - totalUnfinishedObjectsSizeQuery := s.db. + totalUnfinishedObjectsSizeQuery := db. Model(&dbMultipartPart{}). Joins("INNER JOIN multipart_uploads mu ON multipart_parts.db_multipart_upload_id = mu.id"). Select("COALESCE(SUM(size), 0)") @@ -637,7 +641,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) } var totalSectors int64 - totalSectorsQuery := s.db. + totalSectorsQuery := db. Table("slabs sla"). Select("COALESCE(SUM(total_shards), 0)"). Where("db_buffered_slab_id IS NULL") @@ -657,7 +661,7 @@ func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) } var totalUploaded int64 - err = s.db. + err = db. Model(&dbContract{}). Select("COALESCE(SUM(size), 0)"). Scan(&totalUploaded). @@ -707,7 +711,7 @@ func (s *SQLStore) AddContract(ctx context.Context, c rhpv2.ContractRevision, co return api.ContractMetadata{}, err } var added dbContract - if err = s.retryTransaction(func(tx *gorm.DB) error { + if err = s.retryTransaction(ctx, func(tx *gorm.DB) error { added, err = addContract(tx, c, contractPrice, totalCost, startHeight, types.FileContractID{}, cs) return err }); err != nil { @@ -719,12 +723,14 @@ func (s *SQLStore) AddContract(ctx context.Context, c rhpv2.ContractRevision, co } func (s *SQLStore) Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) { + db := s.db.WithContext(ctx) + // helper to check whether a contract set exists hasContractSet := func() error { if opts.ContractSet == "" { return nil } - err := s.db.Where("name", opts.ContractSet).Take(&dbContractSet{}).Error + err := db.Where("name", opts.ContractSet).Take(&dbContractSet{}).Error if errors.Is(err, gorm.ErrRecordNotFound) { return api.ErrContractSetNotFound } @@ -737,13 +743,13 @@ func (s *SQLStore) 
Contracts(ctx context.Context, opts api.ContractsOpts) ([]api Host dbHost `gorm:"embedded"` Name string } - tx := s.db + tx := db if opts.ContractSet == "" { // no filter, use all contracts tx = tx.Table("contracts") } else { // filter contracts by contract set first - tx = tx.Table("(?) contracts", s.db.Model(&dbContract{}). + tx = tx.Table("(?) contracts", db.Model(&dbContract{}). Select("contracts.*"). Joins("INNER JOIN hosts h ON h.id = contracts.host_id"). Joins("INNER JOIN contract_set_contracts csc ON csc.db_contract_id = contracts.id"). @@ -806,7 +812,7 @@ func (s *SQLStore) AddRenewedContract(ctx context.Context, c rhpv2.ContractRevis return api.ContractMetadata{}, err } var renewed dbContract - if err := s.retryTransaction(func(tx *gorm.DB) error { + if err := s.retryTransaction(ctx, func(tx *gorm.DB) error { // Fetch contract we renew from. oldContract, err := contract(tx, fileContractID(renewedFrom)) if err != nil { @@ -846,7 +852,7 @@ func (s *SQLStore) AddRenewedContract(ctx context.Context, c rhpv2.ContractRevis func (s *SQLStore) AncestorContracts(ctx context.Context, id types.FileContractID, startHeight uint64) ([]api.ArchivedContract, error) { var ancestors []dbArchivedContract - err := s.db.Raw("WITH RECURSIVE ancestors AS (SELECT * FROM archived_contracts WHERE renewed_to = ? UNION ALL SELECT archived_contracts.* FROM ancestors, archived_contracts WHERE archived_contracts.renewed_to = ancestors.fcid) SELECT * FROM ancestors WHERE start_height >= ?", fileContractID(id), startHeight). + err := s.db.WithContext(ctx).Raw("WITH RECURSIVE ancestors AS (SELECT * FROM archived_contracts WHERE renewed_to = ? UNION ALL SELECT archived_contracts.* FROM ancestors, archived_contracts WHERE archived_contracts.renewed_to = ancestors.fcid) SELECT * FROM ancestors WHERE start_height >= ?", fileContractID(id), startHeight). Scan(&ancestors). 
Error if err != nil { @@ -877,8 +883,8 @@ func (s *SQLStore) ArchiveContracts(ctx context.Context, toArchive map[types.Fil } // archive them - if err := s.retryTransaction(func(tx *gorm.DB) error { - return archiveContracts(ctx, tx, cs, toArchive) + if err := s.retryTransaction(ctx, func(tx *gorm.DB) error { + return archiveContracts(tx, cs, toArchive) }); err != nil { return err } @@ -890,6 +896,7 @@ func (s *SQLStore) ArchiveAllContracts(ctx context.Context, reason string) error // fetch contract ids var fcids []fileContractID if err := s.db. + WithContext(ctx). Model(&dbContract{}). Pluck("fcid", &fcids). Error; err != nil { @@ -920,6 +927,7 @@ func (s *SQLStore) ContractRoots(ctx context.Context, id types.FileContractID) ( var dbRoots []hash256 if err = s.db. + WithContext(ctx). Raw(` SELECT sec.root FROM contracts c @@ -938,7 +946,7 @@ WHERE c.fcid = ? func (s *SQLStore) ContractSets(ctx context.Context) ([]string, error) { var sets []string - err := s.db.Raw("SELECT name FROM contract_sets"). + err := s.db.WithContext(ctx).Raw("SELECT name FROM contract_sets"). Scan(&sets). Error return sets, err @@ -953,7 +961,7 @@ func (s *SQLStore) ContractSizes(ctx context.Context) (map[types.FileContractID] var nullContracts []size var dataContracts []size - if err := s.retryTransaction(func(tx *gorm.DB) error { + if err := s.retryTransaction(ctx, func(tx *gorm.DB) error { // first, we fetch all contracts without sectors and consider their // entire size as prunable if err := tx. @@ -1003,6 +1011,7 @@ func (s *SQLStore) ContractSize(ctx context.Context, id types.FileContractID) (a } if err := s.db. + WithContext(ctx). Raw(` SELECT contract_size as size, CASE WHEN contract_size > sector_size THEN contract_size - sector_size ELSE 0 END as prunable FROM ( SELECT MAX(c.size) as contract_size, COUNT(cs.db_sector_id) * ? as sector_size FROM contracts c LEFT JOIN contract_sectors cs ON cs.db_contract_id = c.id WHERE c.fcid = ? 
@@ -1029,7 +1038,7 @@ func (s *SQLStore) SetContractSet(ctx context.Context, name string, contractIds var diff []fileContractID var nContractsAfter int - err := s.retryTransaction(func(tx *gorm.DB) error { + err := s.retryTransaction(ctx, func(tx *gorm.DB) error { // fetch contract set var cs dbContractSet err := tx. @@ -1097,6 +1106,7 @@ func (s *SQLStore) SetContractSet(ctx context.Context, name string, contractIds func (s *SQLStore) RemoveContractSet(ctx context.Context, name string) error { return s.db. + WithContext(ctx). Where(dbContractSet{Name: name}). Delete(&dbContractSet{}). Error @@ -1106,6 +1116,7 @@ func (s *SQLStore) RenewedContract(ctx context.Context, renewedFrom types.FileCo var contract dbContract err = s.db. + WithContext(ctx). Where(&dbContract{ContractCommon: ContractCommon{RenewedFrom: fileContractID(renewedFrom)}}). Joins("Host"). Take(&contract). @@ -1126,6 +1137,7 @@ func (s *SQLStore) SearchObjects(ctx context.Context, bucket, substring string, var objects []api.ObjectMetadata err := s.db. + WithContext(ctx). Select("o.object_id as Name, o.size as Size, o.health as Health, o.mime_type as MimeType, o.etag as ETag, o.created_at as ModTime"). Model(&dbObject{}). Table("objects o"). @@ -1259,6 +1271,7 @@ FROM ( case api.ObjectSortByHealth: var markerHealth float64 if err = s.db. + WithContext(ctx). Raw(fmt.Sprintf(`SELECT Health FROM (%s WHERE oname >= ? ORDER BY oname LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). Scan(&markerHealth). Error; err != nil { @@ -1275,6 +1288,7 @@ FROM ( case api.ObjectSortBySize: var markerSize float64 if err = s.db. + WithContext(ctx). Raw(fmt.Sprintf(`SELECT Size FROM (%s WHERE oname >= ? ORDER BY oname LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). Scan(&markerSize). Error; err != nil { @@ -1315,6 +1329,7 @@ FROM ( parameters := append(append(objectsQueryParams, markerParams...), limit, offset) if err = s.db. + WithContext(ctx). Raw(query, parameters...). 
Scan(&rows). Error; err != nil { @@ -1335,8 +1350,8 @@ FROM ( } func (s *SQLStore) Object(ctx context.Context, bucket, path string) (obj api.Object, err error) { - err = s.db.Transaction(func(tx *gorm.DB) error { - obj, err = s.object(ctx, tx, bucket, path) + err = s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + obj, err = s.object(tx, bucket, path) return err }) return @@ -1367,7 +1382,7 @@ func (s *SQLStore) RecordContractSpending(ctx context.Context, records []api.Con } metrics := make([]api.ContractMetric, 0, len(squashedRecords)) for fcid, newSpending := range squashedRecords { - err := s.retryTransaction(func(tx *gorm.DB) error { + err := s.retryTransaction(ctx, func(tx *gorm.DB) error { var contract dbContract err := tx.Model(&dbContract{}). Where("fcid = ?", fileContractID(fcid)). @@ -1470,7 +1485,7 @@ func fetchUsedContracts(tx *gorm.DB, usedContracts map[types.PublicKey]map[types } func (s *SQLStore) RenameObject(ctx context.Context, bucket, keyOld, keyNew string, force bool) error { - return s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { if force { // delete potentially existing object at destination if _, err := s.deleteObject(tx, bucket, keyNew); err != nil { @@ -1492,7 +1507,7 @@ func (s *SQLStore) RenameObject(ctx context.Context, bucket, keyOld, keyNew stri } func (s *SQLStore) RenameObjects(ctx context.Context, bucket, prefixOld, prefixNew string, force bool) error { - return s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { if force { // delete potentially existing objects at destination inner := tx.Raw("SELECT ? FROM objects WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? 
AND ?", @@ -1538,7 +1553,7 @@ func (s *SQLStore) AddPartialSlab(ctx context.Context, data []byte, minShards, t } func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (om api.ObjectMetadata, err error) { - err = s.retryTransaction(func(tx *gorm.DB) error { + err = s.retryTransaction(ctx, func(tx *gorm.DB) error { var srcObj dbObject err = tx.Where("objects.object_id = ? AND DBBucket.name = ?", srcPath, srcBucket). Joins("DBBucket"). @@ -1622,7 +1637,7 @@ func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) { var deletedSectors int - err := s.retryTransaction(func(tx *gorm.DB) error { + err := s.retryTransaction(ctx, func(tx *gorm.DB) error { // Fetch contract_sectors to delete. var sectors []dbContractSector err := tx.Raw(` @@ -1707,7 +1722,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, usedContracts := o.Contracts() // UpdateObject is ACID. - return s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { // Fetch contract set. 
var cs dbContractSet if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { @@ -1778,7 +1793,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, func (s *SQLStore) RemoveObject(ctx context.Context, bucket, key string) error { var rowsAffected int64 var err error - err = s.retryTransaction(func(tx *gorm.DB) error { + err = s.retryTransaction(ctx, func(tx *gorm.DB) error { rowsAffected, err = s.deleteObject(tx, bucket, key) return err }) @@ -1794,7 +1809,7 @@ func (s *SQLStore) RemoveObject(ctx context.Context, bucket, key string) error { func (s *SQLStore) RemoveObjects(ctx context.Context, bucket, prefix string) error { var rowsAffected int64 var err error - rowsAffected, err = s.deleteObjects(bucket, prefix) + rowsAffected, err = s.deleteObjects(ctx, bucket, prefix) if err != nil { return err } @@ -1844,7 +1859,7 @@ func (ss *SQLStore) UpdateSlab(ctx context.Context, s object.Slab, contractSet s usedContracts := s.Contracts() // Update slab. - return ss.retryTransaction(func(tx *gorm.DB) (err error) { + return ss.retryTransaction(ctx, func(tx *gorm.DB) (err error) { // update slab if err := tx.Model(&dbSlab{}). Where("key", key). @@ -1978,7 +1993,7 @@ LIMIT ? for { var rowsAffected int64 - err := s.retryTransaction(func(tx *gorm.DB) error { + err := s.retryTransaction(ctx, func(tx *gorm.DB) error { var res *gorm.DB if isSQLite(s.db) { res = tx.Exec("UPDATE slabs SET health = inner.health, health_valid_until = (?) FROM (?) AS inner WHERE slabs.id=inner.id", sqlRandomTimestamp(s.db, now, refreshHealthMinHealthValidity, refreshHealthMaxHealthValidity), healthQuery) @@ -2028,7 +2043,7 @@ func (s *SQLStore) UnhealthySlabs(ctx context.Context, healthCutoff float64, set Health float64 } - if err := s.retryTransaction(func(tx *gorm.DB) error { + if err := s.retryTransaction(ctx, func(tx *gorm.DB) error { return tx.Select("slabs.key, slabs.health"). Joins("INNER JOIN contract_sets cs ON slabs.db_contract_set_id = cs.id"). 
Model(&dbSlab{}). @@ -2221,19 +2236,19 @@ func (s *SQLStore) createSlices(tx *gorm.DB, objID, multiPartID *uint, contractS } // object retrieves an object from the store. -func (s *SQLStore) object(ctx context.Context, tx *gorm.DB, bucket, path string) (api.Object, error) { +func (s *SQLStore) object(tx *gorm.DB, bucket, path string) (api.Object, error) { // fetch raw object data - raw, err := s.objectRaw(ctx, tx, bucket, path) + raw, err := s.objectRaw(tx, bucket, path) if errors.Is(err, gorm.ErrRecordNotFound) || len(raw) == 0 { return api.Object{}, api.ErrObjectNotFound } // hydrate raw object data - return s.objectHydrate(ctx, tx, bucket, path, raw) + return s.objectHydrate(tx, bucket, path, raw) } // objectHydrate hydrates a raw object and returns an api.Object. -func (s *SQLStore) objectHydrate(ctx context.Context, tx *gorm.DB, bucket, path string, obj rawObject) (api.Object, error) { +func (s *SQLStore) objectHydrate(tx *gorm.DB, bucket, path string, obj rawObject) (api.Object, error) { // parse object key var key object.EncryptionKey if err := key.UnmarshalBinary(obj[0].ObjectKey); err != nil { @@ -2288,7 +2303,7 @@ func (s *SQLStore) objectHydrate(ctx context.Context, tx *gorm.DB, bucket, path } // fetch object metadata - metadata, err := s.objectMetadata(ctx, tx, bucket, path) + metadata, err := s.objectMetadata(tx, bucket, path) if err != nil { return api.Object{}, err } @@ -2314,7 +2329,7 @@ func (s *SQLStore) objectHydrate(ctx context.Context, tx *gorm.DB, bucket, path // ObjectMetadata returns an object's metadata func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { var resp api.Object - err := s.db.Transaction(func(tx *gorm.DB) error { + err := s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { var obj dbObject err := tx.Model(&dbObject{}). Joins("INNER JOIN buckets b ON objects.db_bucket_id = b.id"). 
@@ -2327,7 +2342,7 @@ func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, path string) (api } else if err != nil { return err } - oum, err := s.objectMetadata(ctx, tx, bucket, path) + oum, err := s.objectMetadata(tx, bucket, path) if err != nil { return err } @@ -2347,7 +2362,7 @@ func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, path string) (api return resp, err } -func (s *SQLStore) objectMetadata(ctx context.Context, tx *gorm.DB, bucket, path string) (api.ObjectUserMetadata, error) { +func (s *SQLStore) objectMetadata(tx *gorm.DB, bucket, path string) (api.ObjectUserMetadata, error) { var rows []dbObjectUserMetadata err := tx. Model(&dbObjectUserMetadata{}). @@ -2378,12 +2393,12 @@ func newObjectMetadata(name, etag, mimeType string, health float64, modTime time } } -func (s *SQLStore) objectRaw(ctx context.Context, txn *gorm.DB, bucket string, path string) (rows rawObject, err error) { +func (s *SQLStore) objectRaw(txn *gorm.DB, bucket string, path string) (rows rawObject, err error) { // NOTE: we LEFT JOIN here because empty objects are valid and need to be // included in the result set, when we convert the rawObject before // returning it we'll check for SlabID and/or SectorID being 0 and act // accordingly - err = s.db. + err = txn. Select("o.id as ObjectID, o.health as ObjectHealth, sli.object_index as ObjectIndex, o.key as ObjectKey, o.object_id as ObjectName, o.size as ObjectSize, o.mime_type as ObjectMimeType, o.created_at as ObjectModTime, o.etag as ObjectETag, sli.object_index, sli.offset as SliceOffset, sli.length as SliceLength, sla.id as SlabID, sla.health as SlabHealth, sla.key as SlabKey, sla.min_shards as SlabMinShards, bs.id IS NOT NULL AS SlabBuffered, sec.slab_index as SectorIndex, sec.root as SectorRoot, sec.latest_host as LatestHost, c.fcid as FCID, h.public_key as HostKey"). Model(&dbObject{}). Table("objects o"). 
@@ -2403,48 +2418,17 @@ func (s *SQLStore) objectRaw(ctx context.Context, txn *gorm.DB, bucket string, p return } -func (s *SQLStore) objectHealth(ctx context.Context, tx *gorm.DB, objectID uint) (health float64, err error) { - if err = tx. - Select("objects.health"). - Model(&dbObject{}). - Table("objects"). - Where("id", objectID). - Scan(&health). - Error; errors.Is(err, gorm.ErrRecordNotFound) { - err = api.ErrObjectNotFound - } - return -} - // contract retrieves a contract from the store. func (s *SQLStore) contract(ctx context.Context, id fileContractID) (dbContract, error) { return contract(s.db, id) } -// contracts retrieves all contracts in the given set. -func (s *SQLStore) contracts(ctx context.Context, set string) ([]dbContract, error) { - var cs dbContractSet - err := s.db. - Where(&dbContractSet{Name: set}). - Preload("Contracts.Host"). - Take(&cs). - Error - - if errors.Is(err, gorm.ErrRecordNotFound) { - return nil, fmt.Errorf("%w '%s'", api.ErrContractSetNotFound, set) - } else if err != nil { - return nil, err - } - - return cs.Contracts, nil -} - // PackedSlabsForUpload returns up to 'limit' packed slabs that are ready for // uploading. They are locked for 'lockingDuration' time before being handed out // again. func (s *SQLStore) PackedSlabsForUpload(ctx context.Context, lockingDuration time.Duration, minShards, totalShards uint8, set string, limit int) ([]api.PackedSlab, error) { var contractSetID uint - if err := s.db.Raw("SELECT id FROM contract_sets WHERE name = ?", set). + if err := s.db.WithContext(ctx).Raw("SELECT id FROM contract_sets WHERE name = ?", set). 
Scan(&contractSetID).Error; err != nil { return nil, err } @@ -2458,7 +2442,7 @@ func (s *SQLStore) ObjectsBySlabKey(ctx context.Context, bucket string, slabKey return nil, err } - err = s.retryTransaction(func(tx *gorm.DB) error { + err = s.retryTransaction(ctx, func(tx *gorm.DB) error { return tx.Raw(` SELECT DISTINCT obj.object_id as Name, obj.size as Size, obj.mime_type as MimeType, sla.health as Health FROM slabs sla @@ -2494,7 +2478,7 @@ func (s *SQLStore) MarkPackedSlabsUploaded(ctx context.Context, slabs []api.Uplo } } var fileName string - err := s.retryTransaction(func(tx *gorm.DB) error { + err := s.retryTransaction(ctx, func(tx *gorm.DB) error { for _, slab := range slabs { var err error fileName, err = s.markPackedSlabUploaded(tx, slab) @@ -2670,14 +2654,14 @@ func addContract(tx *gorm.DB, c rhpv2.ContractRevision, contractPrice, totalCost // archival reason // // NOTE: this function archives the contracts without setting a renewed ID -func archiveContracts(ctx context.Context, tx *gorm.DB, contracts []dbContract, toArchive map[types.FileContractID]string) error { +func archiveContracts(tx *gorm.DB, contracts []dbContract, toArchive map[types.FileContractID]string) error { var toInvalidate []fileContractID for _, contract := range contracts { toInvalidate = append(toInvalidate, contract.FCID) } // Invalidate the health on the slabs before deleting the contracts to avoid // breaking the relations beforehand. - if err := invalidateSlabHealthByFCID(ctx, tx, toInvalidate); err != nil { + if err := invalidateSlabHealthByFCID(tx, toInvalidate); err != nil { return fmt.Errorf("invalidating slab health failed: %w", err) } for _, contract := range contracts { @@ -2740,12 +2724,12 @@ func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, // deletion goes from largest to smallest. 
That's because the batch size is // dynamically increased and the smaller objects get the faster we can delete // them meaning it makes sense to increase the batch size over time. -func (s *SQLStore) deleteObjects(bucket string, path string) (numDeleted int64, _ error) { +func (s *SQLStore) deleteObjects(ctx context.Context, bucket string, path string) (numDeleted int64, _ error) { batchSizeIdx := 0 for { var duration time.Duration var rowsAffected int64 - if err := s.retryTransaction(func(tx *gorm.DB) error { + if err := s.retryTransaction(ctx, func(tx *gorm.DB) error { start := time.Now() res := tx.Exec(` DELETE FROM objects @@ -2787,7 +2771,7 @@ func (s *SQLStore) deleteObjects(bucket string, path string) (numDeleted int64, return numDeleted, nil } -func invalidateSlabHealthByFCID(ctx context.Context, tx *gorm.DB, fcids []fileContractID) error { +func invalidateSlabHealthByFCID(tx *gorm.DB, fcids []fileContractID) error { if len(fcids) == 0 { return nil } @@ -2811,19 +2795,14 @@ func invalidateSlabHealthByFCID(ctx context.Context, tx *gorm.DB, fcids []fileCo } else if resp.RowsAffected < refreshHealthBatchSize { break // done } - - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(time.Second): - } + time.Sleep(time.Second) } return nil } func (s *SQLStore) invalidateSlabHealthByFCID(ctx context.Context, fcids []fileContractID) error { - return s.retryTransaction(func(tx *gorm.DB) error { - return invalidateSlabHealthByFCID(ctx, tx, fcids) + return s.retryTransaction(ctx, func(tx *gorm.DB) error { + return invalidateSlabHealthByFCID(tx, fcids) }) } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index f5461147c..dcd4aaa9d 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1867,7 +1867,7 @@ func TestUnhealthySlabsNoContracts(t *testing.T) { // delete the sector - we manually invalidate the slabs for the contract // before deletion. 
- err = invalidateSlabHealthByFCID(context.Background(), ss.db, []fileContractID{fileContractID(fcid1)}) + err = invalidateSlabHealthByFCID(ss.db, []fileContractID{fileContractID(fcid1)}) if err != nil { t.Fatal(err) } @@ -3285,7 +3285,7 @@ func TestBucketObjects(t *testing.T) { // See if we can fetch the object by slab. var ec object.EncryptionKey - if obj, err := ss.objectRaw(context.Background(), ss.db, b1, "/bar"); err != nil { + if obj, err := ss.objectRaw(ss.db, b1, "/bar"); err != nil { t.Fatal(err) } else if err := ec.UnmarshalBinary(obj[0].SlabKey); err != nil { t.Fatal(err) diff --git a/stores/metrics.go b/stores/metrics.go index 333ed8a42..c8369f630 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -450,11 +450,11 @@ func (s *SQLStore) contractMetrics(ctx context.Context, start time.Time, n uint6 if opts.ContractID == (types.FileContractID{}) && opts.HostKey == (types.PublicKey{}) { // if neither contract nor host filters were set, we return the // aggregate spending for each period - metrics, err = s.findAggregatedContractPeriods(start, n, interval) + metrics, err = s.findAggregatedContractPeriods(ctx, start, n, interval) } else { // otherwise we return the first metric for each period like we usually // do - err = s.findPeriods(dbContractMetric{}.TableName(), &metrics, start, n, interval, whereExpr) + err = s.findPeriods(ctx, dbContractMetric{}.TableName(), &metrics, start, n, interval, whereExpr) } if err != nil { return nil, fmt.Errorf("failed to fetch contract metrics: %w", err) @@ -478,7 +478,7 @@ func (s *SQLStore) contractPruneMetrics(ctx context.Context, start time.Time, n } var metrics []dbContractPruneMetric - err := s.findPeriods(dbContractPruneMetric{}.TableName(), &metrics, start, n, interval, whereExpr) + err := s.findPeriods(ctx, dbContractPruneMetric{}.TableName(), &metrics, start, n, interval, whereExpr) if err != nil { return nil, fmt.Errorf("failed to fetch contract metrics: %w", err) } @@ -498,7 +498,7 @@ func (s *SQLStore) 
contractSetChurnMetrics(ctx context.Context, start time.Time, whereExpr = gorm.Expr("? AND reason = ?", whereExpr, opts.Reason) } var metrics []dbContractSetChurnMetric - err := s.findPeriods(dbContractSetChurnMetric{}.TableName(), &metrics, start, n, interval, whereExpr) + err := s.findPeriods(ctx, dbContractSetChurnMetric{}.TableName(), &metrics, start, n, interval, whereExpr) if err != nil { return nil, fmt.Errorf("failed to fetch contract set churn metrics: %w", err) } @@ -515,7 +515,7 @@ func (s *SQLStore) contractSetMetrics(ctx context.Context, start time.Time, n ui } var metrics []dbContractSetMetric - err := s.findPeriods(dbContractSetMetric{}.TableName(), &metrics, start, n, interval, whereExpr) + err := s.findPeriods(ctx, dbContractSetMetric{}.TableName(), &metrics, start, n, interval, whereExpr) if err != nil { return nil, fmt.Errorf("failed to fetch contract set metrics: %w", err) } @@ -536,7 +536,7 @@ func normaliseTimestamp(start time.Time, interval time.Duration, t unixTimeMS) u return unixTimeMS(time.UnixMilli(normalizedMS)) } -func (s *SQLStore) findAggregatedContractPeriods(start time.Time, n uint64, interval time.Duration) ([]dbContractMetric, error) { +func (s *SQLStore) findAggregatedContractPeriods(ctx context.Context, start time.Time, n uint64, interval time.Duration) ([]dbContractMetric, error) { if n > api.MetricMaxIntervals { return nil, api.ErrMaxIntervalsExceeded } @@ -548,7 +548,7 @@ func (s *SQLStore) findAggregatedContractPeriods(start time.Time, n uint64, inte } var metricsWithPeriod []metricWithPeriod - err := s.dbMetrics.Transaction(func(tx *gorm.DB) error { + err := s.dbMetrics.WithContext(ctx).Transaction(func(tx *gorm.DB) error { var fcids []fileContractID if err := tx.Raw("SELECT DISTINCT fcid FROM contracts WHERE contracts.timestamp >= ? AND contracts.timestamp < ?", unixTimeMS(start), unixTimeMS(end)). 
Scan(&fcids).Error; err != nil { @@ -599,12 +599,12 @@ func (s *SQLStore) findAggregatedContractPeriods(start time.Time, n uint64, inte // split into intervals and the row with the lowest timestamp for each interval // is returned. The result is then joined with the original table to retrieve // only the metrics we want. -func (s *SQLStore) findPeriods(table string, dst interface{}, start time.Time, n uint64, interval time.Duration, whereExpr clause.Expr) error { +func (s *SQLStore) findPeriods(ctx context.Context, table string, dst interface{}, start time.Time, n uint64, interval time.Duration, whereExpr clause.Expr) error { if n > api.MetricMaxIntervals { return api.ErrMaxIntervalsExceeded } end := start.Add(time.Duration(n) * interval) - return s.dbMetrics.Raw(fmt.Sprintf(` + return s.dbMetrics.WithContext(ctx).Raw(fmt.Sprintf(` WITH RECURSIVE periods AS ( SELECT ? AS period_start UNION ALL @@ -637,7 +637,7 @@ func (s *SQLStore) findPeriods(table string, dst interface{}, start time.Time, n } func (s *SQLStore) walletMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.WalletMetricsQueryOpts) (metrics []dbWalletMetric, err error) { - err = s.findPeriods(dbWalletMetric{}.TableName(), &metrics, start, n, interval, gorm.Expr("TRUE")) + err = s.findPeriods(ctx, dbWalletMetric{}.TableName(), &metrics, start, n, interval, gorm.Expr("TRUE")) if err != nil { return nil, fmt.Errorf("failed to fetch wallet metrics: %w", err) } @@ -660,7 +660,7 @@ func (s *SQLStore) performanceMetrics(ctx context.Context, start time.Time, n ui } var metrics []dbPerformanceMetric - err := s.findPeriods(dbPerformanceMetric{}.TableName(), &metrics, start, n, interval, whereExpr) + err := s.findPeriods(ctx, dbPerformanceMetric{}.TableName(), &metrics, start, n, interval, whereExpr) if err != nil { return nil, fmt.Errorf("failed to fetch performance metrics: %w", err) } diff --git a/stores/multipart.go b/stores/multipart.go index 864503455..53a9afeac 100644 
--- a/stores/multipart.go +++ b/stores/multipart.go @@ -56,7 +56,7 @@ func (s *SQLStore) CreateMultipartUpload(ctx context.Context, bucket, path strin return api.MultipartCreateResponse{}, err } var uploadID string - err = s.retryTransaction(func(tx *gorm.DB) error { + err = s.retryTransaction(ctx, func(tx *gorm.DB) error { // Get bucket id. var bucketID uint err := tx.Table("(SELECT id from buckets WHERE buckets.name = ?) bucket_id", bucket). @@ -108,7 +108,7 @@ func (s *SQLStore) AddMultipartPart(ctx context.Context, bucket, path, contractS } } } - return s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { // Fetch contract set. var cs dbContractSet if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { @@ -160,7 +160,7 @@ func (s *SQLStore) AddMultipartPart(ctx context.Context, bucket, path, contractS } func (s *SQLStore) MultipartUpload(ctx context.Context, uploadID string) (resp api.MultipartUpload, err error) { - err = s.retryTransaction(func(tx *gorm.DB) error { + err = s.retryTransaction(ctx, func(tx *gorm.DB) error { var dbUpload dbMultipartUpload err := tx. Model(&dbMultipartUpload{}). @@ -201,7 +201,7 @@ func (s *SQLStore) MultipartUploads(ctx context.Context, bucket, prefix, keyMark prefixExpr = gorm.Expr("SUBSTR(object_id, 1, ?) = ?", utf8.RuneCountInString(prefix), prefix) } - err = s.retryTransaction(func(tx *gorm.DB) error { + err = s.retryTransaction(ctx, func(tx *gorm.DB) error { var dbUploads []dbMultipartUpload err := tx. Model(&dbMultipartUpload{}). @@ -243,7 +243,7 @@ func (s *SQLStore) MultipartUploadParts(ctx context.Context, bucket, object stri limit++ } - err := s.retryTransaction(func(tx *gorm.DB) error { + err := s.retryTransaction(ctx, func(tx *gorm.DB) error { var dbParts []dbMultipartPart err := tx. Model(&dbMultipartPart{}). 
@@ -277,7 +277,7 @@ func (s *SQLStore) MultipartUploadParts(ctx context.Context, bucket, object stri } func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { - return s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { // delete multipart upload optimistically res := tx. Where("upload_id", uploadID). @@ -326,7 +326,7 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str } } var eTag string - err = s.retryTransaction(func(tx *gorm.DB) error { + err = s.retryTransaction(ctx, func(tx *gorm.DB) error { // Find multipart upload. var mu dbMultipartUpload err = tx.Where("upload_id = ?", uploadID). diff --git a/stores/slabbuffer.go b/stores/slabbuffer.go index e1c7290ea..2d16c8e33 100644 --- a/stores/slabbuffer.go +++ b/stores/slabbuffer.go @@ -204,7 +204,7 @@ func (mgr *SlabBufferManager) AddPartialSlab(ctx context.Context, data []byte, m // If there is still data left, create a new buffer. 
if len(data) > 0 { var sb *SlabBuffer - err = mgr.s.retryTransaction(func(tx *gorm.DB) error { + err = mgr.s.retryTransaction(ctx, func(tx *gorm.DB) error { sb, err = createSlabBuffer(tx, contractSet, mgr.dir, minShards, totalShards) return err }) diff --git a/stores/sql.go b/stores/sql.go index 5d9d9cea8..53ac96207 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -446,7 +446,7 @@ func (ss *SQLStore) applyUpdates(force bool) error { ss.logger.Error(fmt.Sprintf("failed to fetch blocklist, err: %v", err)) } - err := ss.retryTransaction(func(tx *gorm.DB) (err error) { + err := ss.retryTransaction(context.Background(), func(tx *gorm.DB) (err error) { if len(ss.unappliedAnnouncements) > 0 { if err = insertAnnouncements(tx, ss.unappliedAnnouncements); err != nil { return fmt.Errorf("%w; failed to insert %d announcements", err, len(ss.unappliedAnnouncements)) @@ -514,9 +514,10 @@ func (ss *SQLStore) applyUpdates(force bool) error { return nil } -func (s *SQLStore) retryTransaction(fc func(tx *gorm.DB) error, opts ...*sql.TxOptions) error { +func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) error, opts ...*sql.TxOptions) error { abortRetry := func(err error) bool { if err == nil || + errors.Is(err, context.Canceled) || errors.Is(err, gorm.ErrRecordNotFound) || errors.Is(err, errInvalidNumberOfShards) || errors.Is(err, errShardRootChanged) || @@ -539,7 +540,7 @@ func (s *SQLStore) retryTransaction(fc func(tx *gorm.DB) error, opts ...*sql.TxO } var err error for i := 0; i < len(s.retryTransactionIntervals); i++ { - err = s.db.Transaction(fc, opts...) + err = s.db.WithContext(ctx).Transaction(fc, opts...) 
if abortRetry(err) { return err } @@ -566,10 +567,10 @@ func initConsensusInfo(db *gorm.DB) (dbConsensusInfo, modules.ConsensusChangeID, return ci, ccid, nil } -func (s *SQLStore) ResetConsensusSubscription() error { +func (s *SQLStore) ResetConsensusSubscription(ctx context.Context) error { // empty tables and reinit consensus_infos var ci dbConsensusInfo - err := s.retryTransaction(func(tx *gorm.DB) error { + err := s.retryTransaction(ctx, func(tx *gorm.DB) error { if err := s.db.Exec("DELETE FROM consensus_infos").Error; err != nil { return err } else if err := s.db.Exec("DELETE FROM siacoin_elements").Error; err != nil { diff --git a/stores/sql_test.go b/stores/sql_test.go index 776e3e10e..5b79cb0ce 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -292,7 +292,7 @@ func TestConsensusReset(t *testing.T) { }) // Reset the consensus. - if err := ss.ResetConsensusSubscription(); err != nil { + if err := ss.ResetConsensusSubscription(context.Background()); err != nil { t.Fatal(err) } diff --git a/stores/webhooks.go b/stores/webhooks.go index f3fc26057..4db325698 100644 --- a/stores/webhooks.go +++ b/stores/webhooks.go @@ -1,6 +1,8 @@ package stores import ( + "context" + "go.sia.tech/renterd/webhooks" "gorm.io/gorm" "gorm.io/gorm/clause" @@ -20,8 +22,8 @@ func (dbWebhook) TableName() string { return "webhooks" } -func (s *SQLStore) DeleteWebhook(wb webhooks.Webhook) error { - return s.retryTransaction(func(tx *gorm.DB) error { +func (s *SQLStore) DeleteWebhook(ctx context.Context, wb webhooks.Webhook) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { res := tx.Exec("DELETE FROM webhooks WHERE module = ? AND event = ? 
AND url = ?", wb.Module, wb.Event, wb.URL) if res.Error != nil { @@ -33,8 +35,8 @@ func (s *SQLStore) DeleteWebhook(wb webhooks.Webhook) error { }) } -func (s *SQLStore) AddWebhook(wb webhooks.Webhook) error { - return s.retryTransaction(func(tx *gorm.DB) error { +func (s *SQLStore) AddWebhook(ctx context.Context, wb webhooks.Webhook) error { + return s.retryTransaction(ctx, func(tx *gorm.DB) error { return tx.Clauses(clause.OnConflict{ DoNothing: true, }).Create(&dbWebhook{ @@ -45,9 +47,9 @@ func (s *SQLStore) AddWebhook(wb webhooks.Webhook) error { }) } -func (s *SQLStore) Webhooks() ([]webhooks.Webhook, error) { +func (s *SQLStore) Webhooks(ctx context.Context) ([]webhooks.Webhook, error) { var dbWebhooks []dbWebhook - if err := s.db.Find(&dbWebhooks).Error; err != nil { + if err := s.db.WithContext(ctx).Find(&dbWebhooks).Error; err != nil { return nil, err } var whs []webhooks.Webhook diff --git a/stores/webhooks_test.go b/stores/webhooks_test.go index ad1973125..b306eef2c 100644 --- a/stores/webhooks_test.go +++ b/stores/webhooks_test.go @@ -1,6 +1,7 @@ package stores import ( + "context" "testing" "github.com/google/go-cmp/cmp" @@ -23,10 +24,10 @@ func TestWebhooks(t *testing.T) { } // Add hook. - if err := ss.AddWebhook(wh1); err != nil { + if err := ss.AddWebhook(context.Background(), wh1); err != nil { t.Fatal(err) } - whs, err := ss.Webhooks() + whs, err := ss.Webhooks(context.Background()) if err != nil { t.Fatal(err) } else if len(whs) != 1 { @@ -36,10 +37,10 @@ func TestWebhooks(t *testing.T) { } // Add it again. Should be a no-op. - if err := ss.AddWebhook(wh1); err != nil { + if err := ss.AddWebhook(context.Background(), wh1); err != nil { t.Fatal(err) } - whs, err = ss.Webhooks() + whs, err = ss.Webhooks(context.Background()) if err != nil { t.Fatal(err) } else if len(whs) != 1 { @@ -49,10 +50,10 @@ func TestWebhooks(t *testing.T) { } // Add another. 
- if err := ss.AddWebhook(wh2); err != nil { + if err := ss.AddWebhook(context.Background(), wh2); err != nil { t.Fatal(err) } - whs, err = ss.Webhooks() + whs, err = ss.Webhooks(context.Background()) if err != nil { t.Fatal(err) } else if len(whs) != 2 { @@ -64,10 +65,10 @@ func TestWebhooks(t *testing.T) { } // Remove one. - if err := ss.DeleteWebhook(wh1); err != nil { + if err := ss.DeleteWebhook(context.Background(), wh1); err != nil { t.Fatal(err) } - whs, err = ss.Webhooks() + whs, err = ss.Webhooks(context.Background()) if err != nil { t.Fatal(err) } else if len(whs) != 1 { diff --git a/webhooks/webhooks.go b/webhooks/webhooks.go index e3d388de8..665f8a2c4 100644 --- a/webhooks/webhooks.go +++ b/webhooks/webhooks.go @@ -19,9 +19,9 @@ var ErrWebhookNotFound = errors.New("Webhook not found") type ( WebhookStore interface { - DeleteWebhook(wh Webhook) error - AddWebhook(wh Webhook) error - Webhooks() ([]Webhook, error) + DeleteWebhook(ctx context.Context, wh Webhook) error + AddWebhook(ctx context.Context, wh Webhook) error + Webhooks(ctx context.Context) ([]Webhook, error) } Broadcaster interface { @@ -122,10 +122,10 @@ func (m *Manager) Close() error { return nil } -func (m *Manager) Delete(wh Webhook) error { +func (m *Manager) Delete(ctx context.Context, wh Webhook) error { m.mu.Lock() defer m.mu.Unlock() - if err := m.store.DeleteWebhook(wh); errors.Is(err, gorm.ErrRecordNotFound) { + if err := m.store.DeleteWebhook(ctx, wh); errors.Is(err, gorm.ErrRecordNotFound) { return ErrWebhookNotFound } else if err != nil { return err @@ -157,7 +157,7 @@ func (m *Manager) Info() ([]Webhook, []WebhookQueueInfo) { return hooks, queueInfos } -func (m *Manager) Register(wh Webhook) error { +func (m *Manager) Register(ctx context.Context, wh Webhook) error { ctx, cancel := context.WithTimeout(m.shutdownCtx, webhookTimeout) defer cancel() @@ -170,7 +170,7 @@ func (m *Manager) Register(wh Webhook) error { } // Add Webhook. 
- if err := m.store.AddWebhook(wh); err != nil { + if err := m.store.AddWebhook(ctx, wh); err != nil { return err } m.mu.Lock() @@ -214,11 +214,6 @@ func (w Webhook) String() string { } func NewManager(logger *zap.SugaredLogger, store WebhookStore) (*Manager, error) { - hooks, err := store.Webhooks() - if err != nil { - return nil, err - } - shutdownCtx, shutdownCtxCancel := context.WithCancel(context.Background()) m := &Manager{ logger: logger.Named("webhooks"), @@ -230,7 +225,10 @@ func NewManager(logger *zap.SugaredLogger, store WebhookStore) (*Manager, error) queues: make(map[string]*eventQueue), webhooks: make(map[string]Webhook), } - + hooks, err := store.Webhooks(shutdownCtx) + if err != nil { + return nil, err + } for _, hook := range hooks { m.webhooks[hook.String()] = hook } From 508624d4a6f2edcf704bc4af53a4d5484aeb3414 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 17:05:13 +0100 Subject: [PATCH 021/201] stores: reorder UpdateObject --- stores/metadata.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 3f9cf22c5..e6dcf9363 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1773,18 +1773,18 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return err } - // Fetch the used contracts. - contracts, err := fetchUsedContracts(tx, usedContracts) - if err != nil { - return fmt.Errorf("failed to fetch used contracts: %w", err) - } - // Fetch contract set. var cs dbContractSet if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { return fmt.Errorf("contract set %v not found: %w", contractSet, err) } + // Fetch the used contracts. + contracts, err := fetchUsedContracts(tx, usedContracts) + if err != nil { + return fmt.Errorf("failed to fetch used contracts: %w", err) + } + // Create all slices. This also creates any missing slabs or sectors. 
if err := s.createSlices(tx, &objID, nil, cs.ID, contracts, o.Slabs); err != nil { return fmt.Errorf("failed to create slices: %w", err) From 75fd3c271a3025c28004f430a0084b1e4bf25a90 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 12 Mar 2024 17:51:06 +0100 Subject: [PATCH 022/201] lint: add unused and unparm linters --- .golangci.yml | 2 ++ autopilot/accounts.go | 4 ++-- autopilot/autopilot.go | 4 ++-- autopilot/contract_spending.go | 13 +++++-------- autopilot/contractor.go | 21 +++++++-------------- autopilot/contractor_test.go | 7 +++---- autopilot/hostscore.go | 8 ++++---- autopilot/migrator.go | 2 +- autopilot/scanner.go | 8 ++++---- autopilot/scanner_test.go | 4 ++-- bus/client/objects.go | 2 +- bus/contractlocking_test.go | 8 ++++---- cmd/renterd/config.go | 1 + internal/node/node.go | 5 +---- internal/test/e2e/cluster_test.go | 8 ++------ stores/hostdb.go | 18 ------------------ stores/metadata.go | 4 +++- stores/metadata_test.go | 16 ++++++++-------- stores/migrations.go | 2 +- stores/migrations_metrics.go | 2 +- stores/migrations_utils.go | 2 +- stores/sql.go | 5 ++--- worker/download.go | 6 +++--- worker/downloader.go | 2 +- worker/rhpv2.go | 6 +++--- worker/rhpv3.go | 4 ++-- worker/upload.go | 8 ++++---- worker/worker.go | 2 +- 28 files changed, 71 insertions(+), 103 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ad04bb78e..ace11db65 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -102,6 +102,8 @@ linters: - typecheck - whitespace - tagliatelle + - unused + - unparam issues: # Maximum issues count per one linter. Set to 0 to disable. Default is 50. 
diff --git a/autopilot/accounts.go b/autopilot/accounts.go index 974222dbe..690c2b35d 100644 --- a/autopilot/accounts.go +++ b/autopilot/accounts.go @@ -141,7 +141,7 @@ func (a *accounts) refillWorkerAccounts(ctx context.Context, w Worker) { go func(contract api.ContractMetadata) { rCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() - accountID, refilled, rerr := refillWorkerAccount(rCtx, a.a, w, workerID, contract) + accountID, refilled, rerr := refillWorkerAccount(rCtx, a.a, w, contract) if rerr != nil { if rerr.Is(errMaxDriftExceeded) { // register the alert if error is errMaxDriftExceeded @@ -184,7 +184,7 @@ func (err *refillError) Is(target error) bool { return errors.Is(err.err, target) } -func refillWorkerAccount(ctx context.Context, a AccountStore, w Worker, workerID string, contract api.ContractMetadata) (accountID rhpv3.Account, refilled bool, rerr *refillError) { +func refillWorkerAccount(ctx context.Context, a AccountStore, w Worker, contract api.ContractMetadata) (accountID rhpv3.Account, refilled bool, rerr *refillError) { wrapErr := func(err error, keysAndValues ...interface{}) *refillError { if err == nil { return nil diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 7367003e0..bc1517a02 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -321,11 +321,11 @@ func (ap *Autopilot) Run() error { } // migration - ap.m.tryPerformMigrations(ap.shutdownCtx, ap.workers) + ap.m.tryPerformMigrations(ap.workers) // pruning if ap.state.cfg.Contracts.Prune { - ap.c.tryPerformPruning(ap.shutdownCtx, ap.workers) + ap.c.tryPerformPruning(ap.workers) } else { ap.logger.Debug("pruning disabled") } diff --git a/autopilot/contract_spending.go b/autopilot/contract_spending.go index ba144e173..cbd10f86c 100644 --- a/autopilot/contract_spending.go +++ b/autopilot/contract_spending.go @@ -20,7 +20,7 @@ func (c *contractor) contractSpending(ctx context.Context, contract api.Contract return total, nil } -func (c *contractor) 
currentPeriodSpending(contracts []api.Contract, currentPeriod uint64) (types.Currency, error) { +func (c *contractor) currentPeriodSpending(contracts []api.Contract, currentPeriod uint64) types.Currency { totalCosts := make(map[types.FileContractID]types.Currency) for _, c := range contracts { totalCosts[c.ID] = c.TotalCost @@ -41,22 +41,19 @@ func (c *contractor) currentPeriodSpending(contracts []api.Contract, currentPeri for _, contract := range filtered { totalAllocated = totalAllocated.Add(contract.TotalCost) } - return totalAllocated, nil + return totalAllocated } -func (c *contractor) remainingFunds(contracts []api.Contract) (types.Currency, error) { +func (c *contractor) remainingFunds(contracts []api.Contract) types.Currency { state := c.ap.State() // find out how much we spent in the current period - spent, err := c.currentPeriodSpending(contracts, state.period) - if err != nil { - return types.ZeroCurrency, err - } + spent := c.currentPeriodSpending(contracts, state.period) // figure out remaining funds var remaining types.Currency if state.cfg.Contracts.Allowance.Cmp(spent) > 0 { remaining = state.cfg.Contracts.Allowance.Sub(spent) } - return remaining, nil + return remaining } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 4e5e8c842..b2d749a88 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -275,7 +275,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( // min score to pass checks var minScore float64 if len(hosts) > 0 { - minScore = c.calculateMinScore(ctx, candidates, state.cfg.Contracts.Amount) + minScore = c.calculateMinScore(candidates, state.cfg.Contracts.Amount) } else { c.logger.Warn("could not calculate min score, no hosts found") } @@ -323,10 +323,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // calculate remaining funds - remaining, err := c.remainingFunds(contracts) - if err != nil { - return false, err - } + remaining := 
c.remainingFunds(contracts) // calculate 'limit' amount of contracts we want to renew var limit int @@ -1139,7 +1136,7 @@ func (c *contractor) initialContractFunding(settings rhpv2.HostSettings, txnFee, return funding } -func (c *contractor) refreshFundingEstimate(ctx context.Context, cfg api.AutopilotConfig, ci contractInfo, fee types.Currency) (types.Currency, error) { +func (c *contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contractInfo, fee types.Currency) types.Currency { // refresh with 1.2x the funds refreshAmount := ci.contract.TotalCost.Mul64(6).Div64(5) @@ -1158,7 +1155,7 @@ func (c *contractor) refreshFundingEstimate(ctx context.Context, cfg api.Autopil "fcid", ci.contract.ID, "refreshAmount", refreshAmount, "refreshAmountCapped", refreshAmountCapped) - return refreshAmountCapped, nil + return refreshAmountCapped } func (c *contractor) renewFundingEstimate(ctx context.Context, ci contractInfo, fee types.Currency, renewing bool) (types.Currency, error) { @@ -1248,7 +1245,7 @@ func (c *contractor) renewFundingEstimate(ctx context.Context, ci contractInfo, return cappedEstimatedCost, nil } -func (c *contractor) calculateMinScore(ctx context.Context, candidates []scoredHost, numContracts uint64) float64 { +func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uint64) float64 { // return early if there's no hosts if len(candidates) == 0 { c.logger.Warn("min host score is set to the smallest non-zero float because there are no candidate hosts") @@ -1474,11 +1471,7 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI // calculate the renter funds var renterFunds types.Currency if isOutOfFunds(state.cfg, ci.priceTable, ci.contract) { - renterFunds, err = c.refreshFundingEstimate(ctx, state.cfg, ci, state.fee) - if err != nil { - c.logger.Errorw(fmt.Sprintf("could not get refresh funding estimate, err: %v", err), "hk", hk, "fcid", fcid) - return api.ContractMetadata{}, true, err - } + 
renterFunds = c.refreshFundingEstimate(state.cfg, ci, state.fee) } else { renterFunds = rev.ValidRenterPayout() // don't increase funds } @@ -1598,7 +1591,7 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos return formedContract, true, nil } -func (c *contractor) tryPerformPruning(ctx context.Context, wp *workerPool) { +func (c *contractor) tryPerformPruning(wp *workerPool) { c.mu.Lock() if c.pruning || c.ap.isStopped() { c.mu.Unlock() diff --git a/autopilot/contractor_test.go b/autopilot/contractor_test.go index a0f63425b..575605612 100644 --- a/autopilot/contractor_test.go +++ b/autopilot/contractor_test.go @@ -1,7 +1,6 @@ package autopilot import ( - "context" "math" "testing" @@ -19,19 +18,19 @@ func TestCalculateMinScore(t *testing.T) { } // Test with 100 hosts which makes for a random set size of 250 - minScore := c.calculateMinScore(context.Background(), candidates, 100) + minScore := c.calculateMinScore(candidates, 100) if minScore != 0.002 { t.Fatalf("expected minScore to be 0.002 but was %v", minScore) } // Test with 0 hosts - minScore = c.calculateMinScore(context.Background(), []scoredHost{}, 100) + minScore = c.calculateMinScore([]scoredHost{}, 100) if minScore != math.SmallestNonzeroFloat64 { t.Fatalf("expected minScore to be math.SmallestNonzeroFLoat64 but was %v", minScore) } // Test with 300 hosts which is 50 more than we have - minScore = c.calculateMinScore(context.Background(), candidates, 300) + minScore = c.calculateMinScore(candidates, 300) if minScore != math.SmallestNonzeroFloat64 { t.Fatalf("expected minScore to be math.SmallestNonzeroFLoat64 but was %v", minScore) } diff --git a/autopilot/hostscore.go b/autopilot/hostscore.go index b15857d19..e8d9ca9b9 100644 --- a/autopilot/hostscore.go +++ b/autopilot/hostscore.go @@ -36,7 +36,7 @@ func hostScore(cfg api.AutopilotConfig, h hostdb.Host, storedData uint64, expect Collateral: collateralScore(cfg, h.PriceTable.HostPriceTable, uint64(allocationPerHost)), 
Interactions: interactionScore(h), Prices: priceAdjustmentScore(hostPeriodCost, cfg), - StorageRemaining: storageRemainingScore(cfg, h.Settings, storedData, expectedRedundancy, allocationPerHost), + StorageRemaining: storageRemainingScore(h.Settings, storedData, allocationPerHost), Uptime: uptimeScore(h), Version: versionScore(h.Settings), } @@ -74,7 +74,7 @@ func priceAdjustmentScore(hostCostPerPeriod types.Currency, cfg api.AutopilotCon panic("unreachable") } -func storageRemainingScore(cfg api.AutopilotConfig, h rhpv2.HostSettings, storedData uint64, expectedRedundancy, allocationPerHost float64) float64 { +func storageRemainingScore(h rhpv2.HostSettings, storedData uint64, allocationPerHost float64) float64 { // hostExpectedStorage is the amount of storage that we expect to be able to // store on this host overall, which should include the stored data that is // already on the host. @@ -291,7 +291,7 @@ func uploadCostForScore(cfg api.AutopilotConfig, h hostdb.Host, bytes uint64) ty return uploadSectorCostRHPv3.Mul64(numSectors) } -func downloadCostForScore(cfg api.AutopilotConfig, h hostdb.Host, bytes uint64) types.Currency { +func downloadCostForScore(h hostdb.Host, bytes uint64) types.Currency { rsc := h.PriceTable.BaseCost().Add(h.PriceTable.ReadSectorCost(rhpv2.SectorSize)) downloadSectorCostRHPv3, _ := rsc.Total() numSectors := bytesToSectors(bytes) @@ -314,7 +314,7 @@ func hostPeriodCostForScore(h hostdb.Host, cfg api.AutopilotConfig, expectedRedu hostCollateral := rhpv2.ContractFormationCollateral(cfg.Contracts.Period, storagePerHost, h.Settings) hostContractPrice := contractPriceForScore(h) hostUploadCost := uploadCostForScore(cfg, h, uploadPerHost) - hostDownloadCost := downloadCostForScore(cfg, h, downloadPerHost) + hostDownloadCost := downloadCostForScore(h, downloadPerHost) hostStorageCost := storageCostForScore(cfg, h, storagePerHost) siafundFee := hostCollateral. Add(hostContractPrice). 
diff --git a/autopilot/migrator.go b/autopilot/migrator.go index 4a4e31de6..f682e3d61 100644 --- a/autopilot/migrator.go +++ b/autopilot/migrator.go @@ -97,7 +97,7 @@ func (m *migrator) slabMigrationEstimate(remaining int) time.Duration { return time.Duration(totalNumMS) * time.Millisecond } -func (m *migrator) tryPerformMigrations(ctx context.Context, wp *workerPool) { +func (m *migrator) tryPerformMigrations(wp *workerPool) { m.mu.Lock() if m.migrating || m.ap.isStopped() { m.mu.Unlock() diff --git a/autopilot/scanner.go b/autopilot/scanner.go index e512d1f87..925aef2a1 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -162,9 +162,9 @@ func (s *scanner) isInterrupted() bool { } } -func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bool) bool { +func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bool) { if s.ap.isStopped() { - return false + return } scanType := "host scan" @@ -184,7 +184,7 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo s.interruptScanChan = make(chan struct{}) } else if s.scanning || !s.isScanRequired() { s.mu.Unlock() - return false + return } s.scanningLastStart = time.Now() s.scanning = true @@ -228,7 +228,7 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo s.logger.Debugf("%s finished after %v", st, time.Since(s.scanningLastStart)) s.mu.Unlock() }(scanType) - return true + return } func (s *scanner) tryUpdateTimeout() { diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index d5833d1fb..6214ec4a1 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -87,7 +87,7 @@ func TestScanner(t *testing.T) { // init new scanner b := &mockBus{hosts: hosts} w := &mockWorker{blockChan: make(chan struct{})} - s := newTestScanner(b, w) + s := newTestScanner(b) // assert it started a host scan s.tryPerformHostScan(context.Background(), w, false) @@ -139,7 +139,7 @@ func (s *scanner) 
isScanning() bool { return s.scanning } -func newTestScanner(b *mockBus, w *mockWorker) *scanner { +func newTestScanner(b *mockBus) *scanner { ap := &Autopilot{} ap.shutdownCtx, ap.shutdownCtxCancel = context.WithCancel(context.Background()) return &scanner{ diff --git a/bus/client/objects.go b/bus/client/objects.go index 23011a9ba..6a17691e2 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -112,7 +112,7 @@ func (c *Client) SearchObjects(ctx context.Context, bucket string, opts api.Sear } func (c *Client) renameObjects(ctx context.Context, bucket, from, to, mode string, force bool) (err error) { - err = c.c.POST("/objects/rename", api.ObjectsRenameRequest{ + err = c.c.WithContext(ctx).POST("/objects/rename", api.ObjectsRenameRequest{ Bucket: bucket, Force: force, From: from, diff --git a/bus/contractlocking_test.go b/bus/contractlocking_test.go index a00198cc9..120ca9ca2 100644 --- a/bus/contractlocking_test.go +++ b/bus/contractlocking_test.go @@ -154,7 +154,7 @@ func TestContractKeepalive(t *testing.T) { func TestContractRelease(t *testing.T) { locks := newContractLocks() - verify := func(fcid types.FileContractID, lockID uint64, lockedUntil time.Time, delta time.Duration) { + verify := func(fcid types.FileContractID, lockID uint64) { t.Helper() lock := locks.lockForContractID(fcid, false) if lock.heldByID != lockID { @@ -168,7 +168,7 @@ func TestContractRelease(t *testing.T) { if err != nil { t.Fatal(err) } - verify(fcid, lockID, time.Now().Add(time.Minute), 3*time.Second) + verify(fcid, lockID) // Acquire it again but release the contract within a second. var wg sync.WaitGroup @@ -185,14 +185,14 @@ func TestContractRelease(t *testing.T) { if err != nil { t.Fatal(err) } - verify(fcid, lockID, time.Now().Add(time.Minute), 3*time.Second) + verify(fcid, lockID) // Release one more time. Should decrease the references to 0 and reset // fields. 
if err := locks.Release(fcid, lockID); err != nil { t.Error(err) } - verify(fcid, 0, time.Time{}, 0) + verify(fcid, 0) // Try to release lock again. Is a no-op. if err := locks.Release(fcid, lockID); err != nil { diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index 47668ff94..f9008a4d5 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -41,6 +41,7 @@ func readInput(context string) string { } // wrapANSI wraps the output in ANSI escape codes if enabled. +// nolint: unparam func wrapANSI(prefix, output, suffix string) string { if enableANSI { return prefix + output + suffix diff --git a/internal/node/node.go b/internal/node/node.go index 40f7f70d8..d105cbfb2 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -180,11 +180,8 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht } shutdownFn := func(ctx context.Context) error { + close(cancelSubscribe) return errors.Join( - func() error { - close(cancelSubscribe) - return nil - }(), g.Close(), cs.Close(), tp.Close(), diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 5ca7141d5..a43c86607 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -1098,7 +1098,7 @@ func TestParallelUpload(t *testing.T) { w := cluster.Worker tt := cluster.tt - upload := func() error { + upload := func() { t.Helper() // prepare some data - make sure it's more than one sector data := make([]byte, rhpv2.SectorSize) @@ -1107,7 +1107,6 @@ func TestParallelUpload(t *testing.T) { // upload the data path := fmt.Sprintf("/dir/data_%v", hex.EncodeToString(data[:16])) tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{})) - return nil } // Upload in parallel @@ -1116,10 +1115,7 @@ func TestParallelUpload(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - if err := upload(); err != nil { - t.Error(err) - return - } + upload() }() } 
wg.Wait() diff --git a/stores/hostdb.go b/stores/hostdb.go index 0b59789df..fd23abf4a 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -87,12 +87,6 @@ type ( Hosts []dbHost `gorm:"many2many:host_allowlist_entry_hosts;constraint:OnDelete:CASCADE"` } - // dbHostAllowlistEntryHost is a join table between dbAllowlistEntry and dbHost. - dbHostAllowlistEntryHost struct { - DBAllowlistEntryID uint `gorm:"primaryKey"` - DBHostID uint `gorm:"primaryKey;index"` - } - // dbBlocklistEntry defines a table that stores the host blocklist. dbBlocklistEntry struct { Model @@ -100,12 +94,6 @@ type ( Hosts []dbHost `gorm:"many2many:host_blocklist_entry_hosts;constraint:OnDelete:CASCADE"` } - // dbHostBlocklistEntryHost is a join table between dbBlocklistEntry and dbHost. - dbHostBlocklistEntryHost struct { - DBBlocklistEntryID uint `gorm:"primaryKey"` - DBHostID uint `gorm:"primaryKey;index"` - } - dbConsensusInfo struct { Model CCID []byte @@ -278,15 +266,9 @@ func (dbHost) TableName() string { return "hosts" } // TableName implements the gorm.Tabler interface. func (dbAllowlistEntry) TableName() string { return "host_allowlist_entries" } -// TableName implements the gorm.Tabler interface. -func (dbHostAllowlistEntryHost) TableName() string { return "host_allowlist_entry_hosts" } - // TableName implements the gorm.Tabler interface. func (dbBlocklistEntry) TableName() string { return "host_blocklist_entries" } -// TableName implements the gorm.Tabler interface. -func (dbHostBlocklistEntryHost) TableName() string { return "host_blocklist_entry_hosts" } - // convert converts a host into a hostdb.Host. func (h dbHost) convert() hostdb.Host { var lastScan time.Time diff --git a/stores/metadata.go b/stores/metadata.go index c45422cfa..d13285e05 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2420,7 +2420,7 @@ func (s *SQLStore) objectRaw(txn *gorm.DB, bucket string, path string) (rows raw // contract retrieves a contract from the store. 
func (s *SQLStore) contract(ctx context.Context, id fileContractID) (dbContract, error) { - return contract(s.db, id) + return contract(s.db.WithContext(ctx), id) } // PackedSlabsForUpload returns up to 'limit' packed slabs that are ready for @@ -2806,6 +2806,7 @@ func (s *SQLStore) invalidateSlabHealthByFCID(ctx context.Context, fcids []fileC }) } +// nolint:unparam func sqlConcat(db *gorm.DB, a, b string) string { if isSQLite(db) { return fmt.Sprintf("%s || %s", a, b) @@ -2820,6 +2821,7 @@ func sqlRandomTimestamp(db *gorm.DB, now time.Time, min, max time.Duration) clau return gorm.Expr("FLOOR(? + RAND() * (? - ?))", now.Add(min).Unix(), int(max.Seconds()), int(min.Seconds())) } +// nolint:unparam func sqlWhereBucket(objTable string, bucket string) clause.Expr { return gorm.Expr(fmt.Sprintf("%s.db_bucket_id = (SELECT id FROM buckets WHERE buckets.name = ?)", objTable), bucket) } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index dcd4aaa9d..5317f6eda 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -23,10 +23,10 @@ import ( "lukechampine.com/frand" ) -func generateMultisigUC(m, n uint64, salt string) types.UnlockConditions { +func randomMultisigUC() types.UnlockConditions { uc := types.UnlockConditions{ - PublicKeys: make([]types.UnlockKey, n), - SignaturesRequired: uint64(m), + PublicKeys: make([]types.UnlockKey, 2), + SignaturesRequired: 1, } for i := range uc.PublicKeys { uc.PublicKeys[i].Algorithm = types.SpecifierEd25519 @@ -224,7 +224,7 @@ func TestSQLContractStore(t *testing.T) { } // Create random unlock conditions for the host. - uc := generateMultisigUC(1, 2, "salt") + uc := randomMultisigUC() uc.PublicKeys[1].Key = hk[:] uc.Timelock = 192837 @@ -519,11 +519,11 @@ func TestRenewedContract(t *testing.T) { } // Create random unlock conditions for the hosts. 
- uc := generateMultisigUC(1, 2, "salt") + uc := randomMultisigUC() uc.PublicKeys[1].Key = hk[:] uc.Timelock = 192837 - uc2 := generateMultisigUC(1, 2, "salt") + uc2 := randomMultisigUC() uc2.PublicKeys[1].Key = hk2[:] uc2.Timelock = 192837 @@ -873,7 +873,7 @@ func TestArchiveContracts(t *testing.T) { } func testContractRevision(fcid types.FileContractID, hk types.PublicKey) rhpv2.ContractRevision { - uc := generateMultisigUC(1, 2, "salt") + uc := randomMultisigUC() uc.PublicKeys[1].Key = hk[:] uc.Timelock = 192837 return rhpv2.ContractRevision{ @@ -3390,7 +3390,7 @@ func TestMarkSlabUploadedAfterRenew(t *testing.T) { // renew the contract. fcidRenewed := types.FileContractID{2, 2, 2, 2, 2} - uc := generateMultisigUC(1, 2, "salt") + uc := randomMultisigUC() rev := rhpv2.ContractRevision{ Revision: types.FileContractRevision{ ParentID: fcidRenewed, diff --git a/stores/migrations.go b/stores/migrations.go index 6395b52bb..aaa26a693 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -68,7 +68,7 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initSchema(db, dbIdentifier, logger)) + m.InitSchema(initSchema(dbIdentifier, logger)) // Perform migrations. if err := m.Migrate(); err != nil { diff --git a/stores/migrations_metrics.go b/stores/migrations_metrics.go index fc3164bee..25895c4f2 100644 --- a/stores/migrations_metrics.go +++ b/stores/migrations_metrics.go @@ -27,7 +27,7 @@ func performMetricsMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initSchema(db, dbIdentifier, logger)) + m.InitSchema(initSchema(dbIdentifier, logger)) // Perform migrations. 
if err := m.Migrate(); err != nil { diff --git a/stores/migrations_utils.go b/stores/migrations_utils.go index 46d7f3dc4..0692b367f 100644 --- a/stores/migrations_utils.go +++ b/stores/migrations_utils.go @@ -10,7 +10,7 @@ import ( // initSchema is executed only on a clean database. Otherwise the individual // migrations are executed. -func initSchema(db *gorm.DB, name string, logger *zap.SugaredLogger) gormigrate.InitSchemaFunc { +func initSchema(name string, logger *zap.SugaredLogger) gormigrate.InitSchemaFunc { return func(tx *gorm.DB) error { logger.Infof("initializing '%s' schema", name) diff --git a/stores/sql.go b/stores/sql.go index 53ac96207..f62dba97f 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -2,7 +2,6 @@ package stores import ( "context" - "database/sql" "embed" "errors" "fmt" @@ -514,7 +513,7 @@ func (ss *SQLStore) applyUpdates(force bool) error { return nil } -func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) error, opts ...*sql.TxOptions) error { +func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) error) error { abortRetry := func(err error) bool { if err == nil || errors.Is(err, context.Canceled) || @@ -540,7 +539,7 @@ func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) er } var err error for i := 0; i < len(s.retryTransactionIntervals); i++ { - err = s.db.WithContext(ctx).Transaction(fc, opts...) 
+ err = s.db.WithContext(ctx).Transaction(fc) if abortRetry(err) { return err } diff --git a/worker/download.go b/worker/download.go index 3a58bbc98..c8db95f35 100644 --- a/worker/download.go +++ b/worker/download.go @@ -495,11 +495,11 @@ func (mgr *downloadManager) refreshDownloaders(contracts []api.ContractMetadata) host := mgr.hm.Host(c.HostKey, c.ID, c.SiamuxAddr) downloader := newDownloader(mgr.shutdownCtx, host) mgr.downloaders[c.HostKey] = downloader - go downloader.processQueue(mgr.hm) + go downloader.processQueue() } } -func (mgr *downloadManager) newSlabDownload(ctx context.Context, slice object.SlabSlice, migration bool) *slabDownload { +func (mgr *downloadManager) newSlabDownload(slice object.SlabSlice, migration bool) *slabDownload { // calculate the offset and length offset, length := slice.SectorRegion() @@ -529,7 +529,7 @@ func (mgr *downloadManager) newSlabDownload(ctx context.Context, slice object.Sl func (mgr *downloadManager) downloadSlab(ctx context.Context, slice object.SlabSlice, migration bool) ([][]byte, bool, error) { // prepare new download - slab := mgr.newSlabDownload(ctx, slice, migration) + slab := mgr.newSlabDownload(slice, migration) // execute download return slab.download(ctx) diff --git a/worker/downloader.go b/worker/downloader.go index 24be245fc..46dac61e3 100644 --- a/worker/downloader.go +++ b/worker/downloader.go @@ -245,7 +245,7 @@ func (d *downloader) processBatch(batch []*sectorDownloadReq) chan struct{} { return doneChan } -func (d *downloader) processQueue(hp HostManager) { +func (d *downloader) processQueue() { outer: for { // wait for work diff --git a/worker/rhpv2.go b/worker/rhpv2.go index 02cdce4ff..9f05904a4 100644 --- a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -277,7 +277,7 @@ func (w *worker) FetchSignedRevision(ctx context.Context, hostIP string, hostKey func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types.PublicKey, fcid types.FileContractID, lastKnownRevisionNumber uint64) 
(deleted, remaining uint64, err error) { err = w.withContractLock(ctx, fcid, lockingPriorityPruning, func() error { return w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) error { - return w.withRevisionV2(ctx, defaultLockTimeout, t, hostKey, fcid, lastKnownRevisionNumber, func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) (err error) { + return w.withRevisionV2(defaultLockTimeout, t, hostKey, fcid, lastKnownRevisionNumber, func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) (err error) { // perform gouging checks gc, err := GougingCheckerFromContext(ctx, false) if err != nil { @@ -510,7 +510,7 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi func (w *worker) FetchContractRoots(ctx context.Context, hostIP string, hostKey types.PublicKey, fcid types.FileContractID, lastKnownRevisionNumber uint64) (roots []types.Hash256, err error) { err = w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) error { - return w.withRevisionV2(ctx, defaultLockTimeout, t, hostKey, fcid, lastKnownRevisionNumber, func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) (err error) { + return w.withRevisionV2(defaultLockTimeout, t, hostKey, fcid, lastKnownRevisionNumber, func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) (err error) { gc, err := GougingCheckerFromContext(ctx, false) if err != nil { return err @@ -641,7 +641,7 @@ func (w *worker) withTransportV2(ctx context.Context, hostKey types.PublicKey, h return fn(t) } -func (w *worker) withRevisionV2(ctx context.Context, lockTimeout time.Duration, t *rhpv2.Transport, hk types.PublicKey, fcid types.FileContractID, lastKnownRevisionNumber uint64, fn func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) error) error { +func (w *worker) withRevisionV2(lockTimeout time.Duration, t *rhpv2.Transport, hk types.PublicKey, fcid 
types.FileContractID, lastKnownRevisionNumber uint64, fn func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) error) error { renterKey := w.deriveRenterKey(hk) // execute lock RPC diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 9c280f2bd..0ac20e13d 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -177,7 +177,7 @@ type transportPoolV3 struct { pool map[string]*transportV3 } -func newTransportPoolV3(w *worker) *transportPoolV3 { +func newTransportPoolV3() *transportPoolV3 { return &transportPoolV3{ pool: make(map[string]*transportV3), } @@ -365,7 +365,7 @@ func (w *worker) initTransportPool() { if w.transportPoolV3 != nil { panic("transport pool already initialized") // developer error } - w.transportPoolV3 = newTransportPoolV3(w) + w.transportPoolV3 = newTransportPoolV3() } // ForHost returns an account to use for a given host. If the account diff --git a/worker/upload.go b/worker/upload.go index c5e86a166..338163edd 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -397,7 +397,7 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a } // create the upload - upload, err := mgr.newUpload(ctx, up.rs.TotalShards, contracts, up.bh, lockPriority) + upload, err := mgr.newUpload(up.rs.TotalShards, contracts, up.bh, lockPriority) if err != nil { return false, "", err } @@ -558,7 +558,7 @@ func (mgr *uploadManager) UploadPackedSlab(ctx context.Context, rs api.Redundanc shards := encryptPartialSlab(ps.Data, ps.Key, uint8(rs.MinShards), uint8(rs.TotalShards)) // create the upload - upload, err := mgr.newUpload(ctx, len(shards), contracts, bh, lockPriority) + upload, err := mgr.newUpload(len(shards), contracts, bh, lockPriority) if err != nil { return err } @@ -603,7 +603,7 @@ func (mgr *uploadManager) UploadShards(ctx context.Context, s *object.Slab, shar defer cancel() // create the upload - upload, err := mgr.newUpload(ctx, len(shards), contracts, bh, lockPriority) + upload, err := 
mgr.newUpload(len(shards), contracts, bh, lockPriority) if err != nil { return err } @@ -675,7 +675,7 @@ func (mgr *uploadManager) candidates(allowed map[types.PublicKey]struct{}) (cand return } -func (mgr *uploadManager) newUpload(ctx context.Context, totalShards int, contracts []api.ContractMetadata, bh uint64, lockPriority int) (*upload, error) { +func (mgr *uploadManager) newUpload(totalShards int, contracts []api.ContractMetadata, bh uint64, lockPriority int) (*upload, error) { mgr.mu.Lock() defer mgr.mu.Unlock() diff --git a/worker/worker.go b/worker/worker.go index 9e4dacdd2..db287585a 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1540,7 +1540,7 @@ func discardTxnOnErr(ctx context.Context, bus Bus, l *zap.SugaredLogger, txn typ ctx, cancel := context.WithTimeout(ctx, 10*time.Second) if dErr := bus.WalletDiscard(ctx, txn); dErr != nil { - l.Errorf("%w: failed to discard txn: %v", *err, dErr) + l.Errorf("%w: %v, failed to discard txn: %v", *err, errContext, dErr) } cancel() } From ce60217e6bd30d1442ec6b823534f4ffff655764 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 12 Mar 2024 18:25:46 +0100 Subject: [PATCH 023/201] lint: add deadcode --- .golangci.yml | 1 + stores/metadata.go | 3 ++- worker/worker.go | 1 - 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ace11db65..9aad5bd19 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -104,6 +104,7 @@ linters: - tagliatelle - unused - unparam + - deadcode issues: # Maximum issues count per one linter. Set to 0 to disable. Default is 50. diff --git a/stores/metadata.go b/stores/metadata.go index d13285e05..45ae0a41c 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -32,8 +32,9 @@ const ( // 10/30 erasure coding and takes <1s to execute on an SSD in SQLite. refreshHealthBatchSize = 10000 + // sectorInsertionBatchSize is the number of sectors per batch when we + // upsert sectors. 
sectorInsertionBatchSize = 500 - sectorQueryBatchSize = 100 refreshHealthMinHealthValidity = 12 * time.Hour refreshHealthMaxHealthValidity = 72 * time.Hour diff --git a/worker/worker.go b/worker/worker.go index db287585a..d3e83bd50 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -42,7 +42,6 @@ const ( lockingPriorityActiveContractRevision = 100 lockingPriorityRenew = 80 - lockingPriorityPriceTable = 60 lockingPriorityFunding = 40 lockingPrioritySyncing = 30 lockingPriorityPruning = 20 From c3b3a4f2a7c30b39d244ea10e01f3704c3e9f35e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 12 Mar 2024 18:37:54 +0100 Subject: [PATCH 024/201] stores: check if object exists before deleting --- stores/metadata.go | 71 +++++++++++++++++------------------------ stores/metadata_test.go | 21 ++++++------ 2 files changed, 42 insertions(+), 50 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index e6dcf9363..da98a354d 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1723,8 +1723,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, usedContracts := o.Contracts() // UpdateObject is ACID. - var nDeleted int64 - err := s.retryTransaction(func(tx *gorm.DB) error { + return s.retryTransaction(func(tx *gorm.DB) error { // Try to delete. We want to get rid of the object and its slices if it // exists. // @@ -1735,42 +1734,27 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // NOTE: the metadata is not deleted because this delete will cascade, // if we stop recreating the object we have to make sure to delete the // object's metadata before trying to recreate it - var err error - resp := tx.Exec("DELETE FROM objects WHERE object_id = ? 
AND db_bucket_id = ?", path, bucketID) - if resp.Error != nil { - return fmt.Errorf("UpdateObject: failed to delete object: %w", resp.Error) + _, err := s.deleteObject(tx, bucket, path) + if err != nil { + return fmt.Errorf("UpdateObject: failed to delete object: %w", err) } - nDeleted = resp.RowsAffected // Insert a new object. objKey, err := o.Key.MarshalBinary() if err != nil { return fmt.Errorf("failed to marshal object key: %w", err) } - err = tx.Model(&dbObject{}). - Create(map[string]any{ - "created_at": time.Now(), - "db_bucket_id": bucketID, - "object_id": path, - "key": objKey, - "size": o.TotalSize(), - "mime_type": mimeType, - "etag": eTag, - }).Error - if err != nil { - return fmt.Errorf("failed to create object: %w", err) + obj := dbObject{ + DBBucketID: bucketID, + ObjectID: path, + Key: objKey, + Size: o.TotalSize(), + MimeType: mimeType, + Etag: eTag, } - - // Get the id of the object - var objID uint - err = tx.Model(&dbObject{}). - Select("id"). - Where("object_id = ?", path). - Where("db_bucket_id", bucketID). - Scan(&objID). - Error + err = tx.Create(&obj).Error if err != nil { - return err + return fmt.Errorf("failed to create object: %w", err) } // Fetch contract set. @@ -1786,25 +1770,16 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, } // Create all slices. This also creates any missing slabs or sectors. - if err := s.createSlices(tx, &objID, nil, cs.ID, contracts, o.Slabs); err != nil { + if err := s.createSlices(tx, &obj.ID, nil, cs.ID, contracts, o.Slabs); err != nil { return fmt.Errorf("failed to create slices: %w", err) } // Create all user metadata. 
- if err := s.createUserMetadata(tx, objID, metadata); err != nil { + if err := s.createUserMetadata(tx, obj.ID, metadata); err != nil { return fmt.Errorf("failed to create user metadata: %w", err) } return nil }) - if err != nil { - return err - } - if nDeleted > 0 { - if err := s.retryTransaction(pruneSlabs); err != nil { - return fmt.Errorf("UpdateObject: failed to prune slabs: %w", err) - } - } - return pruneSlabs(s.db) } func (s *SQLStore) RemoveObject(ctx context.Context, bucket, key string) error { @@ -2757,7 +2732,21 @@ AND slabs.db_buffered_slab_id IS NULL // without an obect after the deletion. That means in case of packed uploads, // the slab is only deleted when no more objects point to it. func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, error) { - tx = tx.Where("object_id = ? AND ?", path, sqlWhereBucket("objects", bucket)). + // check if the object exists first to avoid unnecessary locking for the + // common case + var objID uint + resp := tx.Model(&dbObject{}). + Where("object_id = ? AND ?", path, sqlWhereBucket("objects", bucket)). + Select("id"). + Limit(1). + Scan(&objID) + if err := resp.Error; err != nil { + return 0, err + } else if resp.RowsAffected == 0 { + return 0, nil + } + + tx = tx.Where("id", objID). Delete(&dbObject{}) if tx.Error != nil { return 0, tx.Error diff --git a/stores/metadata_test.go b/stores/metadata_test.go index dbb688d9b..02409664a 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4550,6 +4550,11 @@ func TestTypeCurrency(t *testing.T) { // TestUpdateObjectParallel calls UpdateObject from multiple threads in parallel // while retries are disabled to make sure calling the same method from multiple // threads won't cause deadlocks. +// +// NOTE: This test only covers the optimistic case of inserting objects without +// overwriting them. 
As soon as combining deletions and insertions within the +// same transaction, deadlocks become more likely due to the gap locks MySQL +// uses. func TestUpdateObjectParallel(t *testing.T) { cfg := defaultTestSQLStoreConfig @@ -4591,7 +4596,7 @@ func TestUpdateObjectParallel(t *testing.T) { Health: 1.0, Key: object.GenerateEncryptionKey(), MinShards: 1, - Shards: newTestShards(hk1, fcid1, types.Hash256{1}), + Shards: newTestShards(hk1, fcid1, frand.Entropy256()), }, Offset: 10, Length: 100, @@ -4601,7 +4606,7 @@ func TestUpdateObjectParallel(t *testing.T) { Health: 1.0, Key: object.GenerateEncryptionKey(), MinShards: 2, - Shards: newTestShards(hk2, fcid2, types.Hash256{2}), + Shards: newTestShards(hk2, fcid2, frand.Entropy256()), }, Offset: 20, Length: 200, @@ -4627,13 +4632,11 @@ func TestUpdateObjectParallel(t *testing.T) { } // create 1000 objects and then overwrite them - for i := 0; i < 2; i++ { - for j := 0; j < 1000; j++ { - select { - case c <- fmt.Sprintf("object-%d", j): - case <-ctx.Done(): - return - } + for i := 0; i < 1000; i++ { + select { + case c <- fmt.Sprintf("object-%d", i): + case <-ctx.Done(): + return } } From dc65a4a0e48bbddd80a95c2b6a06b0abb15eafba Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 09:15:41 +0100 Subject: [PATCH 025/201] stores: fetch bucket id within txn again --- stores/metadata.go | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index da98a354d..5dfd016ad 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1706,19 +1706,6 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, } } - // check if bucket exists - doing so before the transaction to avoid - // deadlocks while risking the bucket updating in the meantime - var bucketID uint - if resp := s.db. - Model(&dbBucket{}). - Select("id"). - Where("name", bucket). 
- Scan(&bucketID); resp.Error != nil { - return resp.Error - } else if resp.RowsAffected == 0 { - return api.ErrBucketNotFound - } - // collect all used contracts usedContracts := o.Contracts() @@ -1739,7 +1726,15 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return fmt.Errorf("UpdateObject: failed to delete object: %w", err) } - // Insert a new object. + // Insert a new object. + var bucketID uint + err = tx.Table("(SELECT id from buckets WHERE buckets.name = ?) bucket_id", bucket). + Take(&bucketID).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return fmt.Errorf("bucket %v not found: %w", bucket, api.ErrBucketNotFound) + } else if err != nil { + return fmt.Errorf("failed to fetch bucket id: %w", err) + } objKey, err := o.Key.MarshalBinary() if err != nil { return fmt.Errorf("failed to marshal object key: %w", err) From 9afe29b5f3775719c30a98e8bd431958ab7cd582 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 09:39:25 +0100 Subject: [PATCH 026/201] stores: overwrite object instead of deleting it --- stores/metadata.go | 68 +++++++++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 25 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 5dfd016ad..6fde5ce10 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1709,37 +1709,27 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // collect all used contracts usedContracts := o.Contracts() + // fetch bucket id + var bucketID uint + err := s.db.Table("(SELECT id from buckets WHERE buckets.name = ?) bucket_id", bucket). + Take(&bucketID).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return fmt.Errorf("bucket %v not found: %w", bucket, api.ErrBucketNotFound) + } else if err != nil { + return fmt.Errorf("failed to fetch bucket id: %w", err) + } + // UpdateObject is ACID. return s.retryTransaction(func(tx *gorm.DB) error { - // Try to delete. 
We want to get rid of the object and its slices if it - // exists. - // - // NOTE: the object's created_at is currently used as its ModTime, if we - // ever stop recreating the object but update it instead we need to take - // this into account - // - // NOTE: the metadata is not deleted because this delete will cascade, - // if we stop recreating the object we have to make sure to delete the - // object's metadata before trying to recreate it - _, err := s.deleteObject(tx, bucket, path) - if err != nil { - return fmt.Errorf("UpdateObject: failed to delete object: %w", err) - } - - // Insert a new object. - var bucketID uint - err = tx.Table("(SELECT id from buckets WHERE buckets.name = ?) bucket_id", bucket). - Take(&bucketID).Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return fmt.Errorf("bucket %v not found: %w", bucket, api.ErrBucketNotFound) - } else if err != nil { - return fmt.Errorf("failed to fetch bucket id: %w", err) - } + // Insert a new object or update an existing one. objKey, err := o.Key.MarshalBinary() if err != nil { return fmt.Errorf("failed to marshal object key: %w", err) } obj := dbObject{ + Model: Model{ + CreatedAt: time.Now(), + }, DBBucketID: bucketID, ObjectID: path, Key: objKey, @@ -1747,7 +1737,11 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, MimeType: mimeType, Etag: eTag, } - err = tx.Create(&obj).Error + err = tx. + Clauses(clause.OnConflict{ + UpdateAll: true, + }). + Create(&obj).Error if err != nil { return fmt.Errorf("failed to create object: %w", err) } @@ -1764,11 +1758,35 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return fmt.Errorf("failed to fetch used contracts: %w", err) } + // Delete any old slices. 
+ var slices []dbSlice + if err := tx.Where("db_object_id = ?", obj.ID).Limit(1).Find(&slices).Error; err != nil { + return fmt.Errorf("failed to fetch slices: %w", err) + } + if len(slices) > 0 { + if err := tx.Where("db_object_id", obj.ID).Delete(&dbSlice{}).Error; err != nil { + return fmt.Errorf("failed to delete slices: %w", err) + } else if err := pruneSlabs(tx); err != nil { + return fmt.Errorf("failed to prune slabs: %w", err) + } + } + // Create all slices. This also creates any missing slabs or sectors. if err := s.createSlices(tx, &obj.ID, nil, cs.ID, contracts, o.Slabs); err != nil { return fmt.Errorf("failed to create slices: %w", err) } + // Delete any old user metadata. + var oldMetadata []dbObjectUserMetadata + if err := tx.Where("db_object_id = ?", obj.ID).Limit(1).Find(&oldMetadata).Error; err != nil { + return fmt.Errorf("failed to fetch user metadata: %w", err) + } + if len(oldMetadata) > 0 { + if err := tx.Where("db_object_id", obj.ID).Delete(&dbObjectUserMetadata{}).Error; err != nil { + return fmt.Errorf("failed to delete user metadata: %w", err) + } + } + // Create all user metadata. if err := s.createUserMetadata(tx, obj.ID, metadata); err != nil { return fmt.Errorf("failed to create user metadata: %w", err) From 6490dd8b8be144ab7bac1dbc3fe6bc147aff0343 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 09:46:56 +0100 Subject: [PATCH 027/201] stores: fix TestSQLMetadataStore --- stores/metadata.go | 1 + stores/metadata_test.go | 13 ++++++------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 6fde5ce10..d0cf22a7f 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1739,6 +1739,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, } err = tx. Clauses(clause.OnConflict{ + Columns: []clause.Column{{Name: "db_bucket_id"}, {Name: "object_id"}}, UpdateAll: true, }). 
Create(&obj).Error diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 02409664a..fef6bca1b 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1056,11 +1056,10 @@ func TestSQLMetadataStore(t *testing.T) { // The expected object is the same except for some ids which were // incremented due to the object and slab being overwritten. - two := uint(2) - expectedObj.Slabs[0].DBObjectID = &two - expectedObj.Slabs[0].DBSlabID = 1 - expectedObj.Slabs[1].DBObjectID = &two - expectedObj.Slabs[1].DBSlabID = 2 + expectedObj.Slabs[0].DBObjectID = &one + expectedObj.Slabs[0].DBSlabID = 3 + expectedObj.Slabs[1].DBObjectID = &one + expectedObj.Slabs[1].DBSlabID = 4 if !reflect.DeepEqual(obj, expectedObj) { t.Fatal("object mismatch", cmp.Diff(obj, expectedObj)) } @@ -1082,7 +1081,7 @@ func TestSQLMetadataStore(t *testing.T) { TotalShards: 1, Shards: []dbSector{ { - DBSlabID: 1, + DBSlabID: 3, SlabIndex: 1, Root: obj1.Slabs[0].Shards[0].Root[:], LatestHost: publicKey(obj1.Slabs[0].Shards[0].LatestHost), @@ -1122,7 +1121,7 @@ func TestSQLMetadataStore(t *testing.T) { TotalShards: 1, Shards: []dbSector{ { - DBSlabID: 2, + DBSlabID: 4, SlabIndex: 1, Root: obj1.Slabs[1].Shards[0].Root[:], LatestHost: publicKey(obj1.Slabs[1].Shards[0].LatestHost), From 3351f91ebf7b05f6504bf19f24928d78e001871b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 10:49:57 +0100 Subject: [PATCH 028/201] s3: extend tests to check etag on GetObject and HeadObject and fix necessary code --- api/object.go | 7 ++++++- internal/test/e2e/s3_test.go | 24 +++++++++++++++++++++++- s3/backend.go | 28 +++++++++++++++++++++------- s3/s3.go | 1 + worker/client/client.go | 7 +++++++ worker/worker.go | 2 +- 6 files changed, 59 insertions(+), 10 deletions(-) diff --git a/api/object.go b/api/object.go index 4b1993341..60db9413a 100644 --- a/api/object.go +++ b/api/object.go @@ -92,6 +92,7 @@ type ( // HeadObjectResponse is the response type for the HEAD /worker/object 
endpoint. HeadObjectResponse struct { ContentType string `json:"contentType"` + Etag string `json:"etag"` LastModified string `json:"lastModified"` Range *DownloadRange `json:"range,omitempty"` Size int64 `json:"size"` @@ -212,7 +213,8 @@ type ( } HeadObjectOptions struct { - Range DownloadRange + IgnoreDelim bool + Range DownloadRange } DownloadObjectOptions struct { @@ -318,6 +320,9 @@ func (opts HeadObjectOptions) ApplyHeaders(h http.Header) { h.Set("Range", fmt.Sprintf("bytes=%v-%v", opts.Range.Offset, opts.Range.Offset+opts.Range.Length-1)) } } + if opts.IgnoreDelim { + h.Set("ignoreDelim", "true") + } } func (opts GetObjectOptions) Apply(values url.Values) { diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index c8c6bb334..3ddbb0bdd 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -92,6 +92,10 @@ func TestS3Basic(t *testing.T) { t.Fatal(err) } else if !bytes.Equal(b, data) { t.Fatal("data mismatch") + } else if info, err := obj.Stat(); err != nil { + t.Fatal(err) + } else if info.ETag != uploadInfo.ETag { + t.Fatal("unexpected ETag:", info.ETag, uploadInfo.ETag) } // stat object @@ -99,6 +103,8 @@ func TestS3Basic(t *testing.T) { tt.OK(err) if info.Size != int64(len(data)) { t.Fatal("size mismatch") + } else if info.ETag != uploadInfo.ETag { + t.Fatal("unexpected ETag:", info.ETag) } // add another bucket @@ -580,12 +586,28 @@ func TestS3MultipartUploads(t *testing.T) { } // Download object + expectedData := []byte("helloworld!") downloadedObj, err := s3.GetObject(context.Background(), "multipart", "foo", minio.GetObjectOptions{}) tt.OK(err) if data, err := io.ReadAll(downloadedObj); err != nil { t.Fatal(err) - } else if !bytes.Equal(data, []byte("helloworld!")) { + } else if !bytes.Equal(data, expectedData) { t.Fatal("unexpected data:", string(data)) + } else if info, err := downloadedObj.Stat(); err != nil { + t.Fatal(err) + } else if info.ETag != ui.ETag { + t.Fatal("unexpected ETag:", info.ETag) + } 
else if info.Size != int64(len(expectedData)) { + t.Fatal("unexpected size:", info.Size) + } + + // Stat object + if info, err := s3.StatObject(context.Background(), "multipart", "foo", minio.StatObjectOptions{}); err != nil { + t.Fatal(err) + } else if info.ETag != ui.ETag { + t.Fatal("unexpected ETag:", info.ETag) + } else if info.Size != int64(len(expectedData)) { + t.Fatal("unexpected size:", info.Size) } // Download again with range request. diff --git a/s3/backend.go b/s3/backend.go index 7b5ea74f9..5261bd5f7 100644 --- a/s3/backend.go +++ b/s3/backend.go @@ -3,6 +3,7 @@ package s3 import ( "bytes" "context" + "encoding/hex" "fmt" "io" "strings" @@ -268,7 +269,14 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range res.Metadata["Content-Type"] = res.ContentType res.Metadata["Last-Modified"] = res.LastModified + // etag to bytes + etag, err := hex.DecodeString(res.Etag) + if err != nil { + return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) + } + return &gofakes3.Object{ + Hash: etag, Name: gofakes3.URLEncode(objectName), Metadata: res.Metadata, Size: res.Size, @@ -287,9 +295,8 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range // HeadObject should return a NotFound() error if the object does not // exist. 
func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*gofakes3.Object, error) { - res, err := s.b.Object(ctx, bucketName, objectName, api.GetObjectOptions{ - IgnoreDelim: true, - OnlyMetadata: true, + res, err := s.w.HeadObject(ctx, bucketName, objectName, api.HeadObjectOptions{ + IgnoreDelim: true, }) if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { return nil, gofakes3.KeyNotFound(objectName) @@ -299,18 +306,25 @@ func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*go // set user metadata metadata := make(map[string]string) - for k, v := range res.Object.Metadata { + for k, v := range res.Metadata { metadata[amazonMetadataPrefix+k] = v } // decorate metadata - metadata["Content-Type"] = res.Object.MimeType - metadata["Last-Modified"] = res.Object.LastModified() + metadata["Content-Type"] = res.ContentType + metadata["Last-Modified"] = res.LastModified + + // etag to bytes + hash, err := hex.DecodeString(res.Etag) + if err != nil { + return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) + } return &gofakes3.Object{ + Hash: hash, Name: gofakes3.URLEncode(objectName), Metadata: metadata, - Size: res.Object.Size, + Size: res.Size, Contents: io.NopCloser(bytes.NewReader(nil)), }, nil } diff --git a/s3/s3.go b/s3/s3.go index 95c2e98e6..0ac1dbd49 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -48,6 +48,7 @@ type bus interface { type worker interface { GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) + HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, path, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) 
(*api.UploadMultipartUploadPartResponse, error) } diff --git a/worker/client/client.go b/worker/client/client.go index 410e4c66e..206b70d8b 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -87,6 +87,7 @@ func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.H values := url.Values{} values.Set("bucket", url.QueryEscape(bucket)) + path = api.ObjectPathEscape(path) path += "?" + values.Encode() // TODO: support HEAD in jape client @@ -325,6 +326,7 @@ func parseObjectResponseHeaders(header http.Header) (api.HeadObjectResponse, err return api.HeadObjectResponse{ ContentType: header.Get("Content-Type"), + Etag: trimEtag(header.Get("ETag")), LastModified: header.Get("Last-Modified"), Range: r, Size: size, @@ -347,3 +349,8 @@ func sizeFromSeeker(r io.Reader) (int64, error) { } return size, nil } + +func trimEtag(etag string) string { + etag = strings.TrimPrefix(etag, "\"") + return strings.TrimSuffix(etag, "\"") +} diff --git a/worker/worker.go b/worker/worker.go index c2e08f915..651f5e2b3 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -872,7 +872,7 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) { res, err := w.bus.Object(jc.Request.Context(), bucket, path, api.GetObjectOptions{ OnlyMetadata: true, }) - if errors.Is(err, api.ErrObjectNotFound) { + if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { jc.Error(err, http.StatusNotFound) return } else if err != nil { From 08cecc9b85d0aa3a4334fcf64732dbcb87c7a9c4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 11:02:30 +0100 Subject: [PATCH 029/201] stores: fix TestSQLMetadattaStore --- stores/metadata.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stores/metadata.go b/stores/metadata.go index d0cf22a7f..f84b8151c 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1747,6 +1747,11 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return fmt.Errorf("failed to create 
object: %w", err) } + // Fetch object id for the created or updated object. + if err := tx.Model(&dbObject{}).Select("id").Scan(&obj.ID).Error; err != nil { + return fmt.Errorf("failed to fetch object id: %w", err) + } + // Fetch contract set. var cs dbContractSet if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { From 04d0978031f79910aa4c5e366a0124956c33c2a0 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 11:08:43 +0100 Subject: [PATCH 030/201] utils: compare lower case --- internal/utils/errors.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/utils/errors.go b/internal/utils/errors.go index a8c4bbf59..b884cde70 100644 --- a/internal/utils/errors.go +++ b/internal/utils/errors.go @@ -14,5 +14,7 @@ func IsErr(err error, target error) bool { } else if errors.Is(err, target) { return true } - return strings.Contains(err.Error(), target.Error()) + // TODO: we can get rid of the lower casing once siad is gone and + // renterd/hostd use the same error messages + return strings.Contains(strings.ToLower(err.Error()), strings.ToLower(target.Error())) } From c5334a55684b2330854c88804698a1c140067443 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 11:28:53 +0100 Subject: [PATCH 031/201] stores: only run TestUpdateObjectParallel against MySQL --- stores/metadata.go | 77 +++++++++++++++-------------------------- stores/metadata_test.go | 13 ++++--- 2 files changed, 37 insertions(+), 53 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index f84b8151c..cfcde4744 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1709,27 +1709,39 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // collect all used contracts usedContracts := o.Contracts() - // fetch bucket id - var bucketID uint - err := s.db.Table("(SELECT id from buckets WHERE buckets.name = ?) bucket_id", bucket). 
- Take(&bucketID).Error - if errors.Is(err, gorm.ErrRecordNotFound) { - return fmt.Errorf("bucket %v not found: %w", bucket, api.ErrBucketNotFound) - } else if err != nil { - return fmt.Errorf("failed to fetch bucket id: %w", err) - } - // UpdateObject is ACID. return s.retryTransaction(func(tx *gorm.DB) error { - // Insert a new object or update an existing one. + // Try to delete. We want to get rid of the object and its slices if it + // exists. + // + // NOTE: the object's created_at is currently used as its ModTime, if we + // ever stop recreating the object but update it instead we need to take + // this into account + // + // NOTE: the metadata is not deleted because this delete will cascade, + // if we stop recreating the object we have to make sure to delete the + // object's metadata before trying to recreate it + _, err := s.deleteObject(tx, bucket, path) + if err != nil { + return fmt.Errorf("UpdateObject: failed to delete object: %w", err) + } + + // Insert a new object. objKey, err := o.Key.MarshalBinary() if err != nil { return fmt.Errorf("failed to marshal object key: %w", err) } + // fetch bucket id + var bucketID uint + err = s.db.Table("(SELECT id from buckets WHERE buckets.name = ?) bucket_id", bucket). + Take(&bucketID).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return fmt.Errorf("bucket %v not found: %w", bucket, api.ErrBucketNotFound) + } else if err != nil { + return fmt.Errorf("failed to fetch bucket id: %w", err) + } + obj := dbObject{ - Model: Model{ - CreatedAt: time.Now(), - }, DBBucketID: bucketID, ObjectID: path, Key: objKey, @@ -1737,21 +1749,11 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, MimeType: mimeType, Etag: eTag, } - err = tx. - Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: "db_bucket_id"}, {Name: "object_id"}}, - UpdateAll: true, - }). 
- Create(&obj).Error + err = tx.Create(&obj).Error if err != nil { return fmt.Errorf("failed to create object: %w", err) } - // Fetch object id for the created or updated object. - if err := tx.Model(&dbObject{}).Select("id").Scan(&obj.ID).Error; err != nil { - return fmt.Errorf("failed to fetch object id: %w", err) - } - // Fetch contract set. var cs dbContractSet if err := tx.Take(&cs, "name = ?", contractSet).Error; err != nil { @@ -1764,39 +1766,16 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return fmt.Errorf("failed to fetch used contracts: %w", err) } - // Delete any old slices. - var slices []dbSlice - if err := tx.Where("db_object_id = ?", obj.ID).Limit(1).Find(&slices).Error; err != nil { - return fmt.Errorf("failed to fetch slices: %w", err) - } - if len(slices) > 0 { - if err := tx.Where("db_object_id", obj.ID).Delete(&dbSlice{}).Error; err != nil { - return fmt.Errorf("failed to delete slices: %w", err) - } else if err := pruneSlabs(tx); err != nil { - return fmt.Errorf("failed to prune slabs: %w", err) - } - } - // Create all slices. This also creates any missing slabs or sectors. if err := s.createSlices(tx, &obj.ID, nil, cs.ID, contracts, o.Slabs); err != nil { return fmt.Errorf("failed to create slices: %w", err) } - // Delete any old user metadata. - var oldMetadata []dbObjectUserMetadata - if err := tx.Where("db_object_id = ?", obj.ID).Limit(1).Find(&oldMetadata).Error; err != nil { - return fmt.Errorf("failed to fetch user metadata: %w", err) - } - if len(oldMetadata) > 0 { - if err := tx.Where("db_object_id", obj.ID).Delete(&dbObjectUserMetadata{}).Error; err != nil { - return fmt.Errorf("failed to delete user metadata: %w", err) - } - } - // Create all user metadata. 
if err := s.createUserMetadata(tx, obj.ID, metadata); err != nil { return fmt.Errorf("failed to create user metadata: %w", err) } + return nil }) } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index fef6bca1b..47d004957 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1056,9 +1056,10 @@ func TestSQLMetadataStore(t *testing.T) { // The expected object is the same except for some ids which were // incremented due to the object and slab being overwritten. - expectedObj.Slabs[0].DBObjectID = &one + two := uint(2) + expectedObj.Slabs[0].DBObjectID = &two expectedObj.Slabs[0].DBSlabID = 3 - expectedObj.Slabs[1].DBObjectID = &one + expectedObj.Slabs[1].DBObjectID = &two expectedObj.Slabs[1].DBSlabID = 4 if !reflect.DeepEqual(obj, expectedObj) { t.Fatal("object mismatch", cmp.Diff(obj, expectedObj)) @@ -4557,10 +4558,14 @@ func TestTypeCurrency(t *testing.T) { func TestUpdateObjectParallel(t *testing.T) { cfg := defaultTestSQLStoreConfig - // check if we are running against mysql and only persist if we aren't dbURI, _, _, _ := DBConfigFromEnv() if dbURI == "" { - cfg.persistent = true + // it's pretty much impossile to optimise for both sqlite and mysql at + // the same time so we skip this test for SQLite for now + // TODO: once we moved away from gorm and implement separate interfaces + // for SQLite and MySQL, we have more control over the used queries and + // can revisit this + t.SkipNow() } ss := newTestSQLStore(t, cfg) ss.retryTransactionIntervals = []time.Duration{0} // don't retry From 7e7c14e504e85b2127cc46c4cdc8c36f7b4b551b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 11:36:02 +0100 Subject: [PATCH 032/201] e2e: check etag for exact md5 value --- api/object.go | 2 +- internal/test/e2e/s3_test.go | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/api/object.go b/api/object.go index 60db9413a..f343455ef 100644 --- a/api/object.go +++ b/api/object.go @@ -92,7 +92,7 @@ type ( // 
HeadObjectResponse is the response type for the HEAD /worker/object endpoint. HeadObjectResponse struct { ContentType string `json:"contentType"` - Etag string `json:"etag"` + Etag string `json:"eTag"` LastModified string `json:"lastModified"` Range *DownloadRange `json:"range,omitempty"` Size int64 `json:"size"` diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index 3ddbb0bdd..bb9745d51 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -3,6 +3,8 @@ package e2e import ( "bytes" "context" + "crypto/md5" + "encoding/hex" "errors" "fmt" "io" @@ -72,8 +74,12 @@ func TestS3Basic(t *testing.T) { // add object to the bucket data := frand.Bytes(10) + etag := md5.Sum(data) uploadInfo, err := s3.PutObject(context.Background(), bucket, objPath, bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{}) tt.OK(err) + if uploadInfo.ETag != hex.EncodeToString(etag[:]) { + t.Fatalf("expected ETag %v, got %v", hex.EncodeToString(etag[:]), uploadInfo.ETag) + } busObject, err := cluster.Bus.Object(context.Background(), bucket, objPath, api.GetObjectOptions{}) tt.OK(err) if busObject.Object == nil { From e27b6f86ba8498fb5a3e759a2d69ea6ec2b847e4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 12:55:56 +0100 Subject: [PATCH 033/201] e2e: fix TestObjectMetadata --- internal/test/e2e/metadata_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go index d11f6ba4e..af924f847 100644 --- a/internal/test/e2e/metadata_test.go +++ b/internal/test/e2e/metadata_test.go @@ -55,6 +55,8 @@ func TestObjectMetadata(t *testing.T) { } if !reflect.DeepEqual(gor.Metadata, opts.Metadata) { t.Fatal("metadata mismatch", gor.Metadata) + } else if gor.Etag == "" { + t.Fatal("missing etag") } // perform a HEAD request and assert the headers are all present @@ -63,6 +65,7 @@ func TestObjectMetadata(t *testing.T) { t.Fatal(err) } else if 
!reflect.DeepEqual(hor, &api.HeadObjectResponse{ ContentType: or.Object.ContentType(), + Etag: gor.Etag, LastModified: or.Object.LastModified(), Range: &api.DownloadRange{Offset: 1, Length: 1, Size: int64(len(data))}, Size: int64(len(data)), From 5a4cc470a32718f8bae01a1de544accdcf4eb342 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 13:18:46 +0100 Subject: [PATCH 034/201] e2e: fix TestS3List --- api/object.go | 9 ++++++--- worker/client/client.go | 5 +---- worker/worker.go | 7 ++++++- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/api/object.go b/api/object.go index f343455ef..36cea9db8 100644 --- a/api/object.go +++ b/api/object.go @@ -312,6 +312,12 @@ func (opts DeleteObjectOptions) Apply(values url.Values) { } } +func (opts HeadObjectOptions) Apply(values url.Values) { + if opts.IgnoreDelim { + values.Set("ignoreDelim", "true") + } +} + func (opts HeadObjectOptions) ApplyHeaders(h http.Header) { if opts.Range != (DownloadRange{}) { if opts.Range.Length == -1 { @@ -320,9 +326,6 @@ func (opts HeadObjectOptions) ApplyHeaders(h http.Header) { h.Set("Range", fmt.Sprintf("bytes=%v-%v", opts.Range.Offset, opts.Range.Offset+opts.Range.Length-1)) } } - if opts.IgnoreDelim { - h.Set("ignoreDelim", "true") - } } func (opts GetObjectOptions) Apply(values url.Values) { diff --git a/worker/client/client.go b/worker/client/client.go index 206b70d8b..6ef70f338 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -81,12 +81,9 @@ func (c *Client) DownloadStats() (resp api.DownloadStatsResponse, err error) { func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { c.c.Custom("HEAD", fmt.Sprintf("/objects/%s", path), nil, nil) - if strings.HasSuffix(path, "/") { - return nil, errors.New("the given path is a directory, HEAD can only be performed on objects") - } - values := url.Values{} values.Set("bucket", url.QueryEscape(bucket)) + opts.Apply(values) 
path = api.ObjectPathEscape(path) path += "?" + values.Encode() diff --git a/worker/worker.go b/worker/worker.go index 651f5e2b3..518664709 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -860,16 +860,21 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) { if jc.DecodeForm("bucket", &bucket) != nil { return } + var ignoreDelim bool + if jc.DecodeForm("ignoreDelim", &ignoreDelim) != nil { + return + } // parse path path := jc.PathParam("path") - if path == "" || strings.HasSuffix(path, "/") { + if !ignoreDelim && (path == "" || strings.HasSuffix(path, "/")) { jc.Error(errors.New("HEAD requests can only be performed on objects, not directories"), http.StatusBadRequest) return } // fetch object metadata res, err := w.bus.Object(jc.Request.Context(), bucket, path, api.GetObjectOptions{ + IgnoreDelim: ignoreDelim, OnlyMetadata: true, }) if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { From e5df42cc7f32b9a83b03b5f1718e54126c09b3e1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 13:19:57 +0100 Subject: [PATCH 035/201] autopilot: fix build --- autopilot/ipfilter_test.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/autopilot/ipfilter_test.go b/autopilot/ipfilter_test.go index 74b2e56c5..29fc3c8cf 100644 --- a/autopilot/ipfilter_test.go +++ b/autopilot/ipfilter_test.go @@ -8,6 +8,7 @@ import ( "time" "go.sia.tech/core/types" + "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -61,20 +62,20 @@ func TestIPResolver(t *testing.T) { // test lookup error r.setNextErr(errors.New("unknown error")) - if _, err := ipr.lookup("example.com:1234"); !isErr(err, errors.New("unknown error")) { + if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errors.New("unknown error")) { t.Fatal("unexpected error", err) } // test IO timeout - no cache entry r.setNextErr(errIOTimeout) - if _, err := ipr.lookup("example.com:1234"); !isErr(err, errIOTimeout) { + if _, err := 
ipr.lookup("example.com:1234"); !utils.IsErr(err, errIOTimeout) { t.Fatal("unexpected error", err) } // test IO timeout - expired cache entry ipr.cache["example.com:1234"] = ipCacheEntry{subnets: []string{"a"}} r.setNextErr(errIOTimeout) - if _, err := ipr.lookup("example.com:1234"); !isErr(err, errIOTimeout) { + if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errIOTimeout) { t.Fatal("unexpected error", err) } @@ -89,19 +90,19 @@ func TestIPResolver(t *testing.T) { // test too many addresses - more than two r.setAddr("example.com", []net.IPAddr{{}, {}, {}}) - if _, err := ipr.lookup("example.com:1234"); !isErr(err, errTooManyAddresses) { + if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errTooManyAddresses) { t.Fatal("unexpected error", err) } // test too many addresses - two of the same type r.setAddr("example.com", []net.IPAddr{{IP: net.IPv4(1, 2, 3, 4)}, {IP: net.IPv4(1, 2, 3, 4)}}) - if _, err := ipr.lookup("example.com:1234"); !isErr(err, errTooManyAddresses) { + if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errTooManyAddresses) { t.Fatal("unexpected error", err) } // test invalid addresses r.setAddr("example.com", []net.IPAddr{{IP: ipv4Localhost}, {IP: net.IP{127, 0, 0, 2}}}) - if _, err := ipr.lookup("example.com:1234"); !isErr(err, errTooManyAddresses) { + if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errTooManyAddresses) { t.Fatal("unexpected error", err) } From 931b23edf889110999d1eb2a28a6b2626d584b5c Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 13 Mar 2024 14:09:06 +0100 Subject: [PATCH 036/201] stores: add TestContractMetricsQueryPlan --- stores/metrics.go | 7 +++++- stores/sql_test.go | 53 ++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 8 deletions(-) diff --git a/stores/metrics.go b/stores/metrics.go index 333ed8a42..3f069bd0c 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -555,11 +555,16 @@ func (s *SQLStore) 
findAggregatedContractPeriods(start time.Time, n uint64, inte return fmt.Errorf("failed to fetch distinct contract ids: %w", err) } + var indexHint string + if !isSQLite(tx) { + indexHint = "USE INDEX (idx_contracts_fcid_timestamp)" + } + for intervalStart := start; intervalStart.Before(end); intervalStart = intervalStart.Add(interval) { intervalEnd := intervalStart.Add(interval) for _, fcid := range fcids { var metrics []dbContractMetric - err := tx.Raw("SELECT * FROM contracts WHERE contracts.timestamp >= ? AND contracts.timestamp < ? AND contracts.fcid = ? LIMIT 1", unixTimeMS(intervalStart), unixTimeMS(intervalEnd), fileContractID(fcid)). + err := tx.Raw(fmt.Sprintf("SELECT * FROM contracts %s WHERE contracts.timestamp >= ? AND contracts.timestamp < ? AND contracts.fcid = ? LIMIT 1", indexHint), unixTimeMS(intervalStart), unixTimeMS(intervalEnd), fileContractID(fcid)). Scan(&metrics).Error if err != nil { return fmt.Errorf("failed to fetch contract metrics: %w", err) diff --git a/stores/sql_test.go b/stores/sql_test.go index 776e3e10e..2a7686cea 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -332,20 +332,27 @@ type sqliteQueryPlan struct { Detail string `json:"detail"` } -func (p sqliteQueryPlan) usesIndex() bool { +func (p sqliteQueryPlan) usesIndex(index string) bool { d := strings.ToLower(p.Detail) - return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") + if index == "" { + return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") + } + return strings.Contains(d, fmt.Sprintf("using index %s", index)) } //nolint:tagliatelle type mysqlQueryPlan struct { Extra string `json:"Extra"` PossibleKeys string `json:"possible_keys"` + Key string `json:"key"` } -func (p mysqlQueryPlan) usesIndex() bool { - d := strings.ToLower(p.Extra) - return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") +func (p mysqlQueryPlan) usesIndex(index string) bool { + if index == "" 
{ + d := strings.ToLower(p.Extra) + return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") + } + return p.Key == index } func TestQueryPlan(t *testing.T) { @@ -385,20 +392,52 @@ func TestQueryPlan(t *testing.T) { var explain sqliteQueryPlan if err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { t.Fatal(err) - } else if !explain.usesIndex() { + } else if !explain.usesIndex("") { t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) } } else { var explain mysqlQueryPlan if err := ss.db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { t.Fatal(err) - } else if !explain.usesIndex() { + } else if !explain.usesIndex("") { t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) } } } } +func TestContractMetricsQueryPlan(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + query := "SELECT * FROM contracts WHERE contracts.timestamp >= 1 AND contracts.timestamp < 2 AND contracts.fcid = '' LIMIT 1" + queryWithHint := strings.Replace(query, "WHERE", "USE INDEX (idx_contracts_fcid_timestamp) WHERE", 1) + + if isSQLite(ss.dbMetrics) { + // in SQLite the query uses the index we want by default + var explain sqliteQueryPlan + if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("index 'idx_contracts_fcid_timestamp' not used in query '%s', plan %+v", query, explain) + } + } else { + var explain mysqlQueryPlan + if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex("") || explain.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("index 'idx_contracts_fcid_timestamp' not expected to be used in query '%s' although it should use an index, plan %+v", 
query, explain) + } + + // update query to specify the index + if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN %s;", queryWithHint)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("index 'idx_contracts_fcid_timestamp' should've been used in query '%s', plan %+v", query, explain) + } + } +} + func TestApplyUpdatesErr(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() From f3d76768bcb860d741aad35cbaf6cef3821dbcf5 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 13 Mar 2024 14:24:51 +0100 Subject: [PATCH 037/201] stores: add helpers to clean up TestQueryPlan and TestContractMetricsQueryPlan --- stores/sql_test.go | 74 ++++++++++++++++++++++++++-------------------- 1 file changed, 42 insertions(+), 32 deletions(-) diff --git a/stores/sql_test.go b/stores/sql_test.go index 2a7686cea..170edbf87 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -388,20 +388,11 @@ func TestQueryPlan(t *testing.T) { } for _, query := range queries { - if isSQLite(ss.db) { - var explain sqliteQueryPlan - if err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { - t.Fatal(err) - } else if !explain.usesIndex("") { - t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) - } - } else { - var explain mysqlQueryPlan - if err := ss.db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { - t.Fatal(err) - } else if !explain.usesIndex("") { - t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) - } + plan := queryPlan(ss.db) + if err := explainQuery(ss.db, query, plan); err != nil { + t.Fatal(err) + } else if !plan.usesIndex("") { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, plan) } } } @@ -409,35 +400,54 @@ func TestQueryPlan(t *testing.T) { func TestContractMetricsQueryPlan(t *testing.T) { ss := newTestSQLStore(t, 
defaultTestSQLStoreConfig) defer ss.Close() + db := ss.dbMetrics - query := "SELECT * FROM contracts WHERE contracts.timestamp >= 1 AND contracts.timestamp < 2 AND contracts.fcid = '' LIMIT 1" - queryWithHint := strings.Replace(query, "WHERE", "USE INDEX (idx_contracts_fcid_timestamp) WHERE", 1) + query := "SELECT * FROM contracts c WHERE c.timestamp >= 1 AND c.timestamp < 2 AND c.fcid = '' LIMIT 1" + plan := queryPlan(db) + if err := explainQuery(db, query, plan); err != nil { + t.Fatal(err) + } - if isSQLite(ss.dbMetrics) { - // in SQLite the query uses the index we want by default - var explain sqliteQueryPlan - if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { - t.Fatal(err) - } else if !explain.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("index 'idx_contracts_fcid_timestamp' not used in query '%s', plan %+v", query, explain) + if isSQLite(db) { + // SQLite uses the index by default + if !plan.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("unexpected query plan %+v", plan) } } else { - var explain mysqlQueryPlan - if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { - t.Fatal(err) - } else if !explain.usesIndex("") || explain.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("index 'idx_contracts_fcid_timestamp' not expected to be used in query '%s' although it should use an index, plan %+v", query, explain) + // MySQL uses an index, but not 'idx_contracts_fcid_timestamp' + if !plan.usesIndex("") || plan.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("unexpected query plan %+v", plan) } - // update query to specify the index - if err := ss.dbMetrics.Raw(fmt.Sprintf("EXPLAIN %s;", queryWithHint)).Scan(&explain).Error; err != nil { + // redo the query with hint + queryWithHint := strings.Replace(query, "WHERE", "USE INDEX (idx_contracts_fcid_timestamp) WHERE", 1) + if err := explainQuery(db, queryWithHint, plan); err != nil { 
t.Fatal(err) - } else if !explain.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("index 'idx_contracts_fcid_timestamp' should've been used in query '%s', plan %+v", query, explain) + } + + // assert it uses 'idx_contracts_fcid_timestamp' now + if !plan.usesIndex("idx_contracts_fcid_timestamp") { + t.Fatalf("unexpected query plan %+v", plan) } } } +func queryPlan(db *gorm.DB) interface{ usesIndex(index string) bool } { + if isSQLite(db) { + return &sqliteQueryPlan{} + } + return &mysqlQueryPlan{} +} + +func explainQuery(db *gorm.DB, query string, res interface{}) (err error) { + if isSQLite(db) { + err = db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&res).Error + } else { + err = db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&res).Error + } + return +} + func TestApplyUpdatesErr(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() From 1ee2f044d1849bc7f9959a6be3d0c75984865f23 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 13 Mar 2024 16:51:39 +0100 Subject: [PATCH 038/201] stores: fix ListObjects not returning etag or mimetype --- internal/test/e2e/s3_test.go | 7 +++++++ stores/metadata.go | 2 +- stores/metadata_test.go | 7 +++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index bb9745d51..cb4c7cd60 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -499,6 +499,13 @@ func TestS3List(t *testing.T) { if !cmp.Equal(test.want, got) { t.Errorf("test %d: unexpected response, want %v got %v", i, test.want, got) } + for _, obj := range result.Contents { + if obj.ETag == "" { + t.Fatal("expected non-empty ETag") + } else if obj.LastModified.IsZero() { + t.Fatal("expected non-zero LastModified") + } + } } } diff --git a/stores/metadata.go b/stores/metadata.go index cfcde4744..0733ad567 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2894,7 +2894,7 @@ func (s *SQLStore) ListObjects(ctx 
context.Context, bucket, prefix, sortBy, sort } var rows []rawObjectMetadata if err := s.db. - Select("o.object_id as Name, o.size as Size, o.health as Health, o.mime_type as mimeType, o.created_at as ModTime"). + Select("o.object_id as Name, o.size as Size, o.health as Health, o.mime_type as MimeType, o.created_at as ModTime, o.etag as ETag"). Model(&dbObject{}). Table("objects o"). Joins("INNER JOIN buckets b ON o.db_bucket_id = b.id"). diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 47d004957..55bf93573 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -3492,6 +3492,13 @@ func TestListObjects(t *testing.T) { {"/foo", "size", "ASC", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, {"/foo", "size", "DESC", "", []api.ObjectMetadata{{Name: "/foo/baz/quuz", Size: 4, Health: .5}, {Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/bar", Size: 1, Health: 1}}}, } + // set common fields + for i := range tests { + for j := range tests[i].want { + tests[i].want[j].ETag = testETag + tests[i].want[j].MimeType = testMimeType + } + } for _, test := range tests { res, err := ss.ListObjects(ctx, api.DefaultBucketName, test.prefix, test.sortBy, test.sortDir, "", -1) if err != nil { From 1c68b131ebe55e4b05e03d43ebe97cfbb889a88c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 14 Mar 2024 09:52:29 +0100 Subject: [PATCH 039/201] s3: fix markers --- s3/backend.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/s3/backend.go b/s3/backend.go index 5261bd5f7..0e3a3e61e 100644 --- a/s3/backend.go +++ b/s3/backend.go @@ -88,6 +88,11 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 page.MaxKeys = maxKeysDefault } + // Adjust marker + if page.HasMarker { + page.Marker = "/" + page.Marker + } + var 
objects []api.ObjectMetadata var err error response := gofakes3.NewObjectList() @@ -142,6 +147,10 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } + // Remove the leading slash from the marker since we also do that for the + // name of each object + response.NextMarker = strings.TrimPrefix(response.NextMarker, "/") + // Loop over the entries and add them to the response. for _, object := range objects { key := strings.TrimPrefix(object.Name, "/") From 326320d699adce503708645c8ebe3799519afe30 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 14 Mar 2024 10:25:34 +0100 Subject: [PATCH 040/201] e2e: extend TestS3List --- internal/test/e2e/s3_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index cb4c7cd60..6c13e8426 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -507,6 +507,23 @@ func TestS3List(t *testing.T) { } } } + + // use pagination to loop over objects one-by-one + marker := "" + expectedOrder := []string{"a/", "a/a/a", "a/b", "ab", "b", "c/a", "d", "y/", "y/y/y/y"} + hasMore := true + for i := 0; hasMore; i++ { + result, err := core.ListObjectsV2("bucket", "", "", marker, "", 1) + if err != nil { + t.Fatal(err) + } else if len(result.Contents) != 1 { + t.Fatalf("unexpected number of objects, %d != 1", len(result.Contents)) + } else if result.Contents[0].Key != expectedOrder[i] { + t.Errorf("unexpected object, %s != %s", result.Contents[0].Key, expectedOrder[i]) + } + marker = result.NextContinuationToken + hasMore = result.IsTruncated + } } func TestS3MultipartUploads(t *testing.T) { From 9040557ba469ffdbe18cfe19da485d57f8c3e217 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 14 Mar 2024 10:42:08 +0100 Subject: [PATCH 041/201] renterd: use utils.IsErr instead of strings.Contains --- autopilot/autopilot.go | 4 ++-- 
autopilot/contractor.go | 2 +- cmd/renterd/main.go | 4 +++- s3/authentication.go | 4 ++-- s3/backend.go | 27 ++++++++++++++------------- stores/migrations.go | 4 ++-- worker/download.go | 4 ++-- worker/worker.go | 12 ++++++------ 8 files changed, 32 insertions(+), 29 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index c53e4ec4c..eb08c9456 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -632,7 +632,7 @@ func (ap *Autopilot) isStopped() bool { func (ap *Autopilot) configHandlerGET(jc jape.Context) { autopilot, err := ap.bus.Autopilot(jc.Request.Context(), ap.id) - if err != nil && strings.Contains(err.Error(), api.ErrAutopilotNotFound.Error()) { + if utils.IsErr(err, api.ErrAutopilotNotFound) { jc.Error(errors.New("autopilot is not configured yet"), http.StatusNotFound) return } @@ -654,7 +654,7 @@ func (ap *Autopilot) configHandlerPUT(jc jape.Context) { // fetch the autopilot and update its config var contractSetChanged bool autopilot, err := ap.bus.Autopilot(jc.Request.Context(), ap.id) - if err != nil && strings.Contains(err.Error(), api.ErrAutopilotNotFound.Error()) { + if utils.IsErr(err, api.ErrAutopilotNotFound) { autopilot = api.Autopilot{ID: ap.id, Config: cfg} } else { if autopilot.Config.Contracts.Set != cfg.Contracts.Set { diff --git a/autopilot/contractor.go b/autopilot/contractor.go index d69f2a354..8dfc702f3 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -1004,7 +1004,7 @@ func (c *contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont ctx, cancel := context.WithTimeout(ctx, timeoutBroadcastRevision) err := w.RHPBroadcast(ctx, contract.ID) cancel() - if err != nil && strings.Contains(err.Error(), "transaction has a file contract with an outdated revision number") { + if utils.IsErr(err, errors.New("transaction has a file contract with an outdated revision number")) { continue // don't log - revision was already broadcasted } else if err != nil { 
c.logger.Warnw(fmt.Sprintf("failed to broadcast contract revision: %v", err), diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 98e075d92..093747796 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -3,6 +3,7 @@ package main import ( "context" "encoding/json" + "errors" "flag" "fmt" "log" @@ -24,6 +25,7 @@ import ( "go.sia.tech/renterd/bus" "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/node" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/s3" "go.sia.tech/renterd/stores" "go.sia.tech/renterd/worker" @@ -224,7 +226,7 @@ func parseEnvVar(s string, v interface{}) { func listenTCP(logger *zap.Logger, addr string) (net.Listener, error) { l, err := net.Listen("tcp", addr) - if err != nil && strings.Contains(err.Error(), "no such host") && strings.Contains(addr, "localhost") { + if utils.IsErr(err, errors.New("no such host")) && strings.Contains(addr, "localhost") { // fall back to 127.0.0.1 if 'localhost' doesn't work _, port, err := net.SplitHostPort(addr) if err != nil { diff --git a/s3/authentication.go b/s3/authentication.go index 215da52b7..67017356b 100644 --- a/s3/authentication.go +++ b/s3/authentication.go @@ -5,11 +5,11 @@ import ( "fmt" "io" "net/http" - "strings" "go.sia.tech/gofakes3" "go.sia.tech/gofakes3/signature" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" ) var ( @@ -89,7 +89,7 @@ func newAuthenticatedBackend(b *s3) *authenticatedBackend { func (b *authenticatedBackend) applyBucketPolicy(ctx context.Context, bucketName string, p *permissions) error { bucket, err := b.backend.b.Bucket(ctx, bucketName) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + if utils.IsErr(err, api.ErrBucketNotFound) { return gofakes3.BucketNotFound(bucketName) } else if err != nil { return gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) diff --git a/s3/backend.go b/s3/backend.go index 5261bd5f7..62f7a3e76 100644 --- a/s3/backend.go +++ b/s3/backend.go @@ -10,6 +10,7 
@@ import ( "go.sia.tech/gofakes3" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.uber.org/zap" ) @@ -109,7 +110,7 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 } var res api.ObjectsResponse res, err = s.b.Object(ctx, bucketName, path, opts) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + if utils.IsErr(err, api.ErrBucketNotFound) { return nil, gofakes3.BucketNotFound(bucketName) } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) @@ -129,7 +130,7 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 var res api.ObjectsListResponse res, err = s.b.ListObjects(ctx, bucketName, opts) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + if utils.IsErr(err, api.ErrBucketNotFound) { return nil, gofakes3.BucketNotFound(bucketName) } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) @@ -169,7 +170,7 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 // gofakes3.ErrBucketAlreadyExists MUST be returned. func (s *s3) CreateBucket(ctx context.Context, name string) error { err := s.b.CreateBucket(ctx, name, api.CreateBucketOptions{}) - if err != nil && strings.Contains(err.Error(), api.ErrBucketExists.Error()) { + if utils.IsErr(err, api.ErrBucketExists) { return gofakes3.ErrBucketAlreadyExists } else if err != nil { return gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) @@ -183,7 +184,7 @@ func (s *s3) CreateBucket(ctx context.Context, name string) error { // TODO: backend could be improved to allow for checking specific dir in root. 
func (s *s3) BucketExists(ctx context.Context, name string) (bool, error) { _, err := s.b.Bucket(ctx, name) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + if utils.IsErr(err, api.ErrBucketNotFound) { return false, nil } else if err != nil { return false, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) @@ -203,9 +204,9 @@ func (s *s3) BucketExists(ctx context.Context, name string) (bool, error) { // atomically checking whether a bucket is empty. func (s *s3) DeleteBucket(ctx context.Context, name string) error { err := s.b.DeleteBucket(ctx, name) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotEmpty.Error()) { + if utils.IsErr(err, api.ErrBucketNotEmpty) { return gofakes3.ErrBucketNotEmpty - } else if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + } else if utils.IsErr(err, api.ErrBucketNotFound) { return gofakes3.BucketNotFound(name) } else if err != nil { return gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) @@ -243,9 +244,9 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range } res, err := s.w.GetObject(ctx, bucketName, objectName, opts) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + if utils.IsErr(err, api.ErrBucketNotFound) { return nil, gofakes3.BucketNotFound(bucketName) - } else if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { + } else if utils.IsErr(err, api.ErrObjectNotFound) { return nil, gofakes3.KeyNotFound(objectName) } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) @@ -298,7 +299,7 @@ func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*go res, err := s.w.HeadObject(ctx, bucketName, objectName, api.HeadObjectOptions{ IgnoreDelim: true, }) - if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { + if utils.IsErr(err, api.ErrObjectNotFound) { return nil, 
gofakes3.KeyNotFound(objectName) } else if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) @@ -346,9 +347,9 @@ func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*go // isn't a null version, Amazon S3 does not remove any objects. func (s *s3) DeleteObject(ctx context.Context, bucketName, objectName string) (gofakes3.ObjectDeleteResult, error) { err := s.b.DeleteObject(ctx, bucketName, objectName, api.DeleteObjectOptions{}) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + if utils.IsErr(err, api.ErrBucketNotFound) { return gofakes3.ObjectDeleteResult{}, gofakes3.BucketNotFound(bucketName) - } else if err != nil && !strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { + } else if err != nil && !utils.IsErr(err, api.ErrObjectNotFound) { return gofakes3.ObjectDeleteResult{}, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } @@ -371,7 +372,7 @@ func (s *s3) PutObject(ctx context.Context, bucketName, key string, meta map[str } ur, err := s.w.UploadObject(ctx, input, bucketName, key, opts) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + if utils.IsErr(err, api.ErrBucketNotFound) { return gofakes3.PutObjectResult{}, gofakes3.BucketNotFound(bucketName) } else if err != nil { return gofakes3.PutObjectResult{}, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) @@ -387,7 +388,7 @@ func (s *s3) DeleteMulti(ctx context.Context, bucketName string, objects ...stri var res gofakes3.MultiDeleteResult for _, objectName := range objects { err := s.b.DeleteObject(ctx, bucketName, objectName, api.DeleteObjectOptions{}) - if err != nil && !strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { + if err != nil && !utils.IsErr(err, api.ErrObjectNotFound) { res.Error = append(res.Error, gofakes3.ErrorResult{ Key: objectName, Code: gofakes3.ErrInternal, diff --git a/stores/migrations.go b/stores/migrations.go index 6395b52bb..b0304090e 100644 
--- a/stores/migrations.go +++ b/stores/migrations.go @@ -3,9 +3,9 @@ package stores import ( "errors" "fmt" - "strings" "github.com/go-gormigrate/gormigrate/v2" + "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" "gorm.io/gorm" ) @@ -32,7 +32,7 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { ID: "00002_prune_slabs_trigger", Migrate: func(tx *gorm.DB) error { err := performMigration(tx, dbIdentifier, "00002_prune_slabs_trigger", logger) - if err != nil && strings.Contains(err.Error(), errMySQLNoSuperPrivilege.Error()) { + if utils.IsErr(err, errMySQLNoSuperPrivilege) { logger.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") } return err diff --git a/worker/download.go b/worker/download.go index 3a58bbc98..6f070acbd 100644 --- a/worker/download.go +++ b/worker/download.go @@ -7,13 +7,13 @@ import ( "fmt" "io" "math" - "strings" "sync" "time" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/stats" "go.uber.org/zap" @@ -454,7 +454,7 @@ func (mgr *downloadManager) numDownloaders() int { // in the partial slab buffer. func (mgr *downloadManager) fetchPartialSlab(ctx context.Context, key object.EncryptionKey, offset, length uint32) ([]byte, *object.Slab, error) { data, err := mgr.os.FetchPartialSlab(ctx, key, offset, length) - if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { + if utils.IsErr(err, api.ErrObjectNotFound) { // Check if slab was already uploaded. 
slab, err := mgr.os.Slab(ctx, key) if err != nil { diff --git a/worker/worker.go b/worker/worker.go index dab8ef30a..99323b501 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -878,7 +878,7 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) { IgnoreDelim: ignoreDelim, OnlyMetadata: true, }) - if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { + if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) return } else if err != nil { @@ -951,7 +951,7 @@ func (w *worker) objectsHandlerGET(jc jape.Context) { path := jc.PathParam("path") res, err := w.bus.Object(ctx, bucket, path, opts) - if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { + if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) return } else if jc.Check("couldn't get object or entries", err) != nil { @@ -1041,7 +1041,7 @@ func (w *worker) objectsHandlerPUT(jc jape.Context) { // return early if the bucket does not exist _, err = w.bus.Bucket(ctx, bucket) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + if utils.IsErr(err, api.ErrBucketNotFound) { jc.Error(fmt.Errorf("bucket '%s' not found; %w", bucket, err), http.StatusNotFound) return } @@ -1160,7 +1160,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { // return early if the bucket does not exist _, err = w.bus.Bucket(ctx, bucket) - if err != nil && strings.Contains(err.Error(), api.ErrBucketNotFound.Error()) { + if utils.IsErr(err, api.ErrBucketNotFound) { jc.Error(fmt.Errorf("bucket '%s' not found; %w", bucket, err), http.StatusNotFound) return } @@ -1263,7 +1263,7 @@ func (w *worker) objectsHandlerDELETE(jc jape.Context) { return } err := w.bus.DeleteObject(jc.Request.Context(), bucket, jc.PathParam("path"), api.DeleteObjectOptions{Batch: batch}) - if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { + if utils.IsErr(err, api.ErrObjectNotFound) { 
jc.Error(err, http.StatusNotFound) return } @@ -1563,5 +1563,5 @@ func isErrHostUnreachable(err error) bool { } func isErrDuplicateTransactionSet(err error) bool { - return err != nil && strings.Contains(err.Error(), modules.ErrDuplicateTransactionSet.Error()) + return utils.IsErr(err, modules.ErrDuplicateTransactionSet) } From 8f6fca82efa72e659d701942d977e9bde829abe9 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 14 Mar 2024 15:56:44 +0100 Subject: [PATCH 042/201] ci: add workflow to automatically add issues and prs to project board --- .github/workflows/project-add.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/project-add.yml diff --git a/.github/workflows/project-add.yml b/.github/workflows/project-add.yml new file mode 100644 index 000000000..526a04603 --- /dev/null +++ b/.github/workflows/project-add.yml @@ -0,0 +1,21 @@ +name: Add bugs to bugs project + +on: + issues: + types: + - opened + pull_request: + types: + - opened + +jobs: + add-to-project: + name: Add issue to project + runs-on: ubuntu-latest + steps: + - uses: actions/add-to-project@v0.5.0 + with: + # You can target a project in a different organization + # to the issue + project-url: https://github.com/orgs/SiaFoundation/projects/5 + github-token: ${{ secrets.PAT_ADD_TO_PROJECT }} From 5c60e877eef0902040d190807e4393431622439b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 14 Mar 2024 16:38:26 +0100 Subject: [PATCH 043/201] ci: update project-add.yml --- .github/workflows/project-add.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/project-add.yml b/.github/workflows/project-add.yml index 526a04603..3304fc0db 100644 --- a/.github/workflows/project-add.yml +++ b/.github/workflows/project-add.yml @@ -1,4 +1,4 @@ -name: Add bugs to bugs project +name: Add issues and PRs to Sia project on: issues: From c9f7ebabbb8a9f8559e4e5b916ae2cb1b483658b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl 
Date: Fri, 15 Mar 2024 10:43:21 +0100 Subject: [PATCH 044/201] stores: add setting type --- stores/settingsdb.go | 10 +++++----- stores/types.go | 30 ++++++++++++++++++++++++++++++ stores/types_test.go | 14 ++++++++++++++ 3 files changed, 49 insertions(+), 5 deletions(-) create mode 100644 stores/types_test.go diff --git a/stores/settingsdb.go b/stores/settingsdb.go index 3b1eafc31..f7ba1e82d 100644 --- a/stores/settingsdb.go +++ b/stores/settingsdb.go @@ -14,8 +14,8 @@ type ( dbSetting struct { Model - Key string `gorm:"unique;index;NOT NULL"` - Value string `gorm:"NOT NULL"` + Key string `gorm:"unique;index;NOT NULL"` + Value setting `gorm:"NOT NULL"` } ) @@ -52,8 +52,8 @@ func (s *SQLStore) Setting(ctx context.Context, key string) (string, error) { } else if err != nil { return "", err } - s.settings[key] = entry.Value - return entry.Value, nil + s.settings[key] = string(entry.Value) + return string(entry.Value), nil } // Settings implements the bus.SettingStore interface. @@ -74,7 +74,7 @@ func (s *SQLStore) UpdateSetting(ctx context.Context, key, value string) error { DoUpdates: clause.AssignmentColumns([]string{"value"}), }).Create(&dbSetting{ Key: key, - Value: value, + Value: setting(value), }).Error if err != nil { return err diff --git a/stores/types.go b/stores/types.go index 42a8d29e4..4e4be2e45 100644 --- a/stores/types.go +++ b/stores/types.go @@ -35,8 +35,38 @@ type ( balance big.Int unsigned64 uint64 // used for storing large uint64 values in sqlite secretKey []byte + setting string ) +// GormDataType implements gorm.GormDataTypeInterface. +func (setting) GormDataType() string { + return "string" +} + +// String implements fmt.Stringer to prevent "s3authentication" settings from +// getting leaked. 
+func (s setting) String() string { + if strings.Contains(string(s), "v4Keypairs") { + return "*****" + } + return string(s) +} + +// Scan scans value into the setting +func (s *setting) Scan(value interface{}) error { + str, ok := value.(string) + if !ok { + return errors.New(fmt.Sprint("failed to unmarshal setting value:", value)) + } + *s = setting(str) + return nil +} + +// Value returns an key value, implements driver.Valuer interface. +func (s setting) Value() (driver.Value, error) { + return string(s), nil +} + // GormDataType implements gorm.GormDataTypeInterface. func (secretKey) GormDataType() string { return "bytes" diff --git a/stores/types_test.go b/stores/types_test.go new file mode 100644 index 000000000..c985dd012 --- /dev/null +++ b/stores/types_test.go @@ -0,0 +1,14 @@ +package stores + +import "testing" + +func TestTypeSetting(t *testing.T) { + s1 := setting("some setting") + s2 := setting("v4Keypairs") + + if s1.String() != "some setting" { + t.Fatal("unexpected string") + } else if s2.String() != "*****" { + t.Fatal("unexpected string") + } +} From deb4af4c4fd2939b665919e3227e64bce182a7d5 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 15 Mar 2024 11:26:24 +0100 Subject: [PATCH 045/201] stores: add []byte case to Scan --- stores/types.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/stores/types.go b/stores/types.go index 4e4be2e45..a8f753077 100644 --- a/stores/types.go +++ b/stores/types.go @@ -54,11 +54,14 @@ func (s setting) String() string { // Scan scans value into the setting func (s *setting) Scan(value interface{}) error { - str, ok := value.(string) - if !ok { - return errors.New(fmt.Sprint("failed to unmarshal setting value:", value)) + switch value := value.(type) { + case string: + *s = setting(value) + case []byte: + *s = setting(value) + default: + return fmt.Errorf("failed to unmarshal setting value from type %t", value) } - *s = setting(str) return nil } From 
d094f0c09d9e396a60ed594afcd112d7ea257ed4 Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 15 Mar 2024 15:52:21 +0100 Subject: [PATCH 046/201] docs: update backup docs --- README.md | 178 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 123 insertions(+), 55 deletions(-) diff --git a/README.md b/README.md index 10ef2b0cb..5decc5568 100644 --- a/README.md +++ b/README.md @@ -25,92 +25,160 @@ A project roadmap is available on [GitHub](https://github.com/orgs/SiaFoundation ## Backups -This section provides instructions on creating backups for `renterd` and -restoring from those backups. Taking backups at regular intervals and testing -whether you can restore from such a backup are crucial for ensuring the -integrity and availability of your data. Make sure to never store your backups -on the same machine `renterd` is running on. +This section provides a step-by-step guide on how to create a backup for +`renterd` and restore from that backup. Taking backups at regular intervals and +testing whether you can restore from such a backup are _crucial_ for ensuring +the integrity and availability of your data. Also make sure to never store your +backups on the same machine `renterd` is running on. -### Databases +--- +**IMPORTANT NOTE** -When uploading data to the Sia Network, `renterd` stores all necessary metadata -to be able to download that data in its SQL databases. `renterd` uses two -databases: +Having a backup is not enough to ensure the data can be recovered in a disaster +scenario. The renter has to be online enough of the time to ensure the data gets +migrated away from hosts that went offline. -- **main database**: contains all object -, contracts - and host metadata -- **metrics database**: contains contract spending, performance metrics +--- -The main database is the most important one because it contains the object -metadata, which is crucial to be able to recover your data. 
The `metrics` -database is less important but there's various UI features that depend on it. +### Backup renterd ---- -**NOTE** +#### Step 1: shutdown renter -The name of these databases are configurable (see `RENTERD_DB_NAME` and -`RENTERD_DB_METRICS_NAME`), so make sure to use the configured values, the -following section will assume `renterd` defaults which are `renterd` and -`renterd_metrics` for the main and metrics database respectively. +To be on the safe side, we advise shutting down the renter before taking a +backup. The main reason is a feature called "upload packing", which is enabled +by default, which uses on-disk buffers. Taking a backup of a running `renterd` +node would not ensure consistency between what is in the database and what is +on-disk. Even if that feature is disabled, it is still advisable to shut down +the renter before taking a backup. ---- +#### Step 2: backing up the database + +`renterd` stores everything in two SQL databases, one main database and another +one for metrics. The main database holds things like contract - and host + metadata, but most importantly it includes all necessary object metadata to be +able to download the data uploaded to the Sia network. It is very important to +have a working backup of this database to ensure files can be recovered in the +event the database got corrupted. The metrics database is less important, but +there's various UI features that depend on it so we advise to back that up as +well. -Depending on the user's configuration, these database are either SQLite or MySQL -database. The following section outlines how to backup `renterd` in both -scenarios. +Depending on how the renter is configured, the databases are either SQLite +(default) or MySQL databases. By default, the SQLite databases are called +`db.sqlite` and `metrics.sqlite` for the main and metrics database respectively. +These are again configurable, for the sake of this documentation however we'll +assume the defaults are used. 
-#### SQLite +These databases are located in a folder called `db`, right in the renter's root +directory. -Backing up a SQLite database can be done using the following command: +**SQLite** + +For SQLite, a backup is best taken using the `.backup` command, specifying the +database we want to backup and a name for the backup. There should only be two +files in the `db` folder, if you encounter write-ahead log files or index files +(usually named `X-wal` or `X-shm`) it's an indication the renter was not shut +down gracefully. In that case it's best to restart the renter and shut it down +again. ```bash sqlite3 db.sqlite ".backup 'db.bkp'" sqlite3 metrics.sqlite ".backup 'metrics.bkp'" ``` -There is an alternative `.dump` command, which exports the database into a text -file containing SQL statements. This backup is useful in its own, because it's -more portable and can be used to import the contents of the database into -another database on another system entirely. The `.backup` however yields a -byte-for-byte replica of the original database file, it is usually a lot faster -on large databases and it can be performed on a database that's actively being -read or written to, even though we advise to shut down the renter before taking -a backup. - -Restoring from a backup is as simple as putting the backup in place of the original. -Another useful tool for backing up SQLite database is https://litestream.io/. - -#### MySQL +**MySQL** Backuping up a MySQL database can be done using the `mysqldump` command. It's a utility provided by MySQL to backup or transfer a MySQL database. It's usually installed alongside the MySQL cient tools. 
-The following command assumes MySQL is being ran from within a docker container: - ```bash -docker exec [MYSQL_CONTAINER_NAME] /usr/bin/mysqldump -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd > renterd_bkp.sql +mysqldump -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd > renterd_bkp.sql +mysqldump -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd_metrics > renterd_metrics_bkp.sql +``` -docker exec [MYSQL_CONTAINER_NAME] /usr/bin/mysqldump -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd_metrics > renterd_metrics_bkp.sql +#### Step 3: backing up partial slabs + +For users that have upload packing enabled, it is very important to back up +partial slabs alongside the database backups. These partial slabs are +essentially a sort of buffer that gets uploaded to the network when that buffer +reaches a certain size, drastically speeding up a bunch of small file uploads. +These partial slabs are located in a folder called `partial_slabs`, right in the +renter's root directory. + +```bash +tar -cvf partial_slabs.tar partial_slabs/ ``` -Restoring from this backup can be done using: +### Install from a backup + +The easiest way to install from a backup is to do a fresh install of `renterd` +and then following the steps outlined in the section below where we restore from +a backup (e.g. overwrite the database). The most important thing to keep in mind +is to use the same seed (`RENTERD_SEED`) as the one in the backup. + +When you have the new renter up and running and consensus is synced, we don't +have to go through configuring it because we'll overwrite the settings in the +following section. + +### Restore a backup + +#### Step 1: shutdown renter + +Same as before we advise to shut down the renter before reinstating a backup to +ensure consistency. It's a good idea to backup the database right before trying +to restore a backup to be safe and have a way out in case overwriting the +database renders it corrupt somehow. 
+ +#### Step 2: restore the database backup + +**SQLite** + +For SQLite we can reinstate the database by replacing both `.sqlite` files with +our backups. + +**MySQL** + +Depending on when the backup was taken its schema might be out of date. To +maximize the chance the schema migrations go smoothly, it's advisable to +recreate the databases before importing the backup. Take a backup before doing +this. ```bash -cat renterd_bkp.sql | docker exec -i [MYSQL_CONTAINER_NAME] /usr/bin/mysql -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd +# log in to MySQL shell +mysql -u [RENTERD_DB_USER] -p + +# recreate renterd database +DROP DATABASE IF EXISTS renterd; +CREATE DATABASE renterd; -cat renterd_metrics_bkp.sql | docker exec -i [MYSQL_CONTAINER_NAME] /usr/bin/mysql -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd_metrics +# recreate renterd_metrics database +DROP DATABASE IF EXISTS renterd_metrics; +CREATE DATABASE renterd_metrics; ``` -### Partial Slabs +The backups can then be imported using the following commands: -For users that have upload packing enabled, it is very important to back up -partial slabs alongside the database backups. These partial slabs are -essentially a sort of buffer that gets uploaded to the network when that buffer -reaches the size of a full slab, drastically speeding up a bunch of small file -uploads. To ensure consistency between the database and these files on disk, it -is recommended to gracefully shut the renter down before taking a backup of its -database. These partial slabs are located in a folder called `partial_slabs`, -right in the root folder. +``` +cat renterd_bkp.sql | mysql -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd +cat renterd_metrics_bkp.sql | mysql -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd_metrics +``` +#### Step 3: restore the partial slabs + +If applicable, remove the contents of the `partial_slabs` directory and replace it with your backup. 
+ +```bash +rm -rf partial_slabs +tar -xvf partial_slabs.tar +``` + +#### Step 4: start the renter + +After starting the renter it is possible it has to run through migrations to its +database schema. Depending on when the backup was taken, this might take some +time. If we restored the backup on a fresh `renterd` install, it will take some +time for consensus to sync. + +#### Scenario 2: ## Docker Support From 2aaf9113e9aaf8a61a703fc682ce702005821fbe Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 15 Mar 2024 16:29:40 +0100 Subject: [PATCH 047/201] docs: update backup docs --- README.md | 102 +++++++++++++++++++++++------------------------------- 1 file changed, 43 insertions(+), 59 deletions(-) diff --git a/README.md b/README.md index 5decc5568..c1b444715 100644 --- a/README.md +++ b/README.md @@ -25,71 +25,64 @@ A project roadmap is available on [GitHub](https://github.com/orgs/SiaFoundation ## Backups -This section provides a step-by-step guide on how to create a backup for -`renterd` and restore from that backup. Taking backups at regular intervals and -testing whether you can restore from such a backup are _crucial_ for ensuring -the integrity and availability of your data. Also make sure to never store your -backups on the same machine `renterd` is running on. +This section provides a step-by-step guide covering the procedures for creating +and restoring backups for `renterd`. Regularly backing up your renter's metadata +and verifying its restorability are essential practices to ensure your data's +integrity and availability in the event of a disaster scenario. Ensure backups +are stored on a different machine than the one running `renterd` to avoid data +loss in case of hardware failure. --- **IMPORTANT NOTE** -Having a backup is not enough to ensure the data can be recovered in a disaster -scenario. The renter has to be online enough of the time to ensure the data gets -migrated away from hosts that went offline. 
+It is important to note that having a backup is not enough to ensure the data +can be recovered from the network. The renter has to be online enough of the +time to ensure data gets migrated away from hosts that went offline. --- -### Backup renterd +### Creating a backup -#### Step 1: shutdown renter +#### Step 1: shut down renter -To be on the safe side, we advise shutting down the renter before taking a -backup. The main reason is a feature called "upload packing", which is enabled -by default, which uses on-disk buffers. Taking a backup of a running `renterd` -node would not ensure consistency between what is in the database and what is -on-disk. Even if that feature is disabled, it is still advisable to shut down -the renter before taking a backup. +It's strongly recommended to shut down the renter before creating a backup to +ensure data consistency. This precaution addresses the "upload packing" feature +(enabled by default), which relies on on-disk buffers. To capture a consistent +state between the database and on-disk data, shut down `renterd` first. Even if +the feature is not enabled, it is best to shut down the renter before taking a +backup to be on the safe side and ensure consistency. #### Step 2: backing up the database -`renterd` stores everything in two SQL databases, one main database and another -one for metrics. The main database holds things like contract - and host - metadata, but most importantly it includes all necessary object metadata to be -able to download the data uploaded to the Sia network. It is very important to -have a working backup of this database to ensure files can be recovered in the -event the database got corrupted. The metrics database is less important, but -there's various UI features that depend on it so we advise to back that up as -well. +`renterd` uses two SQL databases: a main database for contracts, host metadata +and object metadata essential for data retrieval, and a metrics database for UI +features. 
Both are critical but prioritize the main database for file recovery.

Depending on how the renter is configured, the databases are either SQLite
(default) or MySQL databases. By default, the SQLite databases are called
-`db.sqlite` and `metrics.sqlite` for the main and metrics database respectively.
-These are again configurable, for the sake of this documentation however we'll
-assume the defaults are used.
-
-These databases are located in a folder called `db`, right in the renter's root
-directory.
+`db.sqlite` and `metrics.sqlite` and are located in a folder called `db`, right
+in the renter's root directory.

**SQLite**

-For SQLite, a backup is best taken using the `.backup` command, specifying the
-database we want to backup and a name for the backup. There should only be two
-files in the `db` folder, if you encounter write-ahead log files or index files
-(usually named `X-wal` or `X-shm`) it's an indication the renter was not shut
-down gracefully. In that case it's best to restart the renter and shut it down
-again.
+Use the `.backup` command to create a backup of the SQLite databases.

```bash
sqlite3 db.sqlite ".backup 'db.bkp'"
sqlite3 metrics.sqlite ".backup 'metrics.bkp'"
```

+There should only be two files in the `db` folder, if you encounter write-ahead
+log files or index files (usually named `-wal` or `-shm`) it indicates the
+renter was not shut down gracefully. In that case it's best to restart the
+renter and shut it down again.
+
**MySQL**

-Backuping up a MySQL database can be done using the `mysqldump` command. It's a
-utility provided by MySQL to backup or transfer a MySQL database. It's usually
-installed alongside the MySQL cient tools.
+Use the `mysqldump` command to create a backup of the MySQL databases. It's a
+utility provided by MySQL to backup or transfer a MySQL database and it's
+usually installed alongside the MySQL client tools. Replace placeholders with
+actual user and password.
```bash mysqldump -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd > renterd_bkp.sql @@ -98,29 +91,19 @@ mysqldump -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd_metrics #### Step 3: backing up partial slabs -For users that have upload packing enabled, it is very important to back up -partial slabs alongside the database backups. These partial slabs are -essentially a sort of buffer that gets uploaded to the network when that buffer -reaches a certain size, drastically speeding up a bunch of small file uploads. -These partial slabs are located in a folder called `partial_slabs`, right in the -renter's root directory. +If "upload packing" is enabled, back up the `partial_slabs` folder located in +the renter's root directory. These files contain data that has not been uploaded +to the network yet, losing these files means an immediate loss of your data. ```bash tar -cvf partial_slabs.tar partial_slabs/ ``` -### Install from a backup - -The easiest way to install from a backup is to do a fresh install of `renterd` -and then following the steps outlined in the section below where we restore from -a backup (e.g. overwrite the database). The most important thing to keep in mind -is to use the same seed (`RENTERD_SEED`) as the one in the backup. +### Restoring from a backup -When you have the new renter up and running and consensus is synced, we don't -have to go through configuring it because we'll overwrite the settings in the -following section. - -### Restore a backup +If the goal is to install `renterd` from a backup on a new machine, the easiest +way is to do a fresh `renterd` install and then overwrite the empty database +with the backup. Use the same `RENTERD_SEED` as the original installation. #### Step 1: shutdown renter @@ -134,7 +117,9 @@ database renders it corrupt somehow. **SQLite** For SQLite we can reinstate the database by replacing both `.sqlite` files with -our backups. +our backups. 
Make sure to rename them to their original filename `db.sqlite` and +`metrics.sqlite`. These filenames are configurable, so make sure you match the +configured values. **MySQL** @@ -162,6 +147,7 @@ The backups can then be imported using the following commands: cat renterd_bkp.sql | mysql -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd cat renterd_metrics_bkp.sql | mysql -u [RENTERD_DB_USER] --password=[RENTERD_DB_PASSWORD] renterd_metrics ``` + #### Step 3: restore the partial slabs If applicable, remove the contents of the `partial_slabs` directory and replace it with your backup. @@ -178,8 +164,6 @@ database schema. Depending on when the backup was taken, this might take some time. If we restored the backup on a fresh `renterd` install, it will take some time for consensus to sync. -#### Scenario 2: - ## Docker Support `renterd` includes a `Dockerfile` which can be used for building and running From b6f5f303ca28b9288e7bbe5b6d1fd4462cd35fd8 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 15 Mar 2024 17:30:05 +0100 Subject: [PATCH 048/201] stores: add index hint to ObjectEntries --- stores/metadata.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index c543695bd..e44cb3a63 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1210,6 +1210,11 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort offset = 0 } + indexHint := "" + if !isSQLite(s.db) { + indexHint = "USE INDEX (idx_object_bucket, idx_objects_created_at)" + } + onameExpr := fmt.Sprintf("CASE INSTR(SUBSTR(object_id, ?), '/') WHEN 0 THEN %s ELSE %s END", sqlConcat(s.db, "?", "SUBSTR(object_id, ?)"), sqlConcat(s.db, "?", "substr(SUBSTR(object_id, ?), 1, INSTR(SUBSTR(object_id, ?), '/'))"), @@ -1226,13 +1231,14 @@ FROM ( SUM(size) AS Size, MIN(health) as Health, ANY_VALUE(mime_type) as MimeType - FROM objects + FROM objects %s INNER JOIN buckets b ON objects.db_bucket_id = b.id 
WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? AND b.name = ? AND SUBSTR(%s, 1, ?) = ? AND %s != ? GROUP BY oname ) baseQuery `, onameExpr, + indexHint, onameExpr, onameExpr, ) From d4bea89017ddffd051998d46f0f473453a01369c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 15 Mar 2024 17:31:58 +0100 Subject: [PATCH 049/201] ci: remove deadcode linter --- .golangci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index 9aad5bd19..ace11db65 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -104,7 +104,6 @@ linters: - tagliatelle - unused - unparam - - deadcode issues: # Maximum issues count per one linter. Set to 0 to disable. Default is 50. From 64748a606925c7d6d1a3589be6889e57252d0305 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Mar 2024 01:49:31 +0000 Subject: [PATCH 050/201] build(deps): bump go.sia.tech/hostd from 1.0.2 to 1.0.3 Bumps [go.sia.tech/hostd](https://github.com/SiaFoundation/hostd) from 1.0.2 to 1.0.3. - [Release notes](https://github.com/SiaFoundation/hostd/releases) - [Commits](https://github.com/SiaFoundation/hostd/compare/v1.0.2...v1.0.3) --- updated-dependencies: - dependency-name: go.sia.tech/hostd dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e0933ff50..c81e1992a 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( go.sia.tech/core v0.2.1 go.sia.tech/coreutils v0.0.3 go.sia.tech/gofakes3 v0.0.0-20240311124002-c206381023db - go.sia.tech/hostd v1.0.2 + go.sia.tech/hostd v1.0.3 go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca diff --git a/go.sum b/go.sum index f42a8843a..6dbcfc5e9 100644 --- a/go.sum +++ b/go.sum @@ -242,8 +242,8 @@ go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= go.sia.tech/gofakes3 v0.0.0-20240311124002-c206381023db h1:t35K7tD79+ZZPHJ8XPaFopQvhGlQ5r1o9UgZnLOTvmc= go.sia.tech/gofakes3 v0.0.0-20240311124002-c206381023db/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= -go.sia.tech/hostd v1.0.2 h1:GjzNIAlwg3/dViF6258Xn5DI3+otQLRqmkoPDugP+9Y= -go.sia.tech/hostd v1.0.2/go.mod h1:zGw+AGVmazAp4ydvo7bZLNKTy1J51RI6Mp/oxRtYT6c= +go.sia.tech/hostd v1.0.3 h1:BCaFg6DGf33JEH/5DqFj6cnaz3EbiyjpbhfSj/Lo6e8= +go.sia.tech/hostd v1.0.3/go.mod h1:R+01UddrgmAUcdBkEO8VcnYqPX/mod45DC5m/v/crzE= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= From ac9d0aab23226ca64687c6e8fd9e8d00f79d9698 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Mar 2024 08:47:33 +0000 Subject: [PATCH 051/201] build(deps): bump go.sia.tech/gofakes3 Bumps [go.sia.tech/gofakes3](https://github.com/SiaFoundation/gofakes3) from 0.0.0-20240311124002-c206381023db to 0.0.1. 
- [Commits](https://github.com/SiaFoundation/gofakes3/commits/v0.0.1) --- updated-dependencies: - dependency-name: go.sia.tech/gofakes3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c81e1992a..c317ab683 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe go.sia.tech/core v0.2.1 go.sia.tech/coreutils v0.0.3 - go.sia.tech/gofakes3 v0.0.0-20240311124002-c206381023db + go.sia.tech/gofakes3 v0.0.1 go.sia.tech/hostd v1.0.3 go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 diff --git a/go.sum b/go.sum index 6dbcfc5e9..519a9325e 100644 --- a/go.sum +++ b/go.sum @@ -240,8 +240,8 @@ go.sia.tech/core v0.2.1 h1:CqmMd+T5rAhC+Py3NxfvGtvsj/GgwIqQHHVrdts/LqY= go.sia.tech/core v0.2.1/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= -go.sia.tech/gofakes3 v0.0.0-20240311124002-c206381023db h1:t35K7tD79+ZZPHJ8XPaFopQvhGlQ5r1o9UgZnLOTvmc= -go.sia.tech/gofakes3 v0.0.0-20240311124002-c206381023db/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= +go.sia.tech/gofakes3 v0.0.1 h1:8vtYH/B17NJ4GXLWiONfhwBrrmtJtYiofnO3PfjU298= +go.sia.tech/gofakes3 v0.0.1/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= go.sia.tech/hostd v1.0.3 h1:BCaFg6DGf33JEH/5DqFj6cnaz3EbiyjpbhfSj/Lo6e8= go.sia.tech/hostd v1.0.3/go.mod h1:R+01UddrgmAUcdBkEO8VcnYqPX/mod45DC5m/v/crzE= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= From 5440f904448dfb4b541bbc636b4793010e24a28c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 18 Mar 2024 10:51:41 +0100 Subject: [PATCH 052/201] stores: fix docstring --- 
stores/types.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/types.go b/stores/types.go index a8f753077..a2628d834 100644 --- a/stores/types.go +++ b/stores/types.go @@ -65,7 +65,7 @@ func (s *setting) Scan(value interface{}) error { return nil } -// Value returns an key value, implements driver.Valuer interface. +// Value returns a setting value, implements driver.Valuer interface. func (s setting) Value() (driver.Value, error) { return string(s), nil } From acf6d9c69ffa670be05d88eb258d736e0c24b568 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 15 Mar 2024 17:25:11 +0100 Subject: [PATCH 053/201] main: update logging config to match hostd's --- cmd/renterd/logger.go | 114 +++++++++++++++++++++++++++++++++++++++++ cmd/renterd/main.go | 44 +++++++++++----- config/config.go | 27 ++++++++-- go.mod | 1 + go.sum | 22 ++++++++ internal/node/node.go | 47 ++--------------- stores/logger.go | 116 ------------------------------------------ stores/sql_test.go | 7 +-- 8 files changed, 196 insertions(+), 182 deletions(-) create mode 100644 cmd/renterd/logger.go delete mode 100644 stores/logger.go diff --git a/cmd/renterd/logger.go b/cmd/renterd/logger.go new file mode 100644 index 000000000..964a1ce39 --- /dev/null +++ b/cmd/renterd/logger.go @@ -0,0 +1,114 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "go.sia.tech/renterd/config" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func NewLogger(dir string, cfg config.Log) (*zap.Logger, func(context.Context) error, error) { + // path + path := filepath.Join(dir, "renterd.log") + if cfg.Path != "" { + path = filepath.Join(cfg.Path, "renterd.log") + } + + if cfg.File.Path != "" { + path = cfg.File.Path + } + + // log level + level, err := zapcore.ParseLevel(cfg.Path) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse log level: %w", err) + } + + fileLevel := level + if cfg.File.Level != "" { + fileLevel, err = zapcore.ParseLevel(cfg.File.Level) 
+ if err != nil { + return nil, nil, fmt.Errorf("failed to parse file log level: %w", err) + } + } + + stdoutLevel := level + if cfg.StdOut.Level != "" { + stdoutLevel, err = zapcore.ParseLevel(cfg.StdOut.Level) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse stdout log level: %w", err) + } + } + + closeFn := func(_ context.Context) error { return nil } + + // file logger + var cores []zapcore.Core + if cfg.File.Enabled { + writer, cleanup, err := zap.Open(path) + if err != nil { + return nil, nil, err + } + closeFn = func(_ context.Context) error { + _ = writer.Sync() // ignore Error + cleanup() + return nil + } + + var encoder zapcore.Encoder + switch cfg.File.Format { + case "human": + encoder = humanEncoder(false) // disable colors in file log + default: // log file defaults to json + encoder = jsonEncoder() + } + cores = append(cores, zapcore.NewCore(encoder, writer, fileLevel)) + } + + // stdout logger + if cfg.StdOut.Enabled { + var encoder zapcore.Encoder + switch cfg.File.Format { + case "json": + encoder = jsonEncoder() + default: // stdout defaults to human + encoder = humanEncoder(cfg.StdOut.EnableANSI) + } + cores = append(cores, zapcore.NewCore(encoder, zapcore.AddSync(os.Stdout), stdoutLevel)) + } + + return zap.New( + zapcore.NewTee(cores...), + zap.AddCaller(), + zap.AddStacktrace(zapcore.ErrorLevel), + ), closeFn, nil +} + +// jsonEncoder returns a zapcore.Encoder that encodes logs as JSON intended for +// parsing. +func jsonEncoder() zapcore.Encoder { + return zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()) +} + +// humanEncoder returns a zapcore.Encoder that encodes logs as human-readable +// text. 
+func humanEncoder(showColors bool) zapcore.Encoder { + cfg := zap.NewProductionEncoderConfig() + cfg.TimeKey = "" // prevent duplicate timestamps + cfg.EncodeTime = zapcore.RFC3339TimeEncoder + cfg.EncodeDuration = zapcore.StringDurationEncoder + + if showColors { + cfg.EncodeLevel = zapcore.CapitalColorLevelEncoder + } else { + cfg.EncodeLevel = zapcore.CapitalLevelEncoder + } + + cfg.StacktraceKey = "" + cfg.CallerKey = "" + return zapcore.NewConsoleEncoder(cfg) +} diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 093747796..83a1f88e7 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -12,6 +12,7 @@ import ( "os" "os/signal" "path/filepath" + "runtime" "strings" "syscall" "time" @@ -34,6 +35,7 @@ import ( "golang.org/x/term" "gopkg.in/yaml.v3" "gorm.io/gorm/logger" + "moul.io/zapgorm2" ) const ( @@ -74,10 +76,6 @@ var ( }, ShutdownTimeout: 5 * time.Minute, Database: config.Database{ - Log: config.DatabaseLog{ - IgnoreRecordNotFoundError: true, - SlowThreshold: 100 * time.Millisecond, - }, MySQL: config.MySQL{ Database: "renterd", User: "renterd", @@ -85,7 +83,22 @@ var ( }, }, Log: config.Log{ - Level: "warn", + Path: "", // deprecated. included for compatibility. + Level: "info", + File: config.LogFile{ + Enabled: true, + Format: "json", + Path: os.Getenv("RENTERD_LOG_FILE"), + }, + StdOut: config.StdOut{ + Enabled: true, + Format: "human", + EnableANSI: runtime.GOOS != "windows", + }, + Database: config.DatabaseLog{ + IgnoreRecordNotFoundError: true, + SlowThreshold: 100 * time.Millisecond, + }, }, Bus: config.Bus{ AnnouncementMaxAgeHours: 24 * 7 * 52, // 1 year @@ -411,6 +424,7 @@ func main() { ) } + // Log level for db var level logger.LogLevel switch strings.ToLower(cfg.Log.Level) { case "silent": @@ -426,11 +440,7 @@ func main() { } // Create logger. 
- renterdLog := filepath.Join(cfg.Directory, "renterd.log") - if cfg.Log.Path != "" { - renterdLog = cfg.Log.Path - } - logger, closeFn, err := node.NewLogger(renterdLog) + logger, closeFn, err := NewLogger(cfg.Directory, cfg.Log) if err != nil { log.Fatalln("failed to create logger:", err) } @@ -438,10 +448,18 @@ func main() { logger.Info("renterd", zap.String("version", build.Version()), zap.String("network", build.NetworkName()), zap.String("commit", build.Commit()), zap.Time("buildDate", build.BuildTime())) - busCfg.DBLoggerConfig = stores.LoggerConfig{ + // configure database logger + dbLogCfg := cfg.Log.Database + if cfg.Database.Log != (config.DatabaseLog{}) { + dbLogCfg = cfg.Database.Log + } + busCfg.DBLogger = zapgorm2.Logger{ + ZapLogger: logger, LogLevel: level, - IgnoreRecordNotFoundError: cfg.Database.Log.IgnoreRecordNotFoundError, - SlowThreshold: cfg.Database.Log.SlowThreshold, + SlowThreshold: dbLogCfg.SlowThreshold, + SkipCallerLookup: false, + IgnoreRecordNotFoundError: dbLogCfg.IgnoreRecordNotFoundError, + Context: nil, } type shutdownFn struct { diff --git a/config/config.go b/config/config.go index 79100ba1c..40a26bda0 100644 --- a/config/config.go +++ b/config/config.go @@ -35,7 +35,7 @@ type ( } Database struct { - Log DatabaseLog `yaml:"log,omitempty"` + Log DatabaseLog `yaml:"log,omitempty"` // deprecated. included for compatibility. // optional fields depending on backend MySQL MySQL `yaml:"mysql,omitempty"` } @@ -52,10 +52,29 @@ type ( SlabBufferCompletionThreshold int64 `yaml:"slabBufferCompleionThreshold,omitempty"` } - // Log contains the configuration for the logger. + // LogFile configures the file output of the logger. + LogFile struct { + Enabled bool `yaml:"enabled,omitempty"` + Level string `yaml:"level,omitempty"` // override the file log level + Format string `yaml:"format,omitempty"` + // Path is the path of the log file. + Path string `yaml:"path,omitempty"` + } + + // StdOut configures the standard output of the logger. 
+ StdOut struct { + Level string `yaml:"level,omitempty"` // override the stdout log level + Enabled bool `yaml:"enabled,omitempty"` + Format string `yaml:"format,omitempty"` + EnableANSI bool `yaml:"enableANSI,omitempty"` //nolint:tagliatelle + } + Log struct { - Path string `yaml:"path,omitempty"` - Level string `yaml:"level,omitempty"` + Path string `yaml:"path,omitempty"` // deprecated. included for compatibility. + Level string `yaml:"level,omitempty"` // global log level + StdOut StdOut `yaml:"stdout,omitempty"` + File LogFile `yaml:"file,omitempty"` + Database DatabaseLog `yaml:"database,omitempty"` } // MySQL contains the configuration for an optional MySQL database. diff --git a/go.mod b/go.mod index c317ab683..0c813f7bb 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,7 @@ require ( gorm.io/driver/sqlite v1.5.5 gorm.io/gorm v1.25.7 lukechampine.com/frand v1.4.2 + moul.io/zapgorm2 v1.3.0 ) require ( diff --git a/go.sum b/go.sum index 519a9325e..d15acfcf9 100644 --- a/go.sum +++ b/go.sum @@ -11,6 +11,7 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.50.1 h1:AwnLUM7TcH9vMZqA4TcDKmGfLmDW5VXwT5tPH6kXylo= github.com/aws/aws-sdk-go v1.50.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -91,6 +92,7 @@ github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jinzhu/inflection v1.0.0 
h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -152,6 +154,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -191,6 +194,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -198,6 +202,7 @@ github.com/ugorji/go v1.1.4/go.mod 
h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/vbauerster/mpb/v5 v5.0.3/go.mod h1:h3YxU5CSr8rZP4Q3xZPVB3jJLhWPou63lHEdr9ytH4Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= gitlab.com/NebulousLabs/bolt v1.4.4 h1:3UhpR2qtHs87dJBE3CIzhw48GYSoUUNByJmic0cbu1w= gitlab.com/NebulousLabs/bolt v1.4.4/go.mod h1:ZL02cwhpLNif6aruxvUMqu/Bdy0/lFY21jMFfNAA+O8= @@ -255,12 +260,17 @@ go.sia.tech/web v0.0.0-20231213145933-3f175a86abff/go.mod h1:RKODSdOmR3VtObPAcGw go.sia.tech/web/renterd v0.49.0 h1:z9iDr3gIJ60zqiydDZ2MUbhANm6GwdvRf4k67+Zrj14= go.sia.tech/web/renterd v0.49.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.27.0 
h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -278,9 +288,11 @@ golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -294,6 +306,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net 
v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -308,6 +321,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -322,6 +336,7 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -357,6 
+372,7 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= @@ -365,6 +381,7 @@ golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -384,17 +401,22 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.5.4 h1:igQmHfKcbaTVyAIHNhhB888vvxh8EdQ2uSUT0LPcBso= gorm.io/driver/mysql v1.5.4/go.mod h1:9rYxJph/u9SWkWc9yY4XJ1F/+xO0S/ChOmbk3+Z5Tvs= gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= +gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.7 h1:VsD6acwRjz2zFxGO50gPO6AkNs7KKnvfzUjHQhZDz/A= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= +moul.io/zapgorm2 v1.3.0 h1:+CzUTMIcnafd0d/BvBce8T4uPn6DQnpIrz64cyixlkk= +moul.io/zapgorm2 v1.3.0/go.mod h1:nPVy6U9goFKHR4s+zfSo1xVFaoU7Qgd5DoCdOfzoCqs= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= diff --git a/internal/node/node.go b/internal/node/node.go index d105cbfb2..8d8a9816c 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -26,16 +26,16 @@ import ( "go.sia.tech/siad/modules/transactionpool" "go.sia.tech/siad/sync" "go.uber.org/zap" - "go.uber.org/zap/zapcore" "golang.org/x/crypto/blake2b" "gorm.io/gorm" + "gorm.io/gorm/logger" ) type BusConfig struct { config.Bus Network *consensus.Network Miner *Miner - DBLoggerConfig 
stores.LoggerConfig + DBLogger logger.Interface DBDialector gorm.Dialector DBMetricsDialector gorm.Dialector SlabPruningInterval time.Duration @@ -106,7 +106,6 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht } alertsMgr := alerts.NewManager() - sqlLogger := stores.NewSQLLogger(l.Named("db"), cfg.DBLoggerConfig) walletAddr := wallet.StandardAddress(seed.PublicKey()) sqlStoreDir := filepath.Join(dir, "partial_slabs") announcementMaxAge := time.Duration(cfg.AnnouncementMaxAgeHours) * time.Hour @@ -121,7 +120,7 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht WalletAddress: walletAddr, SlabBufferCompletionThreshold: cfg.SlabBufferCompletionThreshold, Logger: l.Sugar(), - GormLogger: sqlLogger, + GormLogger: cfg.DBLogger, RetryTransactionIntervals: []time.Duration{200 * time.Millisecond, 500 * time.Millisecond, time.Second, 3 * time.Second, 10 * time.Second, 10 * time.Second}, }) if err != nil { @@ -209,43 +208,3 @@ func NewAutopilot(cfg AutopilotConfig, b autopilot.Bus, workers []autopilot.Work } return ap.Handler(), ap.Run, ap.Shutdown, nil } - -func NewLogger(path string) (*zap.Logger, func(context.Context) error, error) { - writer, closeFn, err := zap.Open(path) - if err != nil { - return nil, nil, err - } - - // console - config := zap.NewProductionEncoderConfig() - config.EncodeTime = zapcore.RFC3339TimeEncoder - config.EncodeLevel = zapcore.CapitalColorLevelEncoder - config.StacktraceKey = "" - consoleEncoder := zapcore.NewConsoleEncoder(config) - - // file - config = zap.NewProductionEncoderConfig() - config.EncodeTime = zapcore.RFC3339TimeEncoder - config.CallerKey = "" // hide - config.StacktraceKey = "" // hide - config.NameKey = "component" - config.TimeKey = "date" - fileEncoder := zapcore.NewJSONEncoder(config) - - core := zapcore.NewTee( - zapcore.NewCore(fileEncoder, writer, zapcore.DebugLevel), - zapcore.NewCore(consoleEncoder, zapcore.AddSync(os.Stdout), zapcore.DebugLevel), - ) - - 
logger := zap.New( - core, - zap.AddCaller(), - zap.AddStacktrace(zapcore.ErrorLevel), - ) - - return logger, func(_ context.Context) error { - _ = logger.Sync() // ignore Error - closeFn() - return nil - }, nil -} diff --git a/stores/logger.go b/stores/logger.go deleted file mode 100644 index d95c1c38a..000000000 --- a/stores/logger.go +++ /dev/null @@ -1,116 +0,0 @@ -package stores - -import ( - "context" - "errors" - "fmt" - "runtime" - "strings" - "time" - - "go.uber.org/zap" - "gorm.io/gorm" - "gorm.io/gorm/logger" -) - -type LoggerConfig struct { - IgnoreRecordNotFoundError bool - LogLevel logger.LogLevel - SlowThreshold time.Duration -} - -type gormLogger struct { - LoggerConfig - l *zap.SugaredLogger -} - -func NewSQLLogger(l *zap.Logger, config LoggerConfig) logger.Interface { - return &gormLogger{ - LoggerConfig: config, - l: l.Sugar(), - } -} - -func (l *gormLogger) LogMode(level logger.LogLevel) logger.Interface { - newlogger := *l - newlogger.LogLevel = level - return &newlogger -} - -func (l gormLogger) Info(ctx context.Context, msg string, args ...interface{}) { - if l.LogLevel < logger.Info { - return - } - l.logger().Infof(msg, args...) -} - -func (l gormLogger) Warn(ctx context.Context, msg string, args ...interface{}) { - if l.LogLevel < logger.Warn { - return - } - l.logger().Warnf(msg, args...) -} - -func (l gormLogger) Error(ctx context.Context, msg string, args ...interface{}) { - if l.LogLevel < logger.Error { - return - } - l.logger().Errorf(msg, args...) 
-} - -func (l gormLogger) Trace(ctx context.Context, start time.Time, fc func() (sql string, rowsAffected int64), err error) { - if l.LogLevel <= logger.Silent { - return - } - ll := l.logger() - - hideError := errors.Is(err, gorm.ErrRecordNotFound) && l.IgnoreRecordNotFoundError - if err != nil && !hideError && l.LogLevel >= logger.Error { - var log func(string, ...interface{}) - if errors.Is(err, gorm.ErrRecordNotFound) { - log = ll.Debugw - } else { - log = ll.Errorw - } - - sql, rows := fc() - if rows == -1 { - log(err.Error(), "elapsed", elapsedMS(start), "sql", sql) - } else { - log(err.Error(), "elapsed", elapsedMS(start), "rows", rows, "sql", sql) - } - return - } - - if l.SlowThreshold != 0 && time.Since(start) > l.SlowThreshold && l.LogLevel >= logger.Warn { - sql, rows := fc() - if rows == -1 { - ll.Warnw(fmt.Sprintf("SLOW SQL >= %v", l.SlowThreshold), "elapsed", elapsedMS(start), "sql", sql) - } else { - ll.Warnw(fmt.Sprintf("SLOW SQL >= %v", l.SlowThreshold), "elapsed", elapsedMS(start), "rows", rows, "sql", sql) - } - return - } - - if l.LogLevel >= logger.Info { - sql, rows := fc() - ll.Debugw("trace", "elapsed", elapsedMS(start), "rows", rows, "sql", sql) - } -} - -func (l *gormLogger) logger() *zap.SugaredLogger { - for i := 2; i < 15; i++ { - _, file, _, ok := runtime.Caller(i) - switch { - case !ok: - case strings.Contains(file, "gorm"): - default: - return l.l.WithOptions(zap.AddCallerSkip(i)) - } - } - return l.l -} - -func elapsedMS(t time.Time) string { - return fmt.Sprintf("%.3fms", float64(time.Since(t).Nanoseconds())/1e6) -} diff --git a/stores/sql_test.go b/stores/sql_test.go index 842f3c9df..2d29763bb 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -21,6 +21,7 @@ import ( "gorm.io/gorm" "gorm.io/gorm/logger" "lukechampine.com/frand" + "moul.io/zapgorm2" ) const ( @@ -206,11 +207,7 @@ func newTestLogger() logger.Interface { zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel), ) - return NewSQLLogger(l, LoggerConfig{ - 
IgnoreRecordNotFoundError: false, - LogLevel: logger.Warn, - SlowThreshold: 100 * time.Millisecond, - }) + return zapgorm2.New(l) } func (s *testSQLStore) addTestObject(path string, o object.Object) (api.Object, error) { From a2f1e2bd7da062a4a7583fe3add368bf87660d74 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 18 Mar 2024 13:12:54 +0100 Subject: [PATCH 054/201] all: update Debug logging to Info logging --- autopilot/autopilot.go | 4 +- autopilot/contractor.go | 64 +++++++++++----------- autopilot/ipfilter.go | 2 +- autopilot/migrator.go | 6 +-- autopilot/scanner.go | 10 ++-- bus/accounts.go | 6 +-- stores/logger.go | 116 ++++++++++++++++++++++++++++++++++++++++ wallet/wallet.go | 2 +- worker/host.go | 2 +- worker/rhpv2.go | 10 ++-- worker/worker.go | 6 +-- 11 files changed, 172 insertions(+), 56 deletions(-) create mode 100644 stores/logger.go diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index c89049286..0fff8b9d1 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -316,7 +316,7 @@ func (ap *Autopilot) Run() error { // launch account refills after successful contract maintenance. 
if maintenanceSuccess { launchAccountRefillsOnce.Do(func() { - ap.logger.Debug("account refills loop launched") + ap.logger.Info("account refills loop launched") go ap.a.refillWorkersAccountsLoop(ap.shutdownCtx) }) } @@ -328,7 +328,7 @@ func (ap *Autopilot) Run() error { if ap.state.cfg.Contracts.Prune { ap.c.tryPerformPruning(ap.workers) } else { - ap.logger.Debug("pruning disabled") + ap.logger.Info("pruning disabled") } }) diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 83e12a206..bbff2c826 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -212,7 +212,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( for _, c := range currentSet { isInCurrentSet[c.ID] = struct{}{} } - c.logger.Debugf("contract set '%s' holds %d contracts", state.cfg.Contracts.Set, len(currentSet)) + c.logger.Infof("contract set '%s' holds %d contracts", state.cfg.Contracts.Set, len(currentSet)) // fetch all contracts from the worker. start := time.Now() @@ -224,7 +224,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( c.logger.Error(resp.Error) } contracts := resp.Contracts - c.logger.Debugf("fetched %d contracts from the worker, took %v", len(resp.Contracts), time.Since(start)) + c.logger.Infof("fetched %d contracts from the worker, took %v", len(resp.Contracts), time.Since(start)) // run revision broadcast c.runRevisionBroadcast(ctx, w, contracts, isInCurrentSet) @@ -317,7 +317,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( // archive contracts if len(toArchive) > 0 { - c.logger.Debugf("archiving %d contracts: %+v", len(toArchive), toArchive) + c.logger.Infof("archiving %d contracts: %+v", len(toArchive), toArchive) if err := c.ap.bus.ArchiveContracts(ctx, toArchive); err != nil { c.logger.Errorf("failed to archive contracts, err: %v", err) // continue } @@ -487,7 +487,7 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name 
string, Time: now, }) setRemovals[contract.ID] = removals - c.logger.Debugf("contract %v was removed from the contract set, size: %v, reason: %v", contract.ID, contractData[contract.ID], reason) + c.logger.Infof("contract %v was removed from the contract set, size: %v, reason: %v", contract.ID, contractData[contract.ID], reason) } } for _, contract := range newSet { @@ -505,7 +505,7 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, Time: now, }) setAdditions[contract.ID] = additions - c.logger.Debugf("contract %v was added to the contract set, size: %v", contract.ID, contractData[contract.ID]) + c.logger.Infof("contract %v was added to the contract set, size: %v", contract.ID, contractData[contract.ID]) } } @@ -513,12 +513,12 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, for _, fcid := range renewed { _, exists := inNewSet[fcid.to.ID] if !exists { - c.logger.Debugf("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to.ID]) + c.logger.Infof("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to.ID]) } } // log a warning if the contract set does not contain enough contracts - logFn := c.logger.Debugw + logFn := c.logger.Infow if len(newSet) < int(c.ap.State().rs.TotalShards) { logFn = c.logger.Warnw } @@ -626,7 +626,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error { for _, txn := range pending { for _, mTxnID := range c.maintenanceTxnIDs { if mTxnID == txn.ID() { - l.Debugf("wallet maintenance skipped, pending transaction found with id %v", mTxnID) + l.Infof("wallet maintenance skipped, pending transaction found with id %v", mTxnID) return nil } } @@ -640,7 +640,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error { return err } if uint64(len(available)) >= uint64(wantedNumOutputs) { - l.Debugf("no wallet maintenance needed, plenty of outputs 
available (%v>=%v)", len(available), uint64(wantedNumOutputs)) + l.Infof("no wallet maintenance needed, plenty of outputs available (%v>=%v)", len(available), uint64(wantedNumOutputs)) return nil } wantedNumOutputs -= len(available) @@ -654,7 +654,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error { return fmt.Errorf("failed to redistribute wallet into %d outputs of amount %v, balance %v, err %v", wantedNumOutputs, amount, balance, err) } - l.Debugf("wallet maintenance succeeded, txns %v", ids) + l.Infof("wallet maintenance succeeded, txns %v", ids) c.maintenanceTxnIDs = ids return nil } @@ -663,7 +663,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts if c.ap.isStopped() { return } - c.logger.Debug("running contract checks") + c.logger.Info("running contract checks") // convenience variables state := c.ap.State() @@ -685,7 +685,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts var notfound int defer func() { - c.logger.Debugw( + c.logger.Infow( "contracts checks completed", "contracts", len(contracts), "notfound", notfound, @@ -853,7 +853,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid state := c.ap.State() shouldFilter := !state.cfg.Hosts.AllowRedundantIPs - c.logger.Debugw( + c.logger.Infow( "run contract formations", "usedHosts", len(usedHosts), "required", state.cfg.Contracts.Amount, @@ -861,7 +861,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid "budget", budget, ) defer func() { - c.logger.Debugw( + c.logger.Infow( "contract formations completed", "formed", len(formed), "budget", budget, @@ -873,7 +873,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid selected := candidates.randSelectByScore(wanted) // print warning if we couldn't find enough hosts were found - c.logger.Debugf("looking for %d candidate hosts", wanted) + c.logger.Infof("looking for %d 
candidate hosts", wanted) if len(selected) < wanted { msg := "no candidate hosts found" if len(selected) > 0 { @@ -882,7 +882,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid if len(candidates) >= wanted { c.logger.Warnw(msg, unusableHosts.keysAndValues()...) } else { - c.logger.Debugw(msg, unusableHosts.keysAndValues()...) + c.logger.Infow(msg, unusableHosts.keysAndValues()...) } } @@ -1030,14 +1030,14 @@ func (c *contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont } func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { - c.logger.Debugw( + c.logger.Infow( "run contracts renewals", "torenew", len(toRenew), "limit", limit, "budget", budget, ) defer func() { - c.logger.Debugw( + c.logger.Infow( "contracts renewals completed", "renewals", len(renewals), "tokeep", len(toKeep), @@ -1088,13 +1088,13 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew } func (c *contractor) runContractRefreshes(ctx context.Context, w Worker, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) { - c.logger.Debugw( + c.logger.Infow( "run contracts refreshes", "torefresh", len(toRefresh), "budget", budget, ) defer func() { - c.logger.Debugw( + c.logger.Infow( "contracts refreshes completed", "refreshed", len(refreshed), "budget", budget, @@ -1152,7 +1152,7 @@ func (c *contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contract if refreshAmountCapped.Cmp(minimum) < 0 { refreshAmountCapped = minimum } - c.logger.Debugw("refresh estimate", + c.logger.Infow("refresh estimate", "fcid", ci.contract.ID, "refreshAmount", refreshAmount, "refreshAmountCapped", refreshAmountCapped) @@ -1228,7 +1228,7 @@ func (c *contractor) renewFundingEstimate(ctx context.Context, ci contractInfo, } if renewing { - c.logger.Debugw("renew estimate", + 
c.logger.Infow("renew estimate", "fcid", ci.contract.ID, "dataStored", dataStored, "storageCost", storageCost.String(), @@ -1327,7 +1327,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us unused = append(unused, h) } - c.logger.Debugw(fmt.Sprintf("selected %d (potentially) usable hosts for scoring out of %d", len(unused), len(hosts)), + c.logger.Infow(fmt.Sprintf("selected %d (potentially) usable hosts for scoring out of %d", len(unused), len(hosts)), "excluded", excluded, "notcompletedscan", notcompletedscan, "used", len(usedHosts)) @@ -1360,7 +1360,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us unusable++ } - c.logger.Debugw(fmt.Sprintf("scored %d unused hosts out of %v, took %v", len(candidates), len(unused), time.Since(start)), + c.logger.Infow(fmt.Sprintf("scored %d unused hosts out of %v, took %v", len(candidates), len(unused), time.Since(start)), "zeroscore", zeros, "unusable", unusable, "used", len(usedHosts)) @@ -1397,14 +1397,14 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf // check our budget if budget.Cmp(renterFunds) < 0 { - c.logger.Debugw("insufficient budget", "budget", budget, "needed", renterFunds) + c.logger.Infow("insufficient budget", "budget", budget, "needed", renterFunds) return api.ContractMetadata{}, false, errors.New("insufficient budget") } // sanity check the endheight is not the same on renewals endHeight := endHeight(cfg, state.period) if endHeight <= rev.EndHeight() { - c.logger.Debugw("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", state.period, "bh", cs.BlockHeight) + c.logger.Infow("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", state.period, "bh", cs.BlockHeight) return api.ContractMetadata{}, false, fmt.Errorf("renewal endheight should surpass the current contract endheight, %v <= %v", endHeight, rev.EndHeight()) } @@ 
-1440,7 +1440,7 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf } newCollateral := resp.Contract.Revision.MissedHostPayout().Sub(resp.ContractPrice) - c.logger.Debugw( + c.logger.Infow( "renewal succeeded", "fcid", renewedContract.ID, "renewedFrom", fcid, @@ -1493,7 +1493,7 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, state.address, renterFunds, minNewCollateral, expectedStorage, settings.WindowSize) if err != nil { if strings.Contains(err.Error(), "new collateral is too low") { - c.logger.Debugw("refresh failed: contract wouldn't have enough collateral after refresh", + c.logger.Infow("refresh failed: contract wouldn't have enough collateral after refresh", "hk", hk, "fcid", fcid, "unallocatedCollateral", unallocatedCollateral.String(), @@ -1520,7 +1520,7 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI // add to renewed set newCollateral := resp.Contract.Revision.MissedHostPayout().Sub(resp.ContractPrice) - c.logger.Debugw("refresh succeeded", + c.logger.Infow("refresh succeeded", "fcid", refreshedContract.ID, "renewedFrom", contract.ID, "renterFunds", renterFunds.String(), @@ -1538,7 +1538,7 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos // fetch host settings scan, err := w.RHPScan(ctx, hk, host.NetAddress, 0) if err != nil { - c.logger.Debugw(err.Error(), "hk", hk) + c.logger.Infow(err.Error(), "hk", hk) return api.ContractMetadata{}, true, err } @@ -1552,7 +1552,7 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos txnFee := state.fee.Mul64(estimatedFileContractTransactionSetSize) renterFunds := initialContractFunding(scan.Settings, txnFee, minInitialContractFunds, maxInitialContractFunds) if budget.Cmp(renterFunds) < 0 { - c.logger.Debugw("insufficient budget", "budget", 
budget, "needed", renterFunds) + c.logger.Infow("insufficient budget", "budget", budget, "needed", renterFunds) return api.ContractMetadata{}, false, errors.New("insufficient budget") } @@ -1583,7 +1583,7 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos return api.ContractMetadata{}, true, err } - c.logger.Debugw("formation succeeded", + c.logger.Infow("formation succeeded", "hk", hk, "fcid", formedContract.ID, "renterFunds", renterFunds.String(), diff --git a/autopilot/ipfilter.go b/autopilot/ipfilter.go index 0932d7676..aa1730cd2 100644 --- a/autopilot/ipfilter.go +++ b/autopilot/ipfilter.go @@ -140,7 +140,7 @@ func (r *ipResolver) lookup(hostIP string) ([]string, error) { // check the cache if it's an i/o timeout or server misbehaving error if utils.IsErr(err, errIOTimeout) || utils.IsErr(err, errServerMisbehaving) { if entry, found := r.cache[hostIP]; found && time.Since(entry.created) < ipCacheEntryValidity { - r.logger.Debugf("using cached IP addresses for %v, err: %v", hostIP, err) + r.logger.Infof("using cached IP addresses for %v, err: %v", hostIP, err) return entry.subnets, nil } } diff --git a/autopilot/migrator.go b/autopilot/migrator.go index 89ab16a28..94497f195 100644 --- a/autopilot/migrator.go +++ b/autopilot/migrator.go @@ -205,7 +205,7 @@ OUTER: m.logger.Errorf("failed to recompute cached health before migration: %v", err) return } - m.logger.Debugf("recomputed slab health in %v", time.Since(start)) + m.logger.Infof("recomputed slab health in %v", time.Since(start)) // fetch slabs for migration toMigrateNew, err := b.SlabsForMigration(m.ap.shutdownCtx, m.healthCutoff, set, migratorBatchSize) @@ -213,7 +213,7 @@ OUTER: m.logger.Errorf("failed to fetch slabs for migration, err: %v", err) return } - m.logger.Debugf("%d potential slabs fetched for migration", len(toMigrateNew)) + m.logger.Infof("%d potential slabs fetched for migration", len(toMigrateNew)) // merge toMigrateNew with toMigrate // NOTE: when merging, 
we remove all slabs from toMigrate that don't @@ -248,7 +248,7 @@ OUTER: migrateNewMap = nil // free map // log the updated list of slabs to migrate - m.logger.Debugf("%d slabs to migrate", len(toMigrate)) + m.logger.Infof("%d slabs to migrate", len(toMigrate)) // register an alert to notify users about ongoing migrations. if len(toMigrate) > 0 { diff --git a/autopilot/scanner.go b/autopilot/scanner.go index bb21e5022..75f5628cf 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -215,7 +215,7 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo minRecentScanFailures := hostCfg.MinRecentScanFailures if !interrupted && maxDowntime > 0 { - s.logger.Debugf("removing hosts that have been offline for more than %v and have failed at least %d scans", maxDowntime, minRecentScanFailures) + s.logger.Infof("removing hosts that have been offline for more than %v and have failed at least %d scans", maxDowntime, minRecentScanFailures) removed, err := s.bus.RemoveOfflineHosts(ctx, minRecentScanFailures, maxDowntime) if err != nil { s.logger.Errorf("error occurred while removing offline hosts, err: %v", err) @@ -226,7 +226,7 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo s.mu.Lock() s.scanning = false - s.logger.Debugf("%s finished after %v", st, time.Since(s.scanningLastStart)) + s.logger.Infof("%s finished after %v", st, time.Since(s.scanningLastStart)) s.mu.Unlock() }(scanType) return @@ -241,12 +241,12 @@ func (s *scanner) tryUpdateTimeout() { updated := s.tracker.timeout() if updated < s.timeoutMinTimeout { - s.logger.Debugf("updated timeout is lower than min timeout, %v<%v", updated, s.timeoutMinTimeout) + s.logger.Infof("updated timeout is lower than min timeout, %v<%v", updated, s.timeoutMinTimeout) updated = s.timeoutMinTimeout } if s.timeout != updated { - s.logger.Debugf("updated timeout %v->%v", s.timeout, updated) + s.logger.Infof("updated timeout %v->%v", s.timeout, updated) s.timeout = 
updated } s.timeoutLastUpdate = time.Now() @@ -281,7 +281,7 @@ func (s *scanner) launchHostScans() chan scanReq { exhausted = true } - s.logger.Debugf("scanning %d hosts in range %d-%d", len(hosts), offset, offset+int(s.scanBatchSize)) + s.logger.Infof("scanning %d hosts in range %d-%d", len(hosts), offset, offset+int(s.scanBatchSize)) offset += int(s.scanBatchSize) // add batch to scan queue diff --git a/bus/accounts.go b/bus/accounts.go index 85b575bdd..42dafefcf 100644 --- a/bus/accounts.go +++ b/bus/accounts.go @@ -136,7 +136,7 @@ func (a *accounts) AddAmount(id rhpv3.Account, hk types.PublicKey, amt *big.Int) // Log deposits. if amt.Cmp(big.NewInt(0)) > 0 { - a.logger.Debugw("account balance was increased", + a.logger.Infow("account balance was increased", "account", acc.ID, "host", acc.HostKey.String(), "amt", amt.String(), @@ -167,7 +167,7 @@ func (a *accounts) SetBalance(id rhpv3.Account, hk types.PublicKey, balance *big acc.mu.Unlock() // Log resets. - a.logger.Debugw("account balance was reset", + a.logger.Infow("account balance was reset", "account", acc.ID, "host", acc.HostKey.String(), "balanceBefore", balanceBefore, @@ -192,7 +192,7 @@ func (a *accounts) ScheduleSync(id rhpv3.Account, hk types.PublicKey) error { acc.requiresSyncTime = time.Now() // Log scheduling a sync. 
- a.logger.Debugw("account sync was scheduled", + a.logger.Infow("account sync was scheduled", "account", acc.ID, "host", acc.HostKey.String(), "balance", acc.Balance.String(), diff --git a/stores/logger.go b/stores/logger.go new file mode 100644 index 000000000..da04d0755 --- /dev/null +++ b/stores/logger.go @@ -0,0 +1,116 @@ +package stores + +import ( + "context" + "errors" + "fmt" + "runtime" + "strings" + "time" + + "go.uber.org/zap" + "gorm.io/gorm" + "gorm.io/gorm/logger" +) + +type LoggerConfig struct { + IgnoreRecordNotFoundError bool + LogLevel logger.LogLevel + SlowThreshold time.Duration +} + +type gormLogger struct { + LoggerConfig + l *zap.SugaredLogger +} + +func NewSQLLogger(l *zap.Logger, config LoggerConfig) logger.Interface { + return &gormLogger{ + LoggerConfig: config, + l: l.Sugar(), + } +} + +func (l *gormLogger) LogMode(level logger.LogLevel) logger.Interface { + newlogger := *l + newlogger.LogLevel = level + return &newlogger +} + +func (l gormLogger) Info(ctx context.Context, msg string, args ...interface{}) { + if l.LogLevel < logger.Info { + return + } + l.logger().Infof(msg, args...) +} + +func (l gormLogger) Warn(ctx context.Context, msg string, args ...interface{}) { + if l.LogLevel < logger.Warn { + return + } + l.logger().Warnf(msg, args...) +} + +func (l gormLogger) Error(ctx context.Context, msg string, args ...interface{}) { + if l.LogLevel < logger.Error { + return + } + l.logger().Errorf(msg, args...) 
+} + +func (l gormLogger) Trace(ctx context.Context, start time.Time, fc func() (sql string, rowsAffected int64), err error) { + if l.LogLevel <= logger.Silent { + return + } + ll := l.logger() + + hideError := errors.Is(err, gorm.ErrRecordNotFound) && l.IgnoreRecordNotFoundError + if err != nil && !hideError && l.LogLevel >= logger.Error { + var log func(string, ...interface{}) + if errors.Is(err, gorm.ErrRecordNotFound) { + log = ll.Debugw + } else { + log = ll.Errorw + } + + sql, rows := fc() + if rows == -1 { + log(err.Error(), "elapsed", elapsedMS(start), "sql", sql) + } else { + log(err.Error(), "elapsed", elapsedMS(start), "rows", rows, "sql", sql) + } + return + } + + if l.SlowThreshold != 0 && time.Since(start) > l.SlowThreshold && l.LogLevel >= logger.Warn { + sql, rows := fc() + if rows == -1 { + ll.Warnw(fmt.Sprintf("SLOW SQL >= %v", l.SlowThreshold), "elapsed", elapsedMS(start), "sql", sql) + } else { + ll.Warnw(fmt.Sprintf("SLOW SQL >= %v", l.SlowThreshold), "elapsed", elapsedMS(start), "rows", rows, "sql", sql) + } + return + } + + if l.LogLevel >= logger.Info { + sql, rows := fc() + ll.Infow("trace", "elapsed", elapsedMS(start), "rows", rows, "sql", sql) + } +} + +func (l *gormLogger) logger() *zap.SugaredLogger { + for i := 2; i < 15; i++ { + _, file, _, ok := runtime.Caller(i) + switch { + case !ok: + case strings.Contains(file, "gorm"): + default: + return l.l.WithOptions(zap.AddCallerSkip(i)) + } + } + return l.l +} + +func elapsedMS(t time.Time) string { + return fmt.Sprintf("%.3fms", float64(time.Since(t).Nanoseconds())/1e6) +} diff --git a/wallet/wallet.go b/wallet/wallet.go index ff9a2e92e..6c641ed42 100644 --- a/wallet/wallet.go +++ b/wallet/wallet.go @@ -583,7 +583,7 @@ func (w *SingleAddressWallet) ReceiveUpdatedUnconfirmedTransactions(diff *module if !ok { // note: happens during deep reorgs. Possibly a race // condition in siad. Log and skip. 
- w.log.Debug("tpool transaction unknown utxo", zap.Stringer("outputID", sci.ParentID), zap.Stringer("txnID", txn.ID())) + w.log.Info("tpool transaction unknown utxo", zap.Stringer("outputID", sci.ParentID), zap.Stringer("txnID", txn.ID())) continue txnLoop } processed.Outflow = processed.Outflow.Add(output.Value) diff --git a/worker/host.go b/worker/host.go index fceeaba00..cd29572cc 100644 --- a/worker/host.go +++ b/worker/host.go @@ -163,7 +163,7 @@ func (h *host) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rh if err == nil { pt = &hpt.HostPriceTable } else { - h.logger.Debugf("unable to fetch price table for renew: %v", err) + h.logger.Infof("unable to fetch price table for renew: %v", err) } var contractPrice types.Currency diff --git a/worker/rhpv2.go b/worker/rhpv2.go index 9f05904a4..7f8c84d68 100644 --- a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -333,7 +333,7 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types } func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings, indices []uint64) (deleted uint64, err error) { - w.logger.Debugw(fmt.Sprintf("deleting %d contract roots (%v)", len(indices), humanReadableSize(len(indices)*rhpv2.SectorSize)), "hk", rev.HostKey(), "fcid", rev.ID()) + w.logger.Infow(fmt.Sprintf("deleting %d contract roots (%v)", len(indices), humanReadableSize(len(indices)*rhpv2.SectorSize)), "hk", rev.HostKey(), "fcid", rev.ID()) // return early if len(indices) == 0 { @@ -374,9 +374,9 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi if err = func() error { var cost types.Currency start := time.Now() - w.logger.Debugw(fmt.Sprintf("starting batch %d/%d of size %d", i+1, len(batches), len(batch))) + w.logger.Infow(fmt.Sprintf("starting batch %d/%d of size %d", i+1, len(batches), len(batch))) defer func() { - w.logger.Debugw(fmt.Sprintf("processing batch %d/%d of size %d took %v", i+1, len(batches), 
len(batch), time.Since(start)), "cost", cost) + w.logger.Infow(fmt.Sprintf("processing batch %d/%d of size %d took %v", i+1, len(batches), len(batch), time.Since(start)), "cost", cost) }() numSectors := rev.NumSectors() @@ -456,7 +456,7 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi return err } else if err := t.ReadResponse(&merkleResp, minMessageSize+responseSize); err != nil { err := fmt.Errorf("couldn't read Merkle proof response, err: %v", err) - w.logger.Debugw(fmt.Sprintf("processing batch %d/%d failed, err %v", i+1, len(batches), err)) + w.logger.Infow(fmt.Sprintf("processing batch %d/%d failed, err %v", i+1, len(batches), err)) return err } @@ -466,7 +466,7 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi oldRoot, newRoot := types.Hash256(rev.Revision.FileMerkleRoot), merkleResp.NewMerkleRoot if rev.Revision.Filesize > 0 && !rhpv2.VerifyDiffProof(actions, numSectors, proofHashes, leafHashes, oldRoot, newRoot, nil) { err := fmt.Errorf("couldn't verify delete proof, host %v, version %v; %w", rev.HostKey(), settings.Version, ErrInvalidMerkleProof) - w.logger.Debugw(fmt.Sprintf("processing batch %d/%d failed, err %v", i+1, len(batches), err)) + w.logger.Infow(fmt.Sprintf("processing batch %d/%d failed, err %v", i+1, len(batches), err)) t.WriteResponseErr(err) return err } diff --git a/worker/worker.go b/worker/worker.go index 0868c347c..5aa14d7b6 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -688,7 +688,7 @@ func (w *worker) rhpFundHandler(jc jape.Context) { // sync the account err = h.SyncAccount(ctx, &rev) if err != nil { - w.logger.Debugf(fmt.Sprintf("failed to sync account: %v", err), "host", rfr.HostKey) + w.logger.Infof(fmt.Sprintf("failed to sync account: %v", err), "host", rfr.HostKey) return } @@ -1503,9 +1503,9 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty logger = logger.With("elapsed", duration) if err == nil { - 
logger.Debug("successfully scanned host on second try") + logger.Info("successfully scanned host on second try") } else if !isErrHostUnreachable(err) { - logger.Debugw("failed to scan host", zap.Error(err)) + logger.Infow("failed to scan host", zap.Error(err)) } } From 3b4200bb280a371eae9509133f60d9ec81418c32 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 18 Mar 2024 13:14:14 +0100 Subject: [PATCH 055/201] stores: remove logger.go --- stores/logger.go | 116 ----------------------------------------------- 1 file changed, 116 deletions(-) delete mode 100644 stores/logger.go diff --git a/stores/logger.go b/stores/logger.go deleted file mode 100644 index da04d0755..000000000 --- a/stores/logger.go +++ /dev/null @@ -1,116 +0,0 @@ -package stores - -import ( - "context" - "errors" - "fmt" - "runtime" - "strings" - "time" - - "go.uber.org/zap" - "gorm.io/gorm" - "gorm.io/gorm/logger" -) - -type LoggerConfig struct { - IgnoreRecordNotFoundError bool - LogLevel logger.LogLevel - SlowThreshold time.Duration -} - -type gormLogger struct { - LoggerConfig - l *zap.SugaredLogger -} - -func NewSQLLogger(l *zap.Logger, config LoggerConfig) logger.Interface { - return &gormLogger{ - LoggerConfig: config, - l: l.Sugar(), - } -} - -func (l *gormLogger) LogMode(level logger.LogLevel) logger.Interface { - newlogger := *l - newlogger.LogLevel = level - return &newlogger -} - -func (l gormLogger) Info(ctx context.Context, msg string, args ...interface{}) { - if l.LogLevel < logger.Info { - return - } - l.logger().Infof(msg, args...) -} - -func (l gormLogger) Warn(ctx context.Context, msg string, args ...interface{}) { - if l.LogLevel < logger.Warn { - return - } - l.logger().Warnf(msg, args...) -} - -func (l gormLogger) Error(ctx context.Context, msg string, args ...interface{}) { - if l.LogLevel < logger.Error { - return - } - l.logger().Errorf(msg, args...) 
-} - -func (l gormLogger) Trace(ctx context.Context, start time.Time, fc func() (sql string, rowsAffected int64), err error) { - if l.LogLevel <= logger.Silent { - return - } - ll := l.logger() - - hideError := errors.Is(err, gorm.ErrRecordNotFound) && l.IgnoreRecordNotFoundError - if err != nil && !hideError && l.LogLevel >= logger.Error { - var log func(string, ...interface{}) - if errors.Is(err, gorm.ErrRecordNotFound) { - log = ll.Debugw - } else { - log = ll.Errorw - } - - sql, rows := fc() - if rows == -1 { - log(err.Error(), "elapsed", elapsedMS(start), "sql", sql) - } else { - log(err.Error(), "elapsed", elapsedMS(start), "rows", rows, "sql", sql) - } - return - } - - if l.SlowThreshold != 0 && time.Since(start) > l.SlowThreshold && l.LogLevel >= logger.Warn { - sql, rows := fc() - if rows == -1 { - ll.Warnw(fmt.Sprintf("SLOW SQL >= %v", l.SlowThreshold), "elapsed", elapsedMS(start), "sql", sql) - } else { - ll.Warnw(fmt.Sprintf("SLOW SQL >= %v", l.SlowThreshold), "elapsed", elapsedMS(start), "rows", rows, "sql", sql) - } - return - } - - if l.LogLevel >= logger.Info { - sql, rows := fc() - ll.Infow("trace", "elapsed", elapsedMS(start), "rows", rows, "sql", sql) - } -} - -func (l *gormLogger) logger() *zap.SugaredLogger { - for i := 2; i < 15; i++ { - _, file, _, ok := runtime.Caller(i) - switch { - case !ok: - case strings.Contains(file, "gorm"): - default: - return l.l.WithOptions(zap.AddCallerSkip(i)) - } - } - return l.l -} - -func elapsedMS(t time.Time) string { - return fmt.Sprintf("%.3fms", float64(time.Since(t).Nanoseconds())/1e6) -} From 0a6169c0a8a41af3ec253671f7b801e78aad8b60 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 18 Mar 2024 13:49:07 +0100 Subject: [PATCH 056/201] main: add cli flags and env var overrides --- cmd/renterd/logger.go | 2 +- cmd/renterd/main.go | 49 +++++++++++++++++++++++++++++++++++-------- config/config.go | 2 ++ 3 files changed, 43 insertions(+), 10 deletions(-) diff --git a/cmd/renterd/logger.go 
b/cmd/renterd/logger.go index 964a1ce39..9e78017a2 100644 --- a/cmd/renterd/logger.go +++ b/cmd/renterd/logger.go @@ -23,7 +23,7 @@ func NewLogger(dir string, cfg config.Log) (*zap.Logger, func(context.Context) e } // log level - level, err := zapcore.ParseLevel(cfg.Path) + level, err := zapcore.ParseLevel(cfg.Level) if err != nil { return nil, nil, fmt.Errorf("failed to parse log level: %w", err) } diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 83a1f88e7..35ce82d97 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -96,6 +96,7 @@ var ( EnableANSI: runtime.GOOS != "windows", }, Database: config.DatabaseLog{ + Enabled: true, IgnoreRecordNotFoundError: true, SlowThreshold: 100 * time.Millisecond, }, @@ -261,10 +262,28 @@ func main() { // overwrite anything set in the config file. tryLoadConfig() + // deprecated - these go first so that they can be overwritten by the non-deprecated flags + flag.StringVar(&cfg.Log.Database.Level, "db.logger.logLevel", cfg.Log.Level, "(deprecated) Logger level (overrides with RENTERD_DB_LOGGER_LOG_LEVEL)") + flag.BoolVar(&cfg.Database.Log.IgnoreRecordNotFoundError, "db.logger.ignoreNotFoundError", cfg.Database.Log.IgnoreRecordNotFoundError, "(deprecated) Ignores 'not found' errors in logger (overrides with RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR)") + flag.DurationVar(&cfg.Database.Log.SlowThreshold, "db.logger.slowThreshold", cfg.Database.Log.SlowThreshold, "(deprecated) Threshold for slow queries in logger (overrides with RENTERD_DB_LOGGER_SLOW_THRESHOLD)") + flag.StringVar(&cfg.Log.Path, "log-path", cfg.Log.Path, "(deprecated) Path to directory for logs (overrides with RENTERD_LOG_PATH)") + // node flag.StringVar(&cfg.HTTP.Address, "http", cfg.HTTP.Address, "Address for serving the API") flag.StringVar(&cfg.Directory, "dir", cfg.Directory, "Directory for storing node state") - flag.StringVar(&cfg.Log.Path, "log-path", cfg.Log.Path, "Path for logs (overrides with RENTERD_LOG_PATH)") + + // logger + 
flag.StringVar(&cfg.Log.Level, "log.level", cfg.Log.Level, "Global logger level (info|warn|error). Defaults to 'info' (overrides with RENTERD_LOG_LEVEL)") + flag.BoolVar(&cfg.Log.File.Enabled, "log.file.enabled", cfg.Log.File.Enabled, "Enables logging to disk. Defaults to 'true'. (overrides with RENTERD_LOG_FILE_ENABLED)") + flag.StringVar(&cfg.Log.File.Format, "log.file.format", cfg.Log.File.Format, "Format of log file (json|human). Defaults to 'json' (overrides with RENTERD_LOG_FILE_FORMAT)") + flag.StringVar(&cfg.Log.File.Path, "log.file.path", cfg.Log.File.Path, "Path of log file. Defaults to 'renterd.log' within the renterd directory. (overrides with RENTERD_LOG_FILE_PATH)") + flag.BoolVar(&cfg.Log.StdOut.Enabled, "log.stdout.enabled", cfg.Log.StdOut.Enabled, "Enables logging to stdout. Defaults to 'true'. (overrides with RENTERD_LOG_STDOUT_ENABLED)") + flag.StringVar(&cfg.Log.StdOut.Format, "log.stdout.format", cfg.Log.StdOut.Format, "Format of log output (json|human). Defaults to 'human' (overrides with RENTERD_LOG_STDOUT_FORMAT)") + flag.BoolVar(&cfg.Log.StdOut.EnableANSI, "log.stdout.enableANSI", cfg.Log.StdOut.EnableANSI, "Enables ANSI color codes in log output. Defaults to 'true' on non-Windows systems. (overrides with RENTERD_LOG_STDOUT_ENABLE_ANSI)") + flag.BoolVar(&cfg.Log.Database.Enabled, "log.database.enabled", cfg.Log.Database.Enabled, "Enable logging database queries. Defaults to 'true' (overrides with RENTERD_LOG_DATABASE_ENABLED)") + flag.StringVar(&cfg.Log.Database.Level, "log.database.level", cfg.Log.Database.Level, "Logger level for database queries (info|warn|error). Defaults to 'info' (overrides with RENTERD_LOG_DATABASE_LEVEL)") + flag.BoolVar(&cfg.Log.Database.IgnoreRecordNotFoundError, "log.database.ignoreRecordNotFoundError", cfg.Log.Database.IgnoreRecordNotFoundError, "Enable ignoring 'not found' errors resulting from database queries. 
Defaults to 'true' (overrides with RENTERD_LOG_DATABASE_IGNORE_RECORD_NOT_FOUND_ERROR)") + flag.DurationVar(&cfg.Log.Database.SlowThreshold, "log.database.slowThreshold", cfg.Log.Database.SlowThreshold, "Threshold for slow queries in logger. Defaults to 100ms (overrides with RENTERD_LOG_DATABASE_SLOW_THRESHOLD)") // db flag.StringVar(&cfg.Database.MySQL.URI, "db.uri", cfg.Database.MySQL.URI, "Database URI for the bus (overrides with RENTERD_DB_URI)") @@ -272,11 +291,6 @@ func main() { flag.StringVar(&cfg.Database.MySQL.Database, "db.name", cfg.Database.MySQL.Database, "Database name for the bus (overrides with RENTERD_DB_NAME)") flag.StringVar(&cfg.Database.MySQL.MetricsDatabase, "db.metricsName", cfg.Database.MySQL.MetricsDatabase, "Database for metrics (overrides with RENTERD_DB_METRICS_NAME)") - // db logger - flag.BoolVar(&cfg.Database.Log.IgnoreRecordNotFoundError, "db.logger.ignoreNotFoundError", cfg.Database.Log.IgnoreRecordNotFoundError, "Ignores 'not found' errors in logger (overrides with RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR)") - flag.StringVar(&cfg.Log.Level, "db.logger.logLevel", cfg.Log.Level, "Logger level (overrides with RENTERD_DB_LOGGER_LOG_LEVEL)") - flag.DurationVar(&cfg.Database.Log.SlowThreshold, "db.logger.slowThreshold", cfg.Database.Log.SlowThreshold, "Threshold for slow queries in logger (overrides with RENTERD_DB_LOGGER_SLOW_THRESHOLD)") - // bus flag.Uint64Var(&cfg.Bus.AnnouncementMaxAgeHours, "bus.announcementMaxAgeHours", cfg.Bus.AnnouncementMaxAgeHours, "Max age for announcements") flag.BoolVar(&cfg.Bus.Bootstrap, "bus.bootstrap", cfg.Bus.Bootstrap, "Bootstraps gateway and consensus modules") @@ -384,6 +398,18 @@ func main() { parseEnvVar("RENTERD_S3_DISABLE_AUTH", &cfg.S3.DisableAuth) parseEnvVar("RENTERD_S3_HOST_BUCKET_ENABLED", &cfg.S3.HostBucketEnabled) + parseEnvVar("RENTERD_LOG_LEVEL", &cfg.Log.Level) + parseEnvVar("RENTERD_LOG_FILE_ENABLED", &cfg.Log.File.Enabled) + parseEnvVar("RENTERD_LOG_FILE_FORMAT", 
&cfg.Log.File.Format) + parseEnvVar("RENTERD_LOG_FILE_PATH", &cfg.Log.File.Path) + parseEnvVar("RENTERD_LOG_STDOUT_ENABLED", &cfg.Log.StdOut.Enabled) + parseEnvVar("RENTERD_LOG_STDOUT_FORMAT", &cfg.Log.StdOut.Format) + parseEnvVar("RENTERD_LOG_STDOUT_ENABLE_ANSI", &cfg.Log.StdOut.EnableANSI) + parseEnvVar("RENTERD_LOG_DATABASE_ENABLED", &cfg.Log.Database.Enabled) + parseEnvVar("RENTERD_LOG_DATABASE_LEVEL", &cfg.Log.Database.Level) + parseEnvVar("RENTERD_LOG_DATABASE_IGNORE_RECORD_NOT_FOUND_ERROR", &cfg.Log.Database.IgnoreRecordNotFoundError) + parseEnvVar("RENTERD_LOG_DATABASE_SLOW_THRESHOLD", &cfg.Log.Database.SlowThreshold) + if cfg.S3.Enabled { var keyPairsV4 string parseEnvVar("RENTERD_S3_KEYPAIRS_V4", &keyPairsV4) @@ -425,10 +451,12 @@ func main() { } // Log level for db + lvlStr := cfg.Log.Level + if cfg.Log.Database.Level != "" { + lvlStr = cfg.Log.Database.Level + } var level logger.LogLevel - switch strings.ToLower(cfg.Log.Level) { - case "silent": - level = logger.Silent + switch strings.ToLower(lvlStr) { case "error": level = logger.Error case "warn": @@ -438,6 +466,9 @@ func main() { default: log.Fatalf("invalid log level %q, options are: silent, error, warn, info", cfg.Log.Level) } + if !cfg.Log.Database.Enabled { + level = logger.Silent + } // Create logger. 
logger, closeFn, err := NewLogger(cfg.Directory, cfg.Log) diff --git a/config/config.go b/config/config.go index 40a26bda0..5f657b0a9 100644 --- a/config/config.go +++ b/config/config.go @@ -30,6 +30,8 @@ type ( } DatabaseLog struct { + Enabled bool `yaml:"enabled,omitempty"` + Level string `yaml:"level,omitempty"` IgnoreRecordNotFoundError bool `yaml:"ignoreRecordNotFoundError,omitempty"` SlowThreshold time.Duration `yaml:"slowThreshold,omitempty"` } From 46da673975e7157a7c48ec46761a42a4cd320ff7 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 18 Mar 2024 14:01:33 +0100 Subject: [PATCH 057/201] main: fix stdout logger using file logger format --- cmd/renterd/logger.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/renterd/logger.go b/cmd/renterd/logger.go index 9e78017a2..22308d88b 100644 --- a/cmd/renterd/logger.go +++ b/cmd/renterd/logger.go @@ -72,7 +72,7 @@ func NewLogger(dir string, cfg config.Log) (*zap.Logger, func(context.Context) e // stdout logger if cfg.StdOut.Enabled { var encoder zapcore.Encoder - switch cfg.File.Format { + switch cfg.StdOut.Format { case "json": encoder = jsonEncoder() default: // stdout defaults to human @@ -98,7 +98,6 @@ func jsonEncoder() zapcore.Encoder { // text. 
func humanEncoder(showColors bool) zapcore.Encoder { cfg := zap.NewProductionEncoderConfig() - cfg.TimeKey = "" // prevent duplicate timestamps cfg.EncodeTime = zapcore.RFC3339TimeEncoder cfg.EncodeDuration = zapcore.StringDurationEncoder From 8e47893580dae2e8403e6de56e36670bc94359f4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 18 Mar 2024 15:06:25 +0100 Subject: [PATCH 058/201] autopilot: no more gouging on collateral --- api/setting.go | 4 ---- autopilot/autopilot.go | 1 - autopilot/hostscore.go | 2 +- build/env_default.go | 1 - internal/test/config.go | 1 - worker/gouging.go | 12 ------------ 6 files changed, 1 insertion(+), 20 deletions(-) diff --git a/api/setting.go b/api/setting.go index 47785c9aa..86162c0a8 100644 --- a/api/setting.go +++ b/api/setting.go @@ -38,10 +38,6 @@ type ( // GougingSettings contain some price settings used in price gouging. GougingSettings struct { - // MinMaxCollateral is the minimum value for 'MaxCollateral' in the host's - // price settings - MinMaxCollateral types.Currency `json:"minMaxCollateral"` - // MaxRPCPrice is the maximum allowed base price for RPCs MaxRPCPrice types.Currency `json:"maxRPCPrice"` diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index c89049286..dc5b84dfc 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -804,7 +804,6 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu // these are not optimised, so we keep the same values as the user // provided - MinMaxCollateral: gs.MinMaxCollateral, HostBlockHeightLeeway: gs.HostBlockHeightLeeway, MinPriceTableValidity: gs.MinPriceTableValidity, MinAccountExpiry: gs.MinAccountExpiry, diff --git a/autopilot/hostscore.go b/autopilot/hostscore.go index e8d9ca9b9..3c26dce42 100644 --- a/autopilot/hostscore.go +++ b/autopilot/hostscore.go @@ -161,7 +161,7 @@ func collateralScore(cfg api.AutopilotConfig, pt rhpv3.HostPriceTable, allocatio cutoffMultiplier := uint64(4) if 
expectedCollateral.Cmp(cutoff) < 0 { - return 0 // expectedCollateral <= cutoff -> score is 0 + return math.SmallestNonzeroFloat64 // expectedCollateral <= cutoff -> score is basically 0 } else if expectedCollateral.Cmp(cutoff.Mul64(cutoffMultiplier)) >= 0 { return 1 // expectedCollateral is 10x cutoff -> score is 1 } else { diff --git a/build/env_default.go b/build/env_default.go index 8820ec6ae..83003de60 100644 --- a/build/env_default.go +++ b/build/env_default.go @@ -22,7 +22,6 @@ var ( // configured with on startup. These values can be adjusted using the // settings API. DefaultGougingSettings = api.GougingSettings{ - MinMaxCollateral: types.Siacoins(10), // at least up to 10 SC per contract MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC MaxContractPrice: types.Siacoins(1), // 1 SC per contract MaxDownloadPrice: types.Siacoins(3000), // 3000 SC per 1 TiB diff --git a/internal/test/config.go b/internal/test/config.go index 7553fa16d..68a5fff5b 100644 --- a/internal/test/config.go +++ b/internal/test/config.go @@ -39,7 +39,6 @@ var ( } GougingSettings = api.GougingSettings{ - MinMaxCollateral: types.Siacoins(10), // at least up to 10 SC per contract MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC MaxContractPrice: types.Siacoins(10), // 10 SC per contract MaxDownloadPrice: types.Siacoins(1).Mul64(1000), // 1000 SC per 1 TiB diff --git a/worker/gouging.go b/worker/gouging.go index a7b2078a1..e8e362040 100644 --- a/worker/gouging.go +++ b/worker/gouging.go @@ -171,14 +171,6 @@ func checkPriceGougingHS(gs api.GougingSettings, hs *rhpv2.HostSettings) error { return fmt.Errorf("contract price exceeds max: %v > %v", hs.ContractPrice, gs.MaxContractPrice) } - // check max collateral - if hs.MaxCollateral.IsZero() { - return errors.New("MaxCollateral of host is 0") - } - if hs.MaxCollateral.Cmp(gs.MinMaxCollateral) < 0 { - return fmt.Errorf("MaxCollateral is below minimum: %v < %v", hs.MaxCollateral, gs.MinMaxCollateral) - } - // check max EA 
balance if hs.MaxEphemeralAccountBalance.Cmp(gs.MinMaxEphemeralAccountBalance) < 0 { return fmt.Errorf("'MaxEphemeralAccountBalance' is less than the allowed minimum value, %v < %v", hs.MaxEphemeralAccountBalance, gs.MinMaxEphemeralAccountBalance) @@ -219,10 +211,6 @@ func checkPriceGougingPT(gs api.GougingSettings, cs api.ConsensusState, txnFee t if pt.MaxCollateral.IsZero() { return errors.New("MaxCollateral of host is 0") } - if pt.MaxCollateral.Cmp(gs.MinMaxCollateral) < 0 { - return fmt.Errorf("MaxCollateral is below minimum: %v < %v", pt.MaxCollateral, gs.MinMaxCollateral) - } - // check ReadLengthCost - should be 1H as it's unused by hosts if types.NewCurrency64(1).Cmp(pt.ReadLengthCost) < 0 { return fmt.Errorf("ReadLengthCost of host is %v but should be %v", pt.ReadLengthCost, types.NewCurrency64(1)) From b8c56488ef95b46f78ede30dc8586040b2ec4d2a Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 18 Mar 2024 17:09:12 +0100 Subject: [PATCH 059/201] stores: add dbHostInfo --- api/autopilot.go | 66 ----- api/host.go | 106 +++++++- bus/bus.go | 67 ++++++ stores/hostdb.go | 226 ++++++++++++++++++ stores/hostdb_test.go | 122 ++++++++++ stores/migrations.go | 6 + .../mysql/main/migration_00007_host_info.sql | 54 +++++ stores/migrations/mysql/main/schema.sql | 55 +++++ .../sqlite/main/migration_00007_host_info.sql | 54 +++++ stores/migrations/sqlite/main/schema.sql | 20 ++ 10 files changed, 703 insertions(+), 73 deletions(-) create mode 100644 stores/migrations/mysql/main/migration_00007_host_info.sql create mode 100644 stores/migrations/sqlite/main/migration_00007_host_info.sql diff --git a/api/autopilot.go b/api/autopilot.go index fdd6c4942..b20696fa9 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -2,8 +2,6 @@ package api import ( "errors" - "fmt" - "strings" "go.sia.tech/core/types" "go.sia.tech/renterd/hostdb" @@ -136,68 +134,4 @@ type ( Usable bool `json:"usable"` UnusableReasons []string `json:"unusableReasons"` } - - HostGougingBreakdown struct { - 
ContractErr string `json:"contractErr"` - DownloadErr string `json:"downloadErr"` - GougingErr string `json:"gougingErr"` - PruneErr string `json:"pruneErr"` - UploadErr string `json:"uploadErr"` - } - - HostScoreBreakdown struct { - Age float64 `json:"age"` - Collateral float64 `json:"collateral"` - Interactions float64 `json:"interactions"` - StorageRemaining float64 `json:"storageRemaining"` - Uptime float64 `json:"uptime"` - Version float64 `json:"version"` - Prices float64 `json:"prices"` - } ) - -func (sb HostScoreBreakdown) String() string { - return fmt.Sprintf("Age: %v, Col: %v, Int: %v, SR: %v, UT: %v, V: %v, Pr: %v", sb.Age, sb.Collateral, sb.Interactions, sb.StorageRemaining, sb.Uptime, sb.Version, sb.Prices) -} - -func (hgb HostGougingBreakdown) Gouging() bool { - for _, err := range []string{ - hgb.ContractErr, - hgb.DownloadErr, - hgb.GougingErr, - hgb.PruneErr, - hgb.UploadErr, - } { - if err != "" { - return true - } - } - return false -} - -func (hgb HostGougingBreakdown) String() string { - var reasons []string - for _, errStr := range []string{ - hgb.ContractErr, - hgb.DownloadErr, - hgb.GougingErr, - hgb.PruneErr, - hgb.UploadErr, - } { - if errStr != "" { - reasons = append(reasons, errStr) - } - } - return strings.Join(reasons, ";") -} - -func (sb HostScoreBreakdown) Score() float64 { - return sb.Age * sb.Collateral * sb.Interactions * sb.StorageRemaining * sb.Uptime * sb.Version * sb.Prices -} - -func (c AutopilotConfig) Validate() error { - if c.Hosts.MaxDowntimeHours > 99*365*24 { - return ErrMaxDowntimeHoursTooHigh - } - return nil -} diff --git a/api/host.go b/api/host.go index aea80a9fe..05905d5b3 100644 --- a/api/host.go +++ b/api/host.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "net/url" + "strings" "go.sia.tech/core/types" "go.sia.tech/renterd/hostdb" @@ -23,6 +24,10 @@ var ( // ErrHostNotFound is returned when a host can't be retrieved from the // database. 
ErrHostNotFound = errors.New("host doesn't exist in hostdb") + + // ErrHostInfoNotFound is returned when host info can't be retrieved from + // the database. + ErrHostInfoNotFound = errors.New("host info doesn't exist in hostdb") ) type ( @@ -42,6 +47,7 @@ type ( MinRecentScanFailures uint64 `json:"minRecentScanFailures"` } + // SearchHostsRequest is the request type for the /hosts endpoint. SearchHostsRequest struct { Offset int `json:"offset"` Limit int `json:"limit"` @@ -50,6 +56,14 @@ type ( AddressContains string `json:"addressContains"` KeyIn []types.PublicKey `json:"keyIn"` } + + // UpdateHostInfoRequest is the request type for the PUT + // /autopilot/:id/host/:hostkey endpoint. + UpdateHostInfoRequest struct { + Gouging HostGougingBreakdown `json:"gouging"` + Score HostScoreBreakdown `json:"score"` + Usability HostUsabilityBreakdown `json:"usability"` + } ) type ( @@ -88,13 +102,6 @@ type ( } ) -func DefaultSearchHostOptions() SearchHostOptions { - return SearchHostOptions{ - Limit: -1, - FilterMode: HostFilterModeAll, - } -} - func (opts GetHostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) @@ -115,3 +122,88 @@ func (opts HostsForScanningOptions) Apply(values url.Values) { values.Set("lastScan", fmt.Sprint(TimeRFC3339(opts.MaxLastScan))) } } + +type ( + HostInfo struct { + Host hostdb.Host `json:"host"` + Gouging HostGougingBreakdown `json:"gouging"` + Score HostScoreBreakdown `json:"score"` + Usability HostUsabilityBreakdown `json:"usability"` + } + + HostGougingBreakdown struct { + ContractErr string `json:"contractErr"` + DownloadErr string `json:"downloadErr"` + GougingErr string `json:"gougingErr"` + PruneErr string `json:"pruneErr"` + UploadErr string `json:"uploadErr"` + } + + HostScoreBreakdown struct { + Age float64 `json:"age"` + Collateral float64 `json:"collateral"` + Interactions float64 `json:"interactions"` + StorageRemaining float64 `json:"storageRemaining"` + Uptime float64 
`json:"uptime"` + Version float64 `json:"version"` + Prices float64 `json:"prices"` + } + + HostUsabilityBreakdown struct { + Blocked bool `json:"blocked"` + Offline bool `json:"offline"` + LowScore bool `json:"lowScore"` + RedundantIP bool `json:"redundantIP"` + Gouging bool `json:"gouging"` + NotAcceptingContracts bool `json:"notAcceptingContracts"` + NotAnnounced bool `json:"notAnnounced"` + NotCompletingScan bool `json:"notCompletingScan"` + Unknown bool `json:"unknown"` + } +) + +func (sb HostScoreBreakdown) String() string { + return fmt.Sprintf("Age: %v, Col: %v, Int: %v, SR: %v, UT: %v, V: %v, Pr: %v", sb.Age, sb.Collateral, sb.Interactions, sb.StorageRemaining, sb.Uptime, sb.Version, sb.Prices) +} + +func (hgb HostGougingBreakdown) Gouging() bool { + for _, err := range []string{ + hgb.ContractErr, + hgb.DownloadErr, + hgb.GougingErr, + hgb.PruneErr, + hgb.UploadErr, + } { + if err != "" { + return true + } + } + return false +} + +func (hgb HostGougingBreakdown) String() string { + var reasons []string + for _, errStr := range []string{ + hgb.ContractErr, + hgb.DownloadErr, + hgb.GougingErr, + hgb.PruneErr, + hgb.UploadErr, + } { + if errStr != "" { + reasons = append(reasons, errStr) + } + } + return strings.Join(reasons, ";") +} + +func (sb HostScoreBreakdown) Score() float64 { + return sb.Age * sb.Collateral * sb.Interactions * sb.StorageRemaining * sb.Uptime * sb.Version * sb.Prices +} + +func (c AutopilotConfig) Validate() error { + if c.Hosts.MaxDowntimeHours > 99*365*24 { + return ErrMaxDowntimeHoursTooHigh + } + return nil +} diff --git a/bus/bus.go b/bus/bus.go index 05770eb96..c8875a092 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -104,6 +104,10 @@ type ( HostBlocklist(ctx context.Context) ([]string, error) UpdateHostAllowlistEntries(ctx context.Context, add, remove []types.PublicKey, clear bool) error UpdateHostBlocklistEntries(ctx context.Context, add, remove []string, clear bool) error + + HostInfo(ctx context.Context, autopilotID string, hk 
types.PublicKey) (api.HostInfo, error) + HostInfos(ctx context.Context, autopilotID string) ([]api.HostInfo, error) + UpdateHostInfo(ctx context.Context, autopilotID string, hk types.PublicKey, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) error } // A MetadataStore stores information about contracts and objects. @@ -254,6 +258,10 @@ func (b *bus) Handler() http.Handler { "GET /autopilot/:id": b.autopilotsHandlerGET, "PUT /autopilot/:id": b.autopilotsHandlerPUT, + "GET /autopilot/:id/hosts": b.autopilotHostInfosHandlerGET, + "GET /autopilot/:id/host/:hostkey": b.autopilotHostInfoHandlerGET, + "PUT /autopilot/:id/host/:hostkey": b.autopilotHostInfoHandlerPUT, + "GET /buckets": b.bucketsHandlerGET, "POST /buckets": b.bucketsHandlerPOST, "PUT /bucket/:name/policy": b.bucketsHandlerPolicyPUT, @@ -1961,6 +1969,65 @@ func (b *bus) autopilotsHandlerPUT(jc jape.Context) { jc.Check("failed to update autopilot", b.as.UpdateAutopilot(jc.Request.Context(), ap)) } +func (b *bus) autopilotHostInfoHandlerGET(jc jape.Context) { + var id string + if jc.DecodeParam("id", &id) != nil { + return + } + var hostKey types.PublicKey + if jc.DecodeParam("hostkey", &hostKey) != nil { + return + } + + hi, err := b.hdb.HostInfo(jc.Request.Context(), id, hostKey) + if errors.Is(err, api.ErrAutopilotNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if jc.Check("failed to fetch host info", err) != nil { + return + } + jc.Encode(hi) +} + +func (b *bus) autopilotHostInfosHandlerGET(jc jape.Context) { + var id string + if jc.DecodeParam("id", &id) != nil { + return + } + + his, err := b.hdb.HostInfos(jc.Request.Context(), id) + if errors.Is(err, api.ErrAutopilotNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if jc.Check("failed to fetch host infos", err) != nil { + return + } + jc.Encode(his) +} + +func (b *bus) autopilotHostInfoHandlerPUT(jc jape.Context) { + var id string + if jc.DecodeParam("id", &id) != 
nil { + return + } + var hostKey types.PublicKey + if jc.DecodeParam("hostkey", &hostKey) != nil { + return + } + var hir api.UpdateHostInfoRequest + if jc.Check("failed to decode host info", jc.Decode(&hir)) != nil { + return + } + + err := b.hdb.UpdateHostInfo(jc.Request.Context(), id, hostKey, hir.Gouging, hir.Score, hir.Usability) + if errors.Is(err, api.ErrAutopilotNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if jc.Check("failed to update host info", err) != nil { + return + } +} + func (b *bus) contractTaxHandlerGET(jc jape.Context) { var payout types.Currency if jc.DecodeParam("payout", (*api.ParamCurrency)(&payout)) != nil { diff --git a/stores/hostdb.go b/stores/hostdb.go index 37aa18ee8..0a293f81f 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -80,6 +80,45 @@ type ( Blocklist []dbBlocklistEntry `gorm:"many2many:host_blocklist_entry_hosts;constraint:OnDelete:CASCADE"` } + // dbHostInfo contains information about a host that is collected and used + // by the autopilot. 
+ dbHostInfo struct { + Model + + DBAutopilotID uint `gorm:"index:idx_host_infos_id,unique"` + DBAutopilot dbAutopilot + + DBHostID uint `gorm:"index:idx_host_infos_id,unique"` + DBHost dbHost + + // usability + UsabilityBlocked bool `gorm:"index:idx_host_infos_usability_blocked"` + UsabilityOffline bool `gorm:"index:idx_host_infos_usability_offline"` + UsabilityLowScore bool `gorm:"index:idx_host_infos_usability_low_score"` + UsabilityRedundantIP bool `gorm:"index:idx_host_infos_usability_redundant_ip"` + UsabilityGouging bool `gorm:"index:idx_host_infos_usability_gouging"` + UsabilityNotAcceptingContracts bool `gorm:"index:idx_host_infos_usability_not_accepting_contracts"` + UsabilityNotAnnounced bool `gorm:"index:idx_host_infos_usability_not_announced"` + UsabilityNotCompletingScan bool `gorm:"index:idx_host_infos_usability_not_completing_scan"` + UsabilityUnknown bool `gorm:"index:idx_host_infos_usability_unknown"` + + // score + ScoreAge float64 `gorm:"index:idx_host_infos_score_age"` + ScoreCollateral float64 `gorm:"index:idx_host_infos_score_collateral"` + ScoreInteractions float64 `gorm:"index:idx_host_infos_score_interactions"` + ScoreStorageRemaining float64 `gorm:"index:idx_host_infos_score_storage_remaining"` + ScoreUptime float64 `gorm:"index:idx_host_infos_score_uptime"` + ScoreVersion float64 `gorm:"index:idx_host_infos_score_version"` + ScorePrices float64 `gorm:"index:idx_host_infos_score_prices"` + + // gouging + GougingContractErr string + GougingDownloadErr string + GougingGougingErr string + GougingPruneErr string + GougingUploadErr string + } + // dbAllowlistEntry defines a table that stores the host blocklist. dbAllowlistEntry struct { Model @@ -275,6 +314,9 @@ func (dbConsensusInfo) TableName() string { return "consensus_infos" } // TableName implements the gorm.Tabler interface. func (dbHost) TableName() string { return "hosts" } +// TableName implements the gorm.Tabler interface. 
+func (dbHostInfo) TableName() string { return "host_infos" } + // TableName implements the gorm.Tabler interface. func (dbAllowlistEntry) TableName() string { return "host_allowlist_entries" } @@ -318,6 +360,70 @@ func (h dbHost) convert() hostdb.Host { } } +func (hi dbHostInfo) convert() api.HostInfo { + return api.HostInfo{ + Host: hi.DBHost.convert(), + Gouging: api.HostGougingBreakdown{ + ContractErr: hi.GougingContractErr, + DownloadErr: hi.GougingDownloadErr, + GougingErr: hi.GougingGougingErr, + PruneErr: hi.GougingPruneErr, + UploadErr: hi.GougingUploadErr, + }, + Score: api.HostScoreBreakdown{ + Age: hi.ScoreAge, + Collateral: hi.ScoreCollateral, + Interactions: hi.ScoreInteractions, + StorageRemaining: hi.ScoreStorageRemaining, + Uptime: hi.ScoreUptime, + Version: hi.ScoreVersion, + Prices: hi.ScorePrices, + }, + Usability: api.HostUsabilityBreakdown{ + Blocked: hi.UsabilityBlocked, + Offline: hi.UsabilityOffline, + LowScore: hi.UsabilityLowScore, + RedundantIP: hi.UsabilityRedundantIP, + Gouging: hi.UsabilityGouging, + NotAcceptingContracts: hi.UsabilityNotAcceptingContracts, + NotAnnounced: hi.UsabilityNotAnnounced, + NotCompletingScan: hi.UsabilityNotCompletingScan, + Unknown: hi.UsabilityUnknown, + }, + } +} + +func convertHostInfo(apID, hID uint, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) *dbHostInfo { + return &dbHostInfo{ + DBAutopilotID: apID, + DBHostID: hID, + + UsabilityBlocked: usability.Blocked, + UsabilityOffline: usability.Offline, + UsabilityLowScore: usability.LowScore, + UsabilityRedundantIP: usability.RedundantIP, + UsabilityGouging: usability.Gouging, + UsabilityNotAcceptingContracts: usability.NotAcceptingContracts, + UsabilityNotAnnounced: usability.NotAnnounced, + UsabilityNotCompletingScan: usability.NotCompletingScan, + UsabilityUnknown: usability.Unknown, + + ScoreAge: score.Age, + ScoreCollateral: score.Collateral, + ScoreInteractions: score.Interactions, + 
ScoreStorageRemaining: score.StorageRemaining, + ScoreUptime: score.Uptime, + ScoreVersion: score.Version, + ScorePrices: score.Prices, + + GougingContractErr: gouging.ContractErr, + GougingDownloadErr: gouging.DownloadErr, + GougingGougingErr: gouging.GougingErr, + GougingPruneErr: gouging.PruneErr, + GougingUploadErr: gouging.UploadErr, + } +} + func (h *dbHost) BeforeCreate(tx *gorm.DB) (err error) { tx.Statement.AddClause(clause.OnConflict{ Columns: []clause.Column{{Name: "public_key"}}, @@ -443,6 +549,126 @@ func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.H }, nil } +func (ss *SQLStore) HostInfo(ctx context.Context, autopilotID string, hk types.PublicKey) (hi api.HostInfo, err error) { + err = ss.db.Transaction(func(tx *gorm.DB) error { + // fetch ap id + var apID uint + if err := tx. + Model(&dbAutopilot{}). + Where("identifier = ?", autopilotID). + Select("id"). + Take(&apID). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrAutopilotNotFound + } else if err != nil { + return err + } + + // fetch host id + var hID uint + if err := tx. + Model(&dbHost{}). + Where("public_key = ?", publicKey(hk)). + Select("id"). + Take(&hID). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrHostNotFound + } else if err != nil { + return err + } + + // fetch host info + var entity dbHostInfo + if err := tx. + Model(&dbHostInfo{}). + Where("db_autopilot_id = ? AND db_host_id = ?", apID, hID). + Preload("DBHost"). + First(&entity). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrHostInfoNotFound + } else if err != nil { + return err + } + + hi = entity.convert() + return nil + }) + return +} + +func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string) (his []api.HostInfo, err error) { + err = ss.db.Transaction(func(tx *gorm.DB) error { + // fetch ap id + var apID uint + if err := tx. + Model(&dbAutopilot{}). + Where("identifier = ?", autopilotID). + Select("id"). + Take(&apID). 
+ Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrAutopilotNotFound + } else if err != nil { + return err + } + + // fetch host info + var infos []dbHostInfo + if err := tx. + Model(&dbHostInfo{}). + Where("db_autopilot_id = ?", apID). + Preload("DBHost"). + Find(&infos). + Error; err != nil { + return err + } + for _, hi := range infos { + his = append(his, hi.convert()) + } + return nil + }) + return +} + +func (ss *SQLStore) UpdateHostInfo(ctx context.Context, autopilotID string, hk types.PublicKey, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) (err error) { + err = ss.db.Transaction(func(tx *gorm.DB) error { + // fetch ap id + var apID uint + if err := tx. + Model(&dbAutopilot{}). + Where("identifier = ?", autopilotID). + Select("id"). + Take(&apID). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrAutopilotNotFound + } else if err != nil { + return err + } + + // fetch host id + var hID uint + if err := tx. + Model(&dbHost{}). + Where("public_key = ?", publicKey(hk)). + Select("id"). + Take(&hID). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrHostNotFound + } else if err != nil { + return err + } + + // update host info + return tx. + Clauses(clause.OnConflict{ + Columns: []clause.Column{{Name: "db_autopilot_id"}, {Name: "db_host_id"}}, + UpdateAll: true, + }). + Create(convertHostInfo(apID, hID, gouging, score, usability)). + Error + }) + return +} + // HostsForScanning returns the address of hosts for scanning. 
func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) { if offset < 0 { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 35872ea2d..06692ad8c 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -1064,6 +1064,95 @@ func TestAnnouncementMaxAge(t *testing.T) { } } +func TestHostInfo(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // fetch info for a non-existing autopilot + _, err := ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if !errors.Is(err, api.ErrAutopilotNotFound) { + t.Fatal(err) + } + + // add autopilot + err = ss.UpdateAutopilot(context.Background(), api.Autopilot{ID: "foo"}) + if err != nil { + t.Fatal(err) + } + + // fetch info for a non-existing host + _, err = ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if !errors.Is(err, api.ErrHostNotFound) { + t.Fatal(err) + } + + // add host + err = ss.addTestHost(types.PublicKey{1}) + if err != nil { + t.Fatal(err) + } + h, err := ss.Host(context.Background(), types.PublicKey{1}) + if err != nil { + t.Fatal(err) + } + + // fetch non-existing info + _, err = ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if !errors.Is(err, api.ErrHostInfoNotFound) { + t.Fatal(err) + } + + // add host info + want := newTestHostInfo(h.Host) + err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) + if err != nil { + t.Fatal(err) + } + + // fetch info + got, err := ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(got, want) { + t.Fatal("mismatch", cmp.Diff(got, want)) + } + + // update info + want.Score.Age = 0 + err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) + if err != nil { + t.Fatal(err) + } + + // fetch info + got, err = 
ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(got, want) { + t.Fatal("mismatch") + } + + // add another host info + err = ss.addTestHost(types.PublicKey{2}) + if err != nil { + t.Fatal(err) + } + err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{2}, want.Gouging, want.Score, want.Usability) + if err != nil { + t.Fatal(err) + } + + // fetch all infos for autopilot + his, err := ss.HostInfos(context.Background(), "foo") + if err != nil { + t.Fatal(err) + } else if len(his) != 2 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{1}) || his[1].Host.PublicKey != (types.PublicKey{2}) { + t.Fatal("unexpected", his) + } +} + // addTestHosts adds 'n' hosts to the db and returns their keys. func (s *SQLStore) addTestHosts(n int) (keys []types.PublicKey, err error) { cnt, err := s.contractsCount() @@ -1156,3 +1245,36 @@ func newTestTransaction(ha modules.HostAnnouncement, sk types.PrivateKey) stypes buf.Write(encoding.Marshal(sk.SignHash(types.Hash256(crypto.HashObject(ha))))) return stypes.Transaction{ArbitraryData: [][]byte{buf.Bytes()}} } + +func newTestHostInfo(h hostdb.Host) api.HostInfo { + return api.HostInfo{ + Host: h, + Gouging: api.HostGougingBreakdown{ + ContractErr: "foo", + DownloadErr: "bar", + GougingErr: "baz", + PruneErr: "qux", + UploadErr: "quuz", + }, + Score: api.HostScoreBreakdown{ + Age: .1, + Collateral: .2, + Interactions: .3, + StorageRemaining: .4, + Uptime: .5, + Version: .6, + Prices: .7, + }, + Usability: api.HostUsabilityBreakdown{ + Blocked: true, + Offline: true, + LowScore: true, + RedundantIP: true, + Gouging: true, + NotAcceptingContracts: true, + NotAnnounced: true, + NotCompletingScan: true, + Unknown: true, + }, + } +} diff --git a/stores/migrations.go b/stores/migrations.go index b0304090e..e1d298ab8 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -62,6 +62,12 @@ func performMigrations(db 
*gorm.DB, logger *zap.SugaredLogger) error { return performMigration(tx, dbIdentifier, "00006_idx_objects_created_at", logger) }, }, + { + ID: "00007_host_info", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00007_host_info", logger) + }, + }, } // Create migrator. diff --git a/stores/migrations/mysql/main/migration_00007_host_info.sql b/stores/migrations/mysql/main/migration_00007_host_info.sql new file mode 100644 index 000000000..69864b3e1 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00007_host_info.sql @@ -0,0 +1,54 @@ +-- dbHostInfo +CREATE TABLE `host_infos` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + + `db_autopilot_id` bigint unsigned NOT NULL, + `db_host_id` bigint unsigned NOT NULL, + + `usability_blocked` boolean NOT NULL DEFAULT false, + `usability_offline` boolean NOT NULL DEFAULT false, + `usability_low_score` boolean NOT NULL DEFAULT false, + `usability_redundant_ip` boolean NOT NULL DEFAULT false, + `usability_gouging` boolean NOT NULL DEFAULT false, + `usability_not_accepting_contracts` boolean NOT NULL DEFAULT false, + `usability_not_announced` boolean NOT NULL DEFAULT false, + `usability_not_completing_scan` boolean NOT NULL DEFAULT false, + `usability_unknown` boolean NOT NULL DEFAULT false, + + `score_age` double NOT NULL, + `score_collateral` double NOT NULL, + `score_interactions` double NOT NULL, + `score_storage_remaining` double NOT NULL, + `score_uptime` double NOT NULL, + `score_version` double NOT NULL, + `score_prices` double NOT NULL, + + `gouging_contract_err` text, + `gouging_download_err` text, + `gouging_gouging_err` text, + `gouging_prune_err` text, + `gouging_upload_err` text, + + PRIMARY KEY (`id`), + UNIQUE KEY `idx_host_infos_id` (`db_autopilot_id`, `db_host_id`), + INDEX `idx_host_infos_usability_blocked` (`usability_blocked`), + INDEX `idx_host_infos_usability_offline` (`usability_offline`), + INDEX 
`idx_host_infos_usability_low_score` (`usability_low_score`), + INDEX `idx_host_infos_usability_redundant_ip` (`usability_redundant_ip`), + INDEX `idx_host_infos_usability_gouging` (`usability_gouging`), + INDEX `idx_host_infos_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), + INDEX `idx_host_infos_usability_not_announced` (`usability_not_announced`), + INDEX `idx_host_infos_usability_not_completing_scan` (`usability_not_completing_scan`), + INDEX `idx_host_infos_usability_unknown` (`usability_unknown`), + INDEX `idx_host_infos_score_age` (`score_age`), + INDEX `idx_host_infos_score_collateral` (`score_collateral`), + INDEX `idx_host_infos_score_interactions` (`score_interactions`), + INDEX `idx_host_infos_score_storage_remaining` (`score_storage_remaining`), + INDEX `idx_host_infos_score_uptime` (`score_uptime`), + INDEX `idx_host_infos_score_version` (`score_version`), + INDEX `idx_host_infos_score_prices` (`score_prices`), + + CONSTRAINT `fk_host_infos_autopilot` FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_host_infos_host` FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 68b42ae47..4eaa91499 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -422,5 +422,60 @@ CREATE TABLE `object_user_metadata` ( CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; +-- dbHostInfo +CREATE TABLE `host_infos` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + + `db_autopilot_id` bigint unsigned NOT NULL, + `db_host_id` bigint unsigned NOT NULL, + + `usability_blocked` 
boolean NOT NULL DEFAULT false, + `usability_offline` boolean NOT NULL DEFAULT false, + `usability_low_score` boolean NOT NULL DEFAULT false, + `usability_redundant_ip` boolean NOT NULL DEFAULT false, + `usability_gouging` boolean NOT NULL DEFAULT false, + `usability_not_accepting_contracts` boolean NOT NULL DEFAULT false, + `usability_not_announced` boolean NOT NULL DEFAULT false, + `usability_not_completing_scan` boolean NOT NULL DEFAULT false, + `usability_unknown` boolean NOT NULL DEFAULT false, + + `score_age` double NOT NULL, + `score_collateral` double NOT NULL, + `score_interactions` double NOT NULL, + `score_storage_remaining` double NOT NULL, + `score_uptime` double NOT NULL, + `score_version` double NOT NULL, + `score_prices` double NOT NULL, + + `gouging_contract_err` text, + `gouging_download_err` text, + `gouging_gouging_err` text, + `gouging_prune_err` text, + `gouging_upload_err` text, + + PRIMARY KEY (`id`), + UNIQUE KEY `idx_host_infos_id` (`db_autopilot_id`, `db_host_id`), + INDEX `idx_host_infos_usability_blocked` (`usability_blocked`), + INDEX `idx_host_infos_usability_offline` (`usability_offline`), + INDEX `idx_host_infos_usability_low_score` (`usability_low_score`), + INDEX `idx_host_infos_usability_redundant_ip` (`usability_redundant_ip`), + INDEX `idx_host_infos_usability_gouging` (`usability_gouging`), + INDEX `idx_host_infos_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), + INDEX `idx_host_infos_usability_not_announced` (`usability_not_announced`), + INDEX `idx_host_infos_usability_not_completing_scan` (`usability_not_completing_scan`), + INDEX `idx_host_infos_usability_unknown` (`usability_unknown`), + INDEX `idx_host_infos_score_age` (`score_age`), + INDEX `idx_host_infos_score_collateral` (`score_collateral`), + INDEX `idx_host_infos_score_interactions` (`score_interactions`), + INDEX `idx_host_infos_score_storage_remaining` (`score_storage_remaining`), + INDEX `idx_host_infos_score_uptime` (`score_uptime`), 
+ INDEX `idx_host_infos_score_version` (`score_version`), + INDEX `idx_host_infos_score_prices` (`score_prices`), + + CONSTRAINT `fk_host_infos_autopilot` FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_host_infos_host` FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + -- create default bucket INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); \ No newline at end of file diff --git a/stores/migrations/sqlite/main/migration_00007_host_info.sql b/stores/migrations/sqlite/main/migration_00007_host_info.sql new file mode 100644 index 000000000..5d425dfe7 --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00007_host_info.sql @@ -0,0 +1,54 @@ +-- dbHostInfo +CREATE TABLE `host_infos` ( + `id` INTEGER PRIMARY KEY AUTOINCREMENT, + `created_at` datetime, + + `db_autopilot_id` INTEGER NOT NULL, + `db_host_id` INTEGER NOT NULL, + + `usability_blocked` INTEGER NOT NULL DEFAULT 0, + `usability_offline` INTEGER NOT NULL DEFAULT 0, + `usability_low_score` INTEGER NOT NULL DEFAULT 0, + `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, + `usability_gouging` INTEGER NOT NULL DEFAULT 0, + `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, + `usability_not_announced` INTEGER NOT NULL DEFAULT 0, + `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, + `usability_unknown` INTEGER NOT NULL DEFAULT 0, + + `score_age` REAL NOT NULL, + `score_collateral` REAL NOT NULL, + `score_interactions` REAL NOT NULL, + `score_storage_remaining` REAL NOT NULL, + `score_uptime` REAL NOT NULL, + `score_version` REAL NOT NULL, + `score_prices` REAL NOT NULL, + + `gouging_contract_err` TEXT, + `gouging_download_err` TEXT, + `gouging_gouging_err` TEXT, + `gouging_prune_err` TEXT, + `gouging_upload_err` TEXT, + + FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, + FOREIGN KEY 
(`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE +); + +-- Indexes creation +CREATE UNIQUE INDEX `idx_host_infos_id` ON `host_infos` (`db_autopilot_id`, `db_host_id`); +CREATE INDEX `idx_host_infos_usability_blocked` ON `host_infos` (`usability_blocked`); +CREATE INDEX `idx_host_infos_usability_offline` ON `host_infos` (`usability_offline`); +CREATE INDEX `idx_host_infos_usability_low_score` ON `host_infos` (`usability_low_score`); +CREATE INDEX `idx_host_infos_usability_redundant_ip` ON `host_infos` (`usability_redundant_ip`); +CREATE INDEX `idx_host_infos_usability_gouging` ON `host_infos` (`usability_gouging`); +CREATE INDEX `idx_host_infos_usability_not_accepting_contracts` ON `host_infos` (`usability_not_accepting_contracts`); +CREATE INDEX `idx_host_infos_usability_not_announced` ON `host_infos` (`usability_not_announced`); +CREATE INDEX `idx_host_infos_usability_not_completing_scan` ON `host_infos` (`usability_not_completing_scan`); +CREATE INDEX `idx_host_infos_usability_unknown` ON `host_infos` (`usability_unknown`); +CREATE INDEX `idx_host_infos_score_age` ON `host_infos` (`score_age`); +CREATE INDEX `idx_host_infos_score_collateral` ON `host_infos` (`score_collateral`); +CREATE INDEX `idx_host_infos_score_interactions` ON `host_infos` (`score_interactions`); +CREATE INDEX `idx_host_infos_score_storage_remaining` ON `host_infos` (`score_storage_remaining`); +CREATE INDEX `idx_host_infos_score_uptime` ON `host_infos` (`score_uptime`); +CREATE INDEX `idx_host_infos_score_version` ON `host_infos` (`score_version`); +CREATE INDEX `idx_host_infos_score_prices` ON `host_infos` (`score_prices`); diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 9875e81e3..5ec7a2b0e 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -149,5 +149,25 @@ CREATE UNIQUE INDEX `idx_module_event_url` ON `webhooks`(`module`,`event`,`url`) CREATE TABLE `object_user_metadata` 
(`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer DEFAULT NULL,`db_multipart_upload_id` integer DEFAULT NULL,`key` text NOT NULL,`value` text, CONSTRAINT `fk_object_user_metadata` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL); CREATE UNIQUE INDEX `idx_object_user_metadata_key` ON `object_user_metadata`(`db_object_id`,`db_multipart_upload_id`,`key`); +-- dbHostInfo +CREATE TABLE `host_infos` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `created_at` datetime, `db_autopilot_id` INTEGER NOT NULL, `db_host_id` INTEGER NOT NULL, `usability_blocked` INTEGER NOT NULL DEFAULT 0, `usability_offline` INTEGER NOT NULL DEFAULT 0, `usability_low_score` INTEGER NOT NULL DEFAULT 0, `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, `usability_gouging` INTEGER NOT NULL DEFAULT 0, `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, `usability_not_announced` INTEGER NOT NULL DEFAULT 0, `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, `usability_unknown` INTEGER NOT NULL DEFAULT 0, `score_age` REAL NOT NULL, `score_collateral` REAL NOT NULL, `score_interactions` REAL NOT NULL, `score_storage_remaining` REAL NOT NULL, `score_uptime` REAL NOT NULL, `score_version` REAL NOT NULL, `score_prices` REAL NOT NULL, `gouging_contract_err` TEXT, `gouging_download_err` TEXT, `gouging_gouging_err` TEXT, `gouging_prune_err` TEXT, `gouging_upload_err` TEXT, FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE); +CREATE UNIQUE INDEX `idx_host_infos_id` ON `host_infos` (`db_autopilot_id`, `db_host_id`); +CREATE INDEX `idx_host_infos_usability_blocked` ON `host_infos` (`usability_blocked`); +CREATE INDEX `idx_host_infos_usability_offline` ON `host_infos` (`usability_offline`); 
+CREATE INDEX `idx_host_infos_usability_low_score` ON `host_infos` (`usability_low_score`); +CREATE INDEX `idx_host_infos_usability_redundant_ip` ON `host_infos` (`usability_redundant_ip`); +CREATE INDEX `idx_host_infos_usability_gouging` ON `host_infos` (`usability_gouging`); +CREATE INDEX `idx_host_infos_usability_not_accepting_contracts` ON `host_infos` (`usability_not_accepting_contracts`); +CREATE INDEX `idx_host_infos_usability_not_announced` ON `host_infos` (`usability_not_announced`); +CREATE INDEX `idx_host_infos_usability_not_completing_scan` ON `host_infos` (`usability_not_completing_scan`); +CREATE INDEX `idx_host_infos_usability_unknown` ON `host_infos` (`usability_unknown`); +CREATE INDEX `idx_host_infos_score_age` ON `host_infos` (`score_age`); +CREATE INDEX `idx_host_infos_score_collateral` ON `host_infos` (`score_collateral`); +CREATE INDEX `idx_host_infos_score_interactions` ON `host_infos` (`score_interactions`); +CREATE INDEX `idx_host_infos_score_storage_remaining` ON `host_infos` (`score_storage_remaining`); +CREATE INDEX `idx_host_infos_score_uptime` ON `host_infos` (`score_uptime`); +CREATE INDEX `idx_host_infos_score_version` ON `host_infos` (`score_version`); +CREATE INDEX `idx_host_infos_score_prices` ON `host_infos` (`score_prices`); + -- create default bucket INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); From d70d47faefb45af8f635c9b44e1dea523f30b052 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 18 Mar 2024 21:28:13 +0100 Subject: [PATCH 060/201] autopilot: calculate host info and update in the bus --- api/host.go | 65 +++++++++++++ autopilot/autopilot.go | 61 ++++++++---- autopilot/contractor.go | 101 +++++++++++--------- autopilot/hostfilter.go | 156 +++++++++--------------------- autopilot/hostinfo.go | 206 ---------------------------------------- bus/bus.go | 52 +++++++--- bus/client/hosts.go | 29 ++++++ stores/hostdb.go | 117 +++++++++++++++++------ stores/hostdb_test.go | 107 ++++++++++++++++++++- 
worker/upload.go | 1 - 10 files changed, 468 insertions(+), 427 deletions(-) delete mode 100644 autopilot/hostinfo.go diff --git a/api/host.go b/api/host.go index 05905d5b3..18ae2cc38 100644 --- a/api/host.go +++ b/api/host.go @@ -30,6 +30,19 @@ var ( ErrHostInfoNotFound = errors.New("host info doesn't exist in hostdb") ) +var ( + ErrUsabilityHostBlocked = errors.New("host is blocked") + ErrUsabilityHostNotFound = errors.New("host not found") + ErrUsabilityHostOffline = errors.New("host is offline") + ErrUsabilityHostLowScore = errors.New("host's score is below minimum") + ErrUsabilityHostRedundantIP = errors.New("host has redundant IP") + ErrUsabilityHostPriceGouging = errors.New("host is price gouging") + ErrUsabilityHostNotAcceptingContracts = errors.New("host is not accepting contracts") + ErrUsabilityHostNotCompletingScan = errors.New("host is not completing scan") + ErrUsabilityHostNotAnnounced = errors.New("host is not announced") + ErrUsabilityUnknown = errors.New("unknown") +) + type ( // HostsScanRequest is the request type for the /hosts/scans endpoint. HostsScanRequest struct { @@ -57,6 +70,10 @@ type ( KeyIn []types.PublicKey `json:"keyIn"` } + // HostInfosRequest is the request type for the POST /autopilot/:id/hosts + // endpoint. + HostInfosRequest SearchHostsRequest + // UpdateHostInfoRequest is the request type for the PUT // /autopilot/:id/host/:hostkey endpoint. 
UpdateHostInfoRequest struct { @@ -100,6 +117,10 @@ type ( Limit int Offset int } + HostInfoOptions struct { + SearchHostOptions + UsabilityMode string + } ) func (opts GetHostsOptions) Apply(values url.Values) { @@ -201,6 +222,50 @@ func (sb HostScoreBreakdown) Score() float64 { return sb.Age * sb.Collateral * sb.Interactions * sb.StorageRemaining * sb.Uptime * sb.Version * sb.Prices } +func (ub HostUsabilityBreakdown) Usable() bool { + return !ub.Blocked && + !ub.Offline && + !ub.LowScore && + !ub.RedundantIP && + !ub.Gouging && + !ub.NotAcceptingContracts && + !ub.NotAnnounced && + !ub.NotCompletingScan && + !ub.Unknown +} + +func (ub HostUsabilityBreakdown) UnusableReasons() []string { + var reasons []string + if ub.Blocked { + reasons = append(reasons, ErrUsabilityHostBlocked.Error()) + } + if ub.Offline { + reasons = append(reasons, ErrUsabilityHostOffline.Error()) + } + if ub.LowScore { + reasons = append(reasons, ErrUsabilityHostLowScore.Error()) + } + if ub.RedundantIP { + reasons = append(reasons, ErrUsabilityHostRedundantIP.Error()) + } + if ub.Gouging { + reasons = append(reasons, ErrUsabilityHostPriceGouging.Error()) + } + if ub.NotAcceptingContracts { + reasons = append(reasons, ErrUsabilityHostNotAcceptingContracts.Error()) + } + if ub.NotAnnounced { + reasons = append(reasons, ErrUsabilityHostNotAnnounced.Error()) + } + if ub.NotCompletingScan { + reasons = append(reasons, ErrUsabilityHostNotCompletingScan.Error()) + } + if ub.Unknown { + reasons = append(reasons, ErrUsabilityUnknown.Error()) + } + return reasons +} + func (c AutopilotConfig) Validate() error { if c.Hosts.MaxDowntimeHours > 99*365*24 { return ErrMaxDowntimeHoursTooHigh diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index eb08c9456..2514c7a78 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -59,6 +59,10 @@ type Bus interface { RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) 
SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.Host, error) + HostInfo(ctx context.Context, autopilotID string, hostKey types.PublicKey) (api.HostInfo, error) + HostInfos(ctx context.Context, autopilotID string, opts api.HostInfoOptions) ([]api.HostInfo, error) + UpdateHostInfo(ctx context.Context, autopilotID string, hostKey types.PublicKey, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) error + // metrics RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error RecordContractPruneMetric(ctx context.Context, metrics ...api.ContractPruneMetric) error @@ -685,7 +689,7 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { return } - host, err := ap.c.HostInfo(jc.Request.Context(), hostKey) + host, err := ap.bus.HostInfo(jc.Request.Context(), ap.id, hostKey) if jc.Check("failed to get host info", err) != nil { return } @@ -724,22 +728,45 @@ func (ap *Autopilot) stateHandlerGET(jc jape.Context) { } func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { - var req api.SearchHostsRequest + var req api.HostInfosRequest if jc.Decode(&req) != nil { return } - hosts, err := ap.c.HostInfos(jc.Request.Context(), req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) + hosts, err := ap.bus.HostInfos(jc.Request.Context(), ap.id, api.HostInfoOptions{ + UsabilityMode: req.UsabilityMode, + SearchHostOptions: api.SearchHostOptions{ + FilterMode: req.FilterMode, + AddressContains: req.AddressContains, + KeyIn: req.KeyIn, + Offset: req.Offset, + Limit: req.Limit, + }, + }) if jc.Check("failed to get host info", err) != nil { return } - jc.Encode(hosts) + resps := make([]api.HostHandlerResponse, len(hosts)) + for i, host := range hosts { + resps[i] = api.HostHandlerResponse{ + Host: host.Host, + Checks: &api.HostHandlerResponseChecks{ + Gouging: host.Gouging.Gouging(), + GougingBreakdown: host.Gouging, + Score: host.Score.Score(), 
+ ScoreBreakdown: host.Score, + Usable: host.Usability.Usable(), + UnusableReasons: host.Usability.UnusableReasons(), + }, + } + } + jc.Encode(resps) } func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { - usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) - if usable { + hi := calculateHostInfo(cfg, rs, gc, host, smallestValidScore, 0) + if hi.Usability.Usable() { usables++ } } @@ -754,36 +781,36 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu resp.Hosts = uint64(len(hosts)) for _, host := range hosts { - usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) - if usable { + hi := calculateHostInfo(cfg, rs, gc, host, 0, 0) + if hi.Usability.Usable() { resp.Usable++ continue } - if usableBreakdown.blocked > 0 { + if hi.Usability.Blocked { resp.Unusable.Blocked++ } - if usableBreakdown.notacceptingcontracts > 0 { + if hi.Usability.NotAcceptingContracts { resp.Unusable.NotAcceptingContracts++ } - if usableBreakdown.notcompletingscan > 0 { + if hi.Usability.NotCompletingScan { resp.Unusable.NotScanned++ } - if usableBreakdown.unknown > 0 { + if hi.Usability.Unknown { resp.Unusable.Unknown++ } - if usableBreakdown.gougingBreakdown.ContractErr != "" { + if hi.Gouging.ContractErr != "" { resp.Unusable.Gouging.Contract++ } - if usableBreakdown.gougingBreakdown.DownloadErr != "" { + if hi.Gouging.DownloadErr != "" { resp.Unusable.Gouging.Download++ } - if usableBreakdown.gougingBreakdown.GougingErr != "" { + if hi.Gouging.GougingErr != "" { resp.Unusable.Gouging.Gouging++ } - if usableBreakdown.gougingBreakdown.PruneErr != "" { + if hi.Gouging.PruneErr != "" { resp.Unusable.Gouging.Pruning++ } - if usableBreakdown.gougingBreakdown.UploadErr != "" { + 
if hi.Gouging.UploadErr != "" { resp.Unusable.Gouging.Upload++ } } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 8dfc702f3..810ca093c 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -101,16 +101,10 @@ type ( pruning bool pruningLastStart time.Time - cachedHostInfo map[types.PublicKey]hostInfo cachedDataStored map[types.PublicKey]uint64 cachedMinScore float64 } - hostInfo struct { - Usable bool - UnusableResult unusableHostResult - } - scoredHost struct { host hostdb.Host score float64 @@ -281,35 +275,19 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( c.logger.Warn("could not calculate min score, no hosts found") } - // fetch consensus state - cs, err := c.ap.bus.ConsensusState(ctx) - if err != nil { - return false, err - } - - // create gouging checker - gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - - // prepare hosts for cache - hostInfos := make(map[types.PublicKey]hostInfo) - for _, h := range hosts { - // ignore the pricetable's HostBlockHeight by setting it to our own blockheight - h.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, h, minScore, hostData[h.PublicKey]) - hostInfos[h.PublicKey] = hostInfo{ - Usable: isUsable, - UnusableResult: unusableResult, - } - } - // update cache. 
c.mu.Lock() - c.cachedHostInfo = hostInfos c.cachedDataStored = hostData c.cachedMinScore = minScore c.mu.Unlock() - // run checks + // run host checks + err = c.runHostChecks(ctx, hosts) + if err != nil { + return false, fmt.Errorf("failed to run host checks, err: %v", err) + } + + // run contract checks updatedSet, toArchive, toStopUsing, toRefresh, toRenew, err := c.runContractChecks(ctx, w, contracts, isInCurrentSet, minScore) if err != nil { return false, fmt.Errorf("failed to run contract checks, err: %v", err) @@ -741,15 +719,15 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts host, err := c.ap.bus.Host(ctx, hk) if err != nil { c.logger.Errorw(fmt.Sprintf("missing host, err: %v", err), "hk", hk) - toStopUsing[fcid] = errHostNotFound.Error() + toStopUsing[fcid] = api.ErrUsabilityHostNotFound.Error() notfound++ continue } // if the host is blocked we ignore it, it might be unblocked later if host.Blocked { - c.logger.Infow("unusable host", "hk", hk, "fcid", fcid, "reasons", errHostBlocked.Error()) - toStopUsing[fcid] = errHostBlocked.Error() + c.logger.Infow("unusable host", "hk", hk, "fcid", fcid, "reasons", api.ErrUsabilityHostBlocked.Error()) + toStopUsing[fcid] = api.ErrUsabilityHostBlocked.Error() continue } @@ -780,9 +758,9 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts host.PriceTable.HostBlockHeight = cs.BlockHeight // decide whether the host is still good - usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host.Host, minScore, contract.FileSize()) - if !usable { - reasons := unusableResult.reasons() + hi := calculateHostInfo(state.cfg, state.rs, gc, host.Host, minScore, contract.FileSize()) + if !hi.Usability.Usable() { + reasons := hi.Usability.UnusableReasons() toStopUsing[fcid] = strings.Join(reasons, ",") c.logger.Infow("unusable host", "hk", hk, "fcid", fcid, "reasons", reasons) continue @@ -795,7 +773,7 @@ func (c *contractor) runContractChecks(ctx 
context.Context, w Worker, contracts if _, found := inCurrentSet[fcid]; !found || remainingKeepLeeway == 0 { toStopUsing[fcid] = errContractNoRevision.Error() } else if !state.cfg.Hosts.AllowRedundantIPs && ipFilter.IsRedundantIP(contract.HostIP, contract.HostKey) { - toStopUsing[fcid] = fmt.Sprintf("%v; %v", errHostRedundantIP, errContractNoRevision) + toStopUsing[fcid] = fmt.Sprintf("%v; %v", api.ErrUsabilityHostRedundantIP, errContractNoRevision) } else { toKeep = append(toKeep, contract.ContractMetadata) remainingKeepLeeway-- // we let it slide @@ -847,7 +825,38 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts return toKeep, toArchive, toStopUsing, toRefresh, toRenew, nil } -func (c *contractor) runContractFormations(ctx context.Context, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { +func (c *contractor) runHostChecks(ctx context.Context, hosts []hostdb.Host) error { + // convenience variables + state := c.ap.State() + + // fetch consensus state + cs, err := c.ap.bus.ConsensusState(ctx) + if err != nil { + return err + } + + // create gouging checker + gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) + + // grab min score and host data from cache + c.mu.Lock() + minScore := c.cachedMinScore + hostData := c.cachedDataStored + c.mu.Unlock() + + // update host info + for _, h := range hosts { + h.PriceTable.HostBlockHeight = cs.BlockHeight // ignore HostBlockHeight + hi := calculateHostInfo(state.cfg, state.rs, gc, h, minScore, hostData[h.PublicKey]) + err := c.ap.bus.UpdateHostInfo(ctx, c.ap.id, h.PublicKey, hi.Gouging, hi.Score, hi.Usability) + if err != nil { + c.logger.Errorw(fmt.Sprintf("failed to update host info, err: %v", err), "hk", h.PublicKey) + } + } + return nil +} + +func (c *contractor) 
runContractFormations(ctx context.Context, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostsBreakdown, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { if c.ap.isStopped() { return nil, nil } @@ -1300,13 +1309,13 @@ func (c *contractor) calculateMinScore(ctx context.Context, candidates []scoredH return minScore } -func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { +func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostsBreakdown, error) { start := time.Now() // fetch consensus state cs, err := c.ap.bus.ConsensusState(ctx) if err != nil { - return nil, unusableHostResult{}, err + return nil, unusableHostsBreakdown{}, err } // create a gouging checker @@ -1336,7 +1345,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us "used", len(usedHosts)) // score all unused hosts - var unusableHostResult unusableHostResult + var unusableHosts unusableHostsBreakdown var unusable, zeros int var candidates []scoredHost for _, h := range unused { @@ -1349,15 +1358,15 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us // NOTE: ignore the pricetable's HostBlockHeight by setting it to our // own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - usable, result := isUsableHost(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) - if usable { - candidates = append(candidates, scoredHost{h, result.scoreBreakdown.Score()}) + hi := calculateHostInfo(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) + if hi.Usability.Usable() { + candidates = append(candidates, scoredHost{h, hi.Score.Score()}) 
continue } // keep track of unusable host results - unusableHostResult.merge(result) - if result.scoreBreakdown.Score() == 0 { + unusableHosts.track(hi.Usability) + if hi.Score.Score() == 0 { zeros++ } unusable++ @@ -1368,7 +1377,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us "unusable", unusable, "used", len(usedHosts)) - return candidates, unusableHostResult, nil + return candidates, unusableHosts, nil } func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 574862a97..462be4588 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -5,7 +5,6 @@ import ( "fmt" "math" "math/big" - "strings" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" @@ -32,16 +31,6 @@ const ( ) var ( - errHostBlocked = errors.New("host is blocked") - errHostNotFound = errors.New("host not found") - errHostOffline = errors.New("host is offline") - errLowScore = errors.New("host's score is below minimum") - errHostRedundantIP = errors.New("host has redundant IP") - errHostPriceGouging = errors.New("host is price gouging") - errHostNotAcceptingContracts = errors.New("host is not accepting contracts") - errHostNotCompletingScan = errors.New("host is not completing scan") - errHostNotAnnounced = errors.New("host is not announced") - errContractOutOfCollateral = errors.New("contract is out of collateral") errContractOutOfFunds = errors.New("contract is out of funds") errContractUpForRenewal = errors.New("contract is up for renewal") @@ -51,7 +40,7 @@ var ( errContractNotConfirmed = errors.New("contract hasn't been confirmed on chain in time") ) -type unusableHostResult struct { +type unusableHostsBreakdown struct { blocked uint64 offline uint64 lowscore uint64 @@ -61,99 +50,39 @@ type unusableHostResult struct { notannounced uint64 notcompletingscan uint64 
unknown uint64 - - // gougingBreakdown is mostly ignored, we overload the unusableHostResult - // with a gouging breakdown to be able to return it in the host infos - // endpoint `/hosts/:hostkey` - gougingBreakdown api.HostGougingBreakdown - - // scoreBreakdown is mostly ignored, we overload the unusableHostResult with - // a score breakdown to be able to return it in the host infos endpoint - // `/hosts/:hostkey` - scoreBreakdown api.HostScoreBreakdown } -func newUnusableHostResult(errs []error, gougingBreakdown api.HostGougingBreakdown, scoreBreakdown api.HostScoreBreakdown) (u unusableHostResult) { - for _, err := range errs { - if errors.Is(err, errHostBlocked) { - u.blocked++ - } else if errors.Is(err, errHostOffline) { - u.offline++ - } else if errors.Is(err, errLowScore) { - u.lowscore++ - } else if errors.Is(err, errHostRedundantIP) { - u.redundantip++ - } else if errors.Is(err, errHostPriceGouging) { - u.gouging++ - } else if errors.Is(err, errHostNotAcceptingContracts) { - u.notacceptingcontracts++ - } else if errors.Is(err, errHostNotAnnounced) { - u.notannounced++ - } else if errors.Is(err, errHostNotCompletingScan) { - u.notcompletingscan++ - } else { - u.unknown++ - } +func (u *unusableHostsBreakdown) track(ub api.HostUsabilityBreakdown) { + if ub.Blocked { + u.blocked++ } - - u.gougingBreakdown = gougingBreakdown - u.scoreBreakdown = scoreBreakdown - return -} - -func (u unusableHostResult) String() string { - return fmt.Sprintf("host is unusable because of the following reasons: %v", strings.Join(u.reasons(), ", ")) -} - -func (u unusableHostResult) reasons() []string { - var reasons []string - if u.blocked > 0 { - reasons = append(reasons, errHostBlocked.Error()) + if ub.Offline { + u.offline++ } - if u.offline > 0 { - reasons = append(reasons, errHostOffline.Error()) + if ub.LowScore { + u.lowscore++ } - if u.lowscore > 0 { - reasons = append(reasons, errLowScore.Error()) + if ub.RedundantIP { + u.redundantip++ } - if u.redundantip > 0 { - 
reasons = append(reasons, errHostRedundantIP.Error()) + if ub.Gouging { + u.gouging++ } - if u.gouging > 0 { - reasons = append(reasons, errHostPriceGouging.Error()) + if ub.NotAcceptingContracts { + u.notacceptingcontracts++ } - if u.notacceptingcontracts > 0 { - reasons = append(reasons, errHostNotAcceptingContracts.Error()) + if ub.NotAnnounced { + u.notannounced++ } - if u.notannounced > 0 { - reasons = append(reasons, errHostNotAnnounced.Error()) + if ub.NotCompletingScan { + u.notcompletingscan++ } - if u.notcompletingscan > 0 { - reasons = append(reasons, errHostNotCompletingScan.Error()) + if ub.Unknown { + u.unknown++ } - if u.unknown > 0 { - reasons = append(reasons, "unknown") - } - return reasons } -func (u *unusableHostResult) merge(other unusableHostResult) { - u.blocked += other.blocked - u.offline += other.offline - u.lowscore += other.lowscore - u.redundantip += other.redundantip - u.gouging += other.gouging - u.notacceptingcontracts += other.notacceptingcontracts - u.notannounced += other.notannounced - u.notcompletingscan += other.notcompletingscan - u.unknown += other.unknown - - // scoreBreakdown is not merged - // - // gougingBreakdown is not merged -} - -func (u *unusableHostResult) keysAndValues() []interface{} { +func (u *unusableHostsBreakdown) keysAndValues() []interface{} { values := []interface{}{ "blocked", u.blocked, "offline", u.offline, @@ -174,36 +103,38 @@ func (u *unusableHostResult) keysAndValues() []interface{} { return values } -// isUsableHost returns whether the given host is usable along with a list of -// reasons why it was deemed unusable. -func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.Host, minScore float64, storedData uint64) (bool, unusableHostResult) { +// calculateHostInfo returns the host info for the given host. 
This function will panic if the supplied redundancy settings are invalid. +func calculateHostInfo(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.Host, minScore float64, storedData uint64) api.HostInfo { + // sanity check redundancy settings if rs.Validate() != nil { panic("invalid redundancy settings were supplied - developer error") } - var errs []error - var gougingBreakdown api.HostGougingBreakdown - var scoreBreakdown api.HostScoreBreakdown + // prepare host breakdown fields + var ub api.HostUsabilityBreakdown + var gb api.HostGougingBreakdown + var sb api.HostScoreBreakdown + // populate host info fields if !h.IsAnnounced() { - errs = append(errs, errHostNotAnnounced) + ub.NotAnnounced = true } else if !h.Scanned { - errs = append(errs, errHostNotCompletingScan) + ub.NotCompletingScan = true } else { // online check if !h.IsOnline() { - errs = append(errs, errHostOffline) + ub.Offline = true } // accepting contracts check if !h.Settings.AcceptingContracts { - errs = append(errs, errHostNotAcceptingContracts) + ub.NotAcceptingContracts = true } // perform gouging checks - gougingBreakdown = gc.Check(&h.Settings, &h.PriceTable.HostPriceTable) - if gougingBreakdown.Gouging() { - errs = append(errs, fmt.Errorf("%w: %v", errHostPriceGouging, gougingBreakdown)) + gb = gc.Check(&h.Settings, &h.PriceTable.HostPriceTable) + if gb.Gouging() { + ub.Gouging = true } else if minScore > 0 { // perform scoring checks // @@ -211,14 +142,19 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. 
// not gouging, this because the core package does not have overflow // checks in its cost calculations needed to calculate the period // cost - scoreBreakdown = hostScore(cfg, h, storedData, rs.Redundancy()) - if scoreBreakdown.Score() < minScore { - errs = append(errs, fmt.Errorf("%w: (%s): %v < %v", errLowScore, scoreBreakdown.String(), scoreBreakdown.Score(), minScore)) + sb = hostScore(cfg, h, storedData, rs.Redundancy()) + if sb.Score() < minScore { + ub.LowScore = true } } } - return len(errs) == 0, newUnusableHostResult(errs, gougingBreakdown, scoreBreakdown) + return api.HostInfo{ + Host: h, + Usability: ub, + Gouging: gb, + Score: sb, + } } // isUsableContract returns whether the given contract is @@ -269,7 +205,7 @@ func (c *contractor) isUsableContract(cfg api.AutopilotConfig, state state, ci c // IP check should be last since it modifies the filter shouldFilter := !cfg.Hosts.AllowRedundantIPs && (usable || recoverable) if shouldFilter && f.IsRedundantIP(contract.HostIP, contract.HostKey) { - reasons = append(reasons, errHostRedundantIP.Error()) + reasons = append(reasons, api.ErrUsabilityHostRedundantIP.Error()) usable = false recoverable = false // do not use in the contract set, but keep it around for downloads renew = false // do not renew, but allow refreshes so the contracts stays funded diff --git a/autopilot/hostinfo.go b/autopilot/hostinfo.go deleted file mode 100644 index 82efa1d61..000000000 --- a/autopilot/hostinfo.go +++ /dev/null @@ -1,206 +0,0 @@ -package autopilot - -import ( - "context" - "fmt" - - "go.sia.tech/core/types" - "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" - "go.sia.tech/renterd/worker" -) - -func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api.HostHandlerResponse, error) { - state := c.ap.State() - - if state.cfg.Contracts.Allowance.IsZero() { - return api.HostHandlerResponse{}, fmt.Errorf("can not score hosts because contracts allowance is zero") - } - if state.cfg.Contracts.Amount 
== 0 { - return api.HostHandlerResponse{}, fmt.Errorf("can not score hosts because contracts amount is zero") - } - if state.cfg.Contracts.Period == 0 { - return api.HostHandlerResponse{}, fmt.Errorf("can not score hosts because contract period is zero") - } - - host, err := c.ap.bus.Host(ctx, hostKey) - if err != nil { - return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch requested host from bus: %w", err) - } - gs, err := c.ap.bus.GougingSettings(ctx) - if err != nil { - return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch gouging settings from bus: %w", err) - } - rs, err := c.ap.bus.RedundancySettings(ctx) - if err != nil { - return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) - } - cs, err := c.ap.bus.ConsensusState(ctx) - if err != nil { - return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch consensus state from bus: %w", err) - } - fee, err := c.ap.bus.RecommendedFee(ctx) - if err != nil { - return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch recommended fee from bus: %w", err) - } - c.mu.Lock() - storedData := c.cachedDataStored[hostKey] - minScore := c.cachedMinScore - c.mu.Unlock() - - gc := worker.NewGougingChecker(gs, cs, fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - - // ignore the pricetable's HostBlockHeight by setting it to our own blockheight - host.Host.PriceTable.HostBlockHeight = cs.BlockHeight - - isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host.Host, minScore, storedData) - return api.HostHandlerResponse{ - Host: host.Host, - Checks: &api.HostHandlerResponseChecks{ - Gouging: unusableResult.gougingBreakdown.Gouging(), - GougingBreakdown: unusableResult.gougingBreakdown, - Score: unusableResult.scoreBreakdown.Score(), - ScoreBreakdown: unusableResult.scoreBreakdown, - Usable: isUsable, - UnusableReasons: unusableResult.reasons(), - }, - }, nil -} - -func (c *contractor) hostInfoFromCache(ctx context.Context, host 
hostdb.Host) (hi hostInfo, found bool) { - // grab host details from cache - c.mu.Lock() - hi, found = c.cachedHostInfo[host.PublicKey] - storedData := c.cachedDataStored[host.PublicKey] - minScore := c.cachedMinScore - c.mu.Unlock() - - // return early if the host info is not cached - if !found { - return - } - - // try and refresh the host info if it got scanned in the meantime, this - // inconsistency would resolve itself but trying to update it here improves - // first time user experience - if host.Scanned && hi.UnusableResult.notcompletingscan > 0 { - cs, err := c.ap.bus.ConsensusState(ctx) - if err != nil { - c.logger.Error("failed to fetch consensus state from bus: %v", err) - } else { - state := c.ap.State() - gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, storedData) - hi = hostInfo{ - Usable: isUsable, - UnusableResult: unusableResult, - } - - // update cache - c.mu.Lock() - c.cachedHostInfo[host.PublicKey] = hi - c.mu.Unlock() - } - } - - return -} - -func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostHandlerResponse, error) { - // declare helper to decide whether to keep a host. 
- if !isValidUsabilityFilterMode(usabilityMode) { - return nil, fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable' or an empty string for no filter", usabilityMode) - } - - keep := func(usable bool) bool { - switch usabilityMode { - case api.UsabilityFilterModeUsable: - return usable // keep usable - case api.UsabilityFilterModeUnusable: - return !usable // keep unusable - case api.UsabilityFilterModeAll: - return true // keep all - case "": - return true // keep all - default: - panic("unreachable") - } - } - - var hostInfos []api.HostHandlerResponse - wanted := limit - for { - // fetch up to 'limit' hosts. - hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{ - Offset: offset, - Limit: limit, - FilterMode: filterMode, - AddressContains: addressContains, - KeyIn: keyIn, - }) - if err != nil { - return nil, err - } - offset += len(hosts) - - // if there are no more hosts, we're done. - if len(hosts) == 0 { - return hostInfos, nil // no more hosts - } - - // decide how many of the returned hosts to keep. - var keptHosts int - for _, host := range hosts { - hi, cached := c.hostInfoFromCache(ctx, host) - if !cached { - // when the filterMode is "all" we include uncached hosts and - // set IsChecked = false. - if usabilityMode == api.UsabilityFilterModeAll { - hostInfos = append(hostInfos, api.HostHandlerResponse{ - Host: host, - }) - if wanted > 0 && len(hostInfos) == wanted { - return hostInfos, nil // we're done. 
- } - keptHosts++ - } - continue - } - if !keep(hi.Usable) { - continue - } - hostInfos = append(hostInfos, api.HostHandlerResponse{ - Host: host, - Checks: &api.HostHandlerResponseChecks{ - Gouging: hi.UnusableResult.gougingBreakdown.Gouging(), - GougingBreakdown: hi.UnusableResult.gougingBreakdown, - Score: hi.UnusableResult.scoreBreakdown.Score(), - ScoreBreakdown: hi.UnusableResult.scoreBreakdown, - Usable: hi.Usable, - UnusableReasons: hi.UnusableResult.reasons(), - }, - }) - if wanted > 0 && len(hostInfos) == wanted { - return hostInfos, nil // we're done. - } - keptHosts++ - } - - // if no hosts were kept from this batch, double the limit. - if limit > 0 && keptHosts == 0 { - limit *= 2 - } - } -} - -func isValidUsabilityFilterMode(usabilityMode string) bool { - switch usabilityMode { - case api.UsabilityFilterModeUsable: - case api.UsabilityFilterModeUnusable: - case api.UsabilityFilterModeAll: - case "": - default: - return false - } - return true -} diff --git a/bus/bus.go b/bus/bus.go index c8875a092..7df106f73 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -106,7 +106,7 @@ type ( UpdateHostBlocklistEntries(ctx context.Context, add, remove []string, clear bool) error HostInfo(ctx context.Context, autopilotID string, hk types.PublicKey) (api.HostInfo, error) - HostInfos(ctx context.Context, autopilotID string) ([]api.HostInfo, error) + HostInfos(ctx context.Context, autopilotID string, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostInfo, error) UpdateHostInfo(ctx context.Context, autopilotID string, hk types.PublicKey, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) error } @@ -258,9 +258,9 @@ func (b *bus) Handler() http.Handler { "GET /autopilot/:id": b.autopilotsHandlerGET, "PUT /autopilot/:id": b.autopilotsHandlerPUT, - "GET /autopilot/:id/hosts": b.autopilotHostInfosHandlerGET, "GET /autopilot/:id/host/:hostkey": 
b.autopilotHostInfoHandlerGET, "PUT /autopilot/:id/host/:hostkey": b.autopilotHostInfoHandlerPUT, + "POST /autopilot/:id/hosts": b.autopilotHostInfosHandlerPOST, "GET /buckets": b.bucketsHandlerGET, "POST /buckets": b.bucketsHandlerPOST, @@ -1989,43 +1989,67 @@ func (b *bus) autopilotHostInfoHandlerGET(jc jape.Context) { jc.Encode(hi) } -func (b *bus) autopilotHostInfosHandlerGET(jc jape.Context) { +func (b *bus) autopilotHostInfoHandlerPUT(jc jape.Context) { var id string if jc.DecodeParam("id", &id) != nil { return } + var hostKey types.PublicKey + if jc.DecodeParam("hostkey", &hostKey) != nil { + return + } + var hir api.UpdateHostInfoRequest + if jc.Check("failed to decode host info", jc.Decode(&hir)) != nil { + return + } - his, err := b.hdb.HostInfos(jc.Request.Context(), id) + err := b.hdb.UpdateHostInfo(jc.Request.Context(), id, hostKey, hir.Gouging, hir.Score, hir.Usability) if errors.Is(err, api.ErrAutopilotNotFound) { jc.Error(err, http.StatusNotFound) return - } else if jc.Check("failed to fetch host infos", err) != nil { + } else if jc.Check("failed to update host info", err) != nil { return } - jc.Encode(his) } -func (b *bus) autopilotHostInfoHandlerPUT(jc jape.Context) { +func (b *bus) autopilotHostInfosHandlerPOST(jc jape.Context) { var id string if jc.DecodeParam("id", &id) != nil { return } - var hostKey types.PublicKey - if jc.DecodeParam("hostkey", &hostKey) != nil { + var req api.HostInfosRequest + if jc.Decode(&req) != nil { return } - var hir api.UpdateHostInfoRequest - if jc.Check("failed to decode host info", jc.Decode(&hir)) != nil { - return + + // validate filter mode + if fm := req.FilterMode; fm != "" { + if fm != api.HostFilterModeAll && + fm != api.HostFilterModeAllowed && + fm != api.HostFilterModeBlocked { + jc.Error(fmt.Errorf("invalid filter mode: '%v', allowed values are '%s', '%s', '%s'", fm, api.HostFilterModeAll, api.HostFilterModeAllowed, api.HostFilterModeBlocked), http.StatusBadRequest) + return + } } - err := 
b.hdb.UpdateHostInfo(jc.Request.Context(), id, hostKey, hir.Gouging, hir.Score, hir.Usability) + // validate usability mode + if um := req.UsabilityMode; um != "" { + if um != api.UsabilityFilterModeUsable && + um != api.UsabilityFilterModeUnusable && + um != api.UsabilityFilterModeAll { + jc.Error(fmt.Errorf("invalid usability mode: '%v', allowed values are '%s', '%s', '%s'", um, api.UsabilityFilterModeAll, api.UsabilityFilterModeUsable, api.UsabilityFilterModeUnusable), http.StatusBadRequest) + return + } + } + + his, err := b.hdb.HostInfos(jc.Request.Context(), id, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if errors.Is(err, api.ErrAutopilotNotFound) { jc.Error(err, http.StatusNotFound) return - } else if jc.Check("failed to update host info", err) != nil { + } else if jc.Check("failed to fetch host infos", err) != nil { return } + jc.Encode(his) } func (b *bus) contractTaxHandlerGET(jc jape.Context) { diff --git a/bus/client/hosts.go b/bus/client/hosts.go index ecf44e52b..beda5950b 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -100,3 +100,32 @@ func (c *Client) UpdateHostBlocklist(ctx context.Context, add, remove []string, err = c.c.WithContext(ctx).PUT("/hosts/blocklist", api.UpdateBlocklistRequest{Add: add, Remove: remove, Clear: clear}) return } + +// HostInfo returns the host info for a given host and autopilot identifier. +func (c *Client) HostInfo(ctx context.Context, autopilotID string, hostKey types.PublicKey) (hostInfo api.HostInfo, err error) { + err = c.c.WithContext(ctx).GET(fmt.Sprintf("/autopilot/%s/host/%s", autopilotID, hostKey), &hostInfo) + return +} + +// UpdateHostInfo updates the host info for a given host and autopilot identifier. 
+func (c *Client) UpdateHostInfo(ctx context.Context, autopilotID string, hostKey types.PublicKey, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) (err error) { + err = c.c.WithContext(ctx).PUT(fmt.Sprintf("/autopilot/%s/host/%s", autopilotID, hostKey), api.UpdateHostInfoRequest{ + Gouging: gouging, + Score: score, + Usability: usability, + }) + return +} + +// HostInfos returns the host info for all hosts known to the autopilot with the given identifier. +func (c *Client) HostInfos(ctx context.Context, autopilotID string, opts api.HostInfoOptions) (hostInfos []api.HostInfo, err error) { + err = c.c.WithContext(ctx).POST(fmt.Sprintf("/autopilot/%s/hosts", autopilotID), api.HostInfosRequest{ + Offset: opts.Offset, + Limit: opts.Limit, + FilterMode: opts.FilterMode, + UsabilityMode: opts.UsabilityMode, + AddressContains: opts.AddressContains, + KeyIn: opts.KeyIn, + }, &hostInfos) + return +} diff --git a/stores/hostdb.go index 0a293f81f..0bc1e5a8c 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -596,7 +596,11 @@ func (ss *SQLStore) HostInfo(ctx context.Context, autopilotID string, hk types.P return } -func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string) (his []api.HostInfo, err error) { +func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (his []api.HostInfo, err error) { + if offset < 0 { + return nil, ErrNegativeOffset + } + err = ss.db.Transaction(func(tx *gorm.DB) error { // fetch ap id var apID uint @@ -611,12 +615,61 @@ func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string) (his []ap return err } - // fetch host info - var infos []dbHostInfo - if err := tx. Model(&dbHostInfo{}). Where("db_autopilot_id = ?", apID). - Preload("DBHost"). 
+ Joins("DBHost") + + // apply mode filter + switch filterMode { + case api.HostFilterModeAllowed: + query = query.Scopes(ss.excludeBlocked("DBHost")) + case api.HostFilterModeBlocked: + query = query.Scopes(ss.excludeAllowed("DBHost")) + case api.HostFilterModeAll: + // nothing to do + default: + return fmt.Errorf("invalid filter mode: %v", filterMode) + } + + // apply usability filter + switch usabilityMode { + case api.UsabilityFilterModeUsable: + query = query.Where("usability_blocked = ? AND usability_offline = ? AND usability_low_score = ? AND usability_redundant_ip = ? AND usability_gouging = ? AND usability_not_accepting_contracts = ? AND usability_not_announced = ? AND usability_not_completing_scan = ? AND usability_unknown = ?", + false, false, false, false, false, false, false, false, false) + case api.UsabilityFilterModeUnusable: + query = query.Where("usability_blocked = ? OR usability_offline = ? OR usability_low_score = ? OR usability_redundant_ip = ? OR usability_gouging = ? OR usability_not_accepting_contracts = ? OR usability_not_announced = ? OR usability_not_completing_scan = ? OR usability_unknown = ?", + true, true, true, true, true, true, true, true, true) + case api.UsabilityFilterModeAll: + // nothing to do + default: + return fmt.Errorf("invalid usability mode: %v", usabilityMode) + } + + // apply address filter + if addressContains != "" { + query = query.Scopes(func(d *gorm.DB) *gorm.DB { + return d.Where("net_address LIKE ?", "%"+addressContains+"%") + }) + } + + // apply key filter + if len(keyIn) > 0 { + pubKeys := make([]publicKey, len(keyIn)) + for i, pk := range keyIn { + pubKeys[i] = publicKey(pk) + } + query = query.Scopes(func(d *gorm.DB) *gorm.DB { + return d.Where("public_key IN ?", pubKeys) + }) + } + + // fetch host info + var infos []dbHostInfo + if err := query. + Offset(offset). + Limit(limit). Find(&infos). 
Error; err != nil { return err @@ -715,9 +768,9 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains query := ss.db switch filterMode { case api.HostFilterModeAllowed: - query = query.Scopes(ss.excludeBlocked) + query = query.Scopes(ss.excludeBlocked("hosts")) case api.HostFilterModeBlocked: - query = query.Scopes(ss.excludeAllowed) + query = query.Scopes(ss.excludeAllowed("hosts")) case api.HostFilterModeAll: // nothing to do default: @@ -1157,37 +1210,41 @@ func (ss *SQLStore) processConsensusChangeHostDB(cc modules.ConsensusChange) { // excludeBlocked can be used as a scope for a db transaction to exclude blocked // hosts. -func (ss *SQLStore) excludeBlocked(db *gorm.DB) *gorm.DB { - ss.mu.Lock() - defer ss.mu.Unlock() +func (ss *SQLStore) excludeBlocked(alias string) func(db *gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + ss.mu.Lock() + defer ss.mu.Unlock() - if ss.hasAllowlist { - db = db.Where("EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - if ss.hasBlocklist { - db = db.Where("NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") + if ss.hasAllowlist { + db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + } + if ss.hasBlocklist { + db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + } + return db } - return db } // excludeAllowed can be used as a scope for a db transaction to exclude allowed // hosts. 
-func (ss *SQLStore) excludeAllowed(db *gorm.DB) *gorm.DB { - ss.mu.Lock() - defer ss.mu.Unlock() +func (ss *SQLStore) excludeAllowed(alias string) func(db *gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + ss.mu.Lock() + defer ss.mu.Unlock() - if ss.hasAllowlist { - db = db.Where("NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - if ss.hasBlocklist { - db = db.Where("EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - if !ss.hasAllowlist && !ss.hasBlocklist { - // if neither an allowlist nor a blocklist exist, all hosts are allowed - // which means we return none - db = db.Where("1 = 0") + if ss.hasAllowlist { + db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + } + if ss.hasBlocklist { + db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + } + if !ss.hasAllowlist && !ss.hasBlocklist { + // if neither an allowlist nor a blocklist exist, all hosts are allowed + // which means we return none + db = db.Where("1 = 0") + } + return db } - return db } func (ss *SQLStore) isBlocked(h dbHost) (blocked bool) { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 06692ad8c..750ae65f5 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "os" "reflect" "testing" "time" @@ -1065,7 +1066,11 @@ func TestAnnouncementMaxAge(t *testing.T) { } func TestHostInfo(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + cfg := defaultTestSQLStoreConfig + cfg.persistent = true + cfg.dir = "/Users/peterjan/testing" + os.RemoveAll(cfg.dir) + ss := newTestSQLStore(t, cfg) defer ss.Close() // fetch info for a non-existing autopilot @@ -1133,7 +1138,7 @@ func TestHostInfo(t *testing.T) { } // add another host info - err = ss.addTestHost(types.PublicKey{2}) 
+ err = ss.addCustomTestHost(types.PublicKey{2}, "bar.com:1000") if err != nil { t.Fatal(err) } @@ -1143,7 +1148,7 @@ func TestHostInfo(t *testing.T) { } // fetch all infos for autopilot - his, err := ss.HostInfos(context.Background(), "foo") + his, err := ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } else if len(his) != 2 { @@ -1151,6 +1156,102 @@ func TestHostInfo(t *testing.T) { } else if his[0].Host.PublicKey != (types.PublicKey{1}) || his[1].Host.PublicKey != (types.PublicKey{2}) { t.Fatal("unexpected", his) } + + // fetch infos using offset & limit + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, 1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 1, 1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 2, 1) + if err != nil { + t.Fatal(err) + } else if len(his) != 0 { + t.Fatal("unexpected") + } + + // fetch infos using net addresses + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "bar", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{2}) { + t.Fatal("unexpected", his) + } + + // fetch infos using keyIn + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{{2}}, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{2}) { + t.Fatal("unexpected", his) + } + + // fetch infos 
using mode filters + err = ss.UpdateHostBlocklistEntries(context.Background(), []string{"bar.com:1000"}, nil, false) + if err != nil { + t.Fatal(err) + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{1}) { + t.Fatal("unexpected", his) + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{2}) { + t.Fatal("unexpected", his) + } + err = ss.UpdateHostBlocklistEntries(context.Background(), nil, nil, true) + if err != nil { + t.Fatal(err) + } + + // fetch infos using usability filters + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 0 { + t.Fatal("unexpected") + } + // update info + want.Usability.Blocked = false + want.Usability.Offline = false + want.Usability.LowScore = false + want.Usability.RedundantIP = false + want.Usability.Gouging = false + want.Usability.NotAcceptingContracts = false + want.Usability.NotAnnounced = false + want.Usability.NotCompletingScan = false + want.Usability.Unknown = false + err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) + if err != nil { + t.Fatal(err) + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{1}) { + t.Fatal("unexpected", his) + } } // addTestHosts adds 'n' hosts to the db and returns their 
keys. diff --git a/worker/upload.go b/worker/upload.go index ab84e2b37..1a5e230c0 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -276,7 +276,6 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe // wait for all threads to finish wg.Wait() - return } func (w *worker) tryUploadPackedSlab(ctx context.Context, mem Memory, ps api.PackedSlab, rs api.RedundancySettings, contractSet string, lockPriority int) error { From f0ce1c81cab0f562bc59a028c1221325eaeb09e5 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 08:49:05 +0100 Subject: [PATCH 061/201] autopilot: add compatV105HostInfo --- api/autopilot.go | 15 --- api/host.go | 31 +++++++ autopilot/autopilot.go | 124 +++++++++++++++++-------- autopilot/client.go | 4 +- autopilot/hostinfo.go | 206 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 324 insertions(+), 56 deletions(-) create mode 100644 autopilot/hostinfo.go diff --git a/api/autopilot.go b/api/autopilot.go index b20696fa9..d40f84ee4 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -119,19 +119,4 @@ type ( } Recommendation *ConfigRecommendation `json:"recommendation,omitempty"` } - - // HostHandlerResponse is the response type for the /host/:hostkey endpoint. - HostHandlerResponse struct { - Host hostdb.Host `json:"host"` - Checks *HostHandlerResponseChecks `json:"checks,omitempty"` - } - - HostHandlerResponseChecks struct { - Gouging bool `json:"gouging"` - GougingBreakdown HostGougingBreakdown `json:"gougingBreakdown"` - Score float64 `json:"score"` - ScoreBreakdown HostScoreBreakdown `json:"scoreBreakdown"` - Usable bool `json:"usable"` - UnusableReasons []string `json:"unusableReasons"` - } ) diff --git a/api/host.go b/api/host.go index 18ae2cc38..11767979c 100644 --- a/api/host.go +++ b/api/host.go @@ -74,6 +74,23 @@ type ( // endpoint. HostInfosRequest SearchHostsRequest + // HostInfoResponse is the response type for the /host/:hostkey endpoint. 
+ // + // TODO: on next major release consider returning an api.HostInfo + HostInfoResponse struct { + Host hostdb.Host `json:"host"` + Checks *HostChecks `json:"checks,omitempty"` + } + + HostChecks struct { + Gouging bool `json:"gouging"` + GougingBreakdown HostGougingBreakdown `json:"gougingBreakdown"` + Score float64 `json:"score"` + ScoreBreakdown HostScoreBreakdown `json:"scoreBreakdown"` + Usable bool `json:"usable"` + UnusableReasons []string `json:"unusableReasons"` + } + // UpdateHostInfoRequest is the request type for the PUT // /autopilot/:id/host/:hostkey endpoint. UpdateHostInfoRequest struct { @@ -266,6 +283,20 @@ func (ub HostUsabilityBreakdown) UnusableReasons() []string { return reasons } +func (hi HostInfo) ToHostInfoReponse() HostInfoResponse { + return HostInfoResponse{ + Host: hi.Host, + Checks: &HostChecks{ + Gouging: hi.Usability.Gouging, + GougingBreakdown: hi.Gouging, + Score: hi.Score.Score(), + ScoreBreakdown: hi.Score, + Usable: hi.Usability.Usable(), + UnusableReasons: hi.Usability.UnusableReasons(), + }, + } +} + func (c AutopilotConfig) Validate() error { if c.Hosts.MaxDowntimeHours > 99*365*24 { return ErrMaxDowntimeHoursTooHigh diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 608e67463..32cccf3d5 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -684,16 +684,56 @@ func (ap *Autopilot) triggerHandlerPOST(jc jape.Context) { } func (ap *Autopilot) hostHandlerGET(jc jape.Context) { - var hostKey types.PublicKey - if jc.DecodeParam("hostKey", &hostKey) != nil { + var hk types.PublicKey + if jc.DecodeParam("hostKey", &hk) != nil { return } - host, err := ap.bus.HostInfo(jc.Request.Context(), ap.id, hostKey) + // TODO: remove on next major release + h, err := compatV105HostInfo(jc.Request.Context(), ap.State(), ap.bus, hk) if jc.Check("failed to get host info", err) != nil { return } - jc.Encode(host) + + hi, err := ap.bus.HostInfo(jc.Request.Context(), ap.id, hk) + if utils.IsErr(err, 
api.ErrHostInfoNotFound) { + // TODO PJ: we used to calculate the host info here on the fly, maybe we + // should keep doing that but maybe we can get away with this too... + jc.Encode(api.HostInfoResponse{ + Host: h.Host, + Checks: nil, + }) + return + } else if jc.Check("failed to get host info", err) != nil { + return + } + + jc.Encode(hi.ToHostInfoReponse()) +} + +func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { + var req api.HostInfosRequest + if jc.Decode(&req) != nil { + return + } + hosts, err := ap.bus.HostInfos(jc.Request.Context(), ap.id, api.HostInfoOptions{ + UsabilityMode: req.UsabilityMode, + SearchHostOptions: api.SearchHostOptions{ + FilterMode: req.FilterMode, + AddressContains: req.AddressContains, + KeyIn: req.KeyIn, + Offset: req.Offset, + Limit: req.Limit, + }, + }) + if jc.Check("failed to get host info", err) != nil { + return + } + resps := make([]api.HostInfoResponse, len(hosts)) + for i, host := range hosts { + resps[i] = host.ToHostInfoReponse() + } + jc.Encode(resps) } func (ap *Autopilot) stateHandlerGET(jc jape.Context) { @@ -727,41 +767,6 @@ func (ap *Autopilot) stateHandlerGET(jc jape.Context) { }) } -func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { - var req api.HostInfosRequest - if jc.Decode(&req) != nil { - return - } - hosts, err := ap.bus.HostInfos(jc.Request.Context(), ap.id, api.HostInfoOptions{ - UsabilityMode: req.UsabilityMode, - SearchHostOptions: api.SearchHostOptions{ - FilterMode: req.FilterMode, - AddressContains: req.AddressContains, - KeyIn: req.KeyIn, - Offset: req.Offset, - Limit: req.Limit, - }, - }) - if jc.Check("failed to get host info", err) != nil { - return - } - resps := make([]api.HostHandlerResponse, len(hosts)) - for i, host := range hosts { - resps[i] = api.HostHandlerResponse{ - Host: host.Host, - Checks: &api.HostHandlerResponseChecks{ - Gouging: host.Gouging.Gouging(), - GougingBreakdown: host.Gouging, - Score: host.Score.Score(), - ScoreBreakdown: host.Score, - Usable: 
host.Usability.Usable(), - UnusableReasons: host.Usability.UnusableReasons(), - }, - } - } - jc.Encode(resps) -} - func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { @@ -930,3 +935,44 @@ func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg nSteps++ } } + +// compatV105HostInfo performs some state checks and bus calls we no longer +// need, but are necessary checks to make sure our API is consistent. This +// should be considered for removal when releasing a new major version. +func compatV105HostInfo(ctx context.Context, s state, b Bus, hk types.PublicKey) (*hostdb.HostInfo, error) { + // state checks + if s.cfg.Contracts.Allowance.IsZero() { + return nil, fmt.Errorf("can not score hosts because contracts allowance is zero") + } + if s.cfg.Contracts.Amount == 0 { + return nil, fmt.Errorf("can not score hosts because contracts amount is zero") + } + if s.cfg.Contracts.Period == 0 { + return nil, fmt.Errorf("can not score hosts because contract period is zero") + } + + // fetch host + host, err := b.Host(ctx, hk) + if err != nil { + return nil, fmt.Errorf("failed to fetch requested host from bus: %w", err) + } + + // other checks + _, err = b.GougingSettings(ctx) + if err != nil { + return nil, fmt.Errorf("failed to fetch gouging settings from bus: %w", err) + } + _, err = b.RedundancySettings(ctx) + if err != nil { + return nil, fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) + } + _, err = b.ConsensusState(ctx) + if err != nil { + return nil, fmt.Errorf("failed to fetch consensus state from bus: %w", err) + } + _, err = b.RecommendedFee(ctx) + if err != nil { + return nil, fmt.Errorf("failed to fetch recommended fee from bus: %w", err) + } + return &host, 
nil +} diff --git a/autopilot/client.go b/autopilot/client.go index ba16754a5..41b0b3207 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -34,13 +34,13 @@ func (c *Client) UpdateConfig(cfg api.AutopilotConfig) error { } // HostInfo returns information about the host with given host key. -func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostHandlerResponse, err error) { +func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostInfoResponse, err error) { err = c.c.GET(fmt.Sprintf("/host/%s", hostKey), &resp) return } // HostInfo returns information about all hosts. -func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode string, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostHandlerResponse, err error) { +func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode string, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostInfoResponse, err error) { err = c.c.POST("/hosts", api.SearchHostsRequest{ Offset: offset, Limit: limit, diff --git a/autopilot/hostinfo.go b/autopilot/hostinfo.go new file mode 100644 index 000000000..263cd5d4b --- /dev/null +++ b/autopilot/hostinfo.go @@ -0,0 +1,206 @@ +package autopilot + +import ( + "context" + "fmt" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/worker" +) + +func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api.HostInfoResponse, error) { + state := c.ap.State() + + if state.cfg.Contracts.Allowance.IsZero() { + return api.HostInfoResponse{}, fmt.Errorf("can not score hosts because contracts allowance is zero") + } + if state.cfg.Contracts.Amount == 0 { + return api.HostInfoResponse{}, fmt.Errorf("can not score hosts because contracts amount is zero") + } + if state.cfg.Contracts.Period == 0 { + return api.HostInfoResponse{}, fmt.Errorf("can not score hosts because contract period is zero") + } + + host, 
err := c.ap.bus.Host(ctx, hostKey) + if err != nil { + return api.HostInfoResponse{}, fmt.Errorf("failed to fetch requested host from bus: %w", err) + } + gs, err := c.ap.bus.GougingSettings(ctx) + if err != nil { + return api.HostInfoResponse{}, fmt.Errorf("failed to fetch gouging settings from bus: %w", err) + } + rs, err := c.ap.bus.RedundancySettings(ctx) + if err != nil { + return api.HostInfoResponse{}, fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) + } + cs, err := c.ap.bus.ConsensusState(ctx) + if err != nil { + return api.HostInfoResponse{}, fmt.Errorf("failed to fetch consensus state from bus: %w", err) + } + fee, err := c.ap.bus.RecommendedFee(ctx) + if err != nil { + return api.HostInfoResponse{}, fmt.Errorf("failed to fetch recommended fee from bus: %w", err) + } + c.mu.Lock() + storedData := c.cachedDataStored[hostKey] + minScore := c.cachedMinScore + c.mu.Unlock() + + gc := worker.NewGougingChecker(gs, cs, fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) + + // ignore the pricetable's HostBlockHeight by setting it to our own blockheight + host.Host.PriceTable.HostBlockHeight = cs.BlockHeight + + isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host.Host, minScore, storedData) + return api.HostInfoResponse{ + Host: host.Host, + Checks: &api.HostChecks{ + Gouging: unusableResult.gougingBreakdown.Gouging(), + GougingBreakdown: unusableResult.gougingBreakdown, + Score: unusableResult.scoreBreakdown.Score(), + ScoreBreakdown: unusableResult.scoreBreakdown, + Usable: isUsable, + UnusableReasons: unusableResult.reasons(), + }, + }, nil +} + +func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.Host) (hi hostInfo, found bool) { + // grab host details from cache + c.mu.Lock() + hi, found = c.cachedHostInfo[host.PublicKey] + storedData := c.cachedDataStored[host.PublicKey] + minScore := c.cachedMinScore + c.mu.Unlock() + + // return early if the host info is not cached + if !found { + return + 
} + + // try and refresh the host info if it got scanned in the meantime, this + // inconsistency would resolve itself but trying to update it here improves + // first time user experience + if host.Scanned && hi.UnusableResult.notcompletingscan > 0 { + cs, err := c.ap.bus.ConsensusState(ctx) + if err != nil { + c.logger.Error("failed to fetch consensus state from bus: %v", err) + } else { + state := c.ap.State() + gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) + isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, storedData) + hi = hostInfo{ + Usable: isUsable, + UnusableResult: unusableResult, + } + + // update cache + c.mu.Lock() + c.cachedHostInfo[host.PublicKey] = hi + c.mu.Unlock() + } + } + + return +} + +func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostInfoResponse, error) { + // declare helper to decide whether to keep a host. + if !isValidUsabilityFilterMode(usabilityMode) { + return nil, fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable' or an empty string for no filter", usabilityMode) + } + + keep := func(usable bool) bool { + switch usabilityMode { + case api.UsabilityFilterModeUsable: + return usable // keep usable + case api.UsabilityFilterModeUnusable: + return !usable // keep unusable + case api.UsabilityFilterModeAll: + return true // keep all + case "": + return true // keep all + default: + panic("unreachable") + } + } + + var hostInfos []api.HostInfoResponse + wanted := limit + for { + // fetch up to 'limit' hosts. + hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{ + Offset: offset, + Limit: limit, + FilterMode: filterMode, + AddressContains: addressContains, + KeyIn: keyIn, + }) + if err != nil { + return nil, err + } + offset += len(hosts) + + // if there are no more hosts, we're done. 
+ if len(hosts) == 0 { + return hostInfos, nil // no more hosts + } + + // decide how many of the returned hosts to keep. + var keptHosts int + for _, host := range hosts { + hi, cached := c.hostInfoFromCache(ctx, host) + if !cached { + // when the filterMode is "all" we include uncached hosts and + // set IsChecked = false. + if usabilityMode == api.UsabilityFilterModeAll { + hostInfos = append(hostInfos, api.HostInfoResponse{ + Host: host, + }) + if wanted > 0 && len(hostInfos) == wanted { + return hostInfos, nil // we're done. + } + keptHosts++ + } + continue + } + if !keep(hi.Usable) { + continue + } + hostInfos = append(hostInfos, api.HostInfoResponse{ + Host: host, + Checks: &api.HostChecks{ + Gouging: hi.UnusableResult.gougingBreakdown.Gouging(), + GougingBreakdown: hi.UnusableResult.gougingBreakdown, + Score: hi.UnusableResult.scoreBreakdown.Score(), + ScoreBreakdown: hi.UnusableResult.scoreBreakdown, + Usable: hi.Usable, + UnusableReasons: hi.UnusableResult.reasons(), + }, + }) + if wanted > 0 && len(hostInfos) == wanted { + return hostInfos, nil // we're done. + } + keptHosts++ + } + + // if no hosts were kept from this batch, double the limit. 
+ if limit > 0 && keptHosts == 0 { + limit *= 2 + } + } +} + +func isValidUsabilityFilterMode(usabilityMode string) bool { + switch usabilityMode { + case api.UsabilityFilterModeUsable: + case api.UsabilityFilterModeUnusable: + case api.UsabilityFilterModeAll: + case "": + default: + return false + } + return true +} From 8efc718a01d53ab1ec948d50b9f1c0eaea7a2a84 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 09:11:08 +0100 Subject: [PATCH 062/201] autopilot: add compatV105UsabilityFilterModeCheck --- api/autopilot.go | 1 - autopilot/autopilot.go | 21 +++++ autopilot/hostinfo.go | 206 ----------------------------------------- stores/hostdb.go | 1 + stores/hostdb_test.go | 1 + 5 files changed, 23 insertions(+), 207 deletions(-) delete mode 100644 autopilot/hostinfo.go diff --git a/api/autopilot.go b/api/autopilot.go index d40f84ee4..70914afc3 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -4,7 +4,6 @@ import ( "errors" "go.sia.tech/core/types" - "go.sia.tech/renterd/hostdb" ) const ( diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 32cccf3d5..c8df13279 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -716,6 +716,15 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } + + // TODO: remove on next major release + if jc.Check("failed to get host info", compatV105UsabilityFilterModeCheck(req.UsabilityMode)) != nil { + return + } + + // TODO PJ: we used to return hosts regardless of whether they have host + // info if usability mode was set to "all" - it is annoying but maybe we + // should keep doing that hosts, err := ap.bus.HostInfos(jc.Request.Context(), ap.id, api.HostInfoOptions{ UsabilityMode: req.UsabilityMode, SearchHostOptions: api.SearchHostOptions{ @@ -976,3 +985,15 @@ func compatV105HostInfo(ctx context.Context, s state, b Bus, hk types.PublicKey) } return &host, nil } + +func compatV105UsabilityFilterModeCheck(usabilityMode string) error { + switch 
usabilityMode { + case api.UsabilityFilterModeUsable: + case api.UsabilityFilterModeUnusable: + case api.UsabilityFilterModeAll: + case "": + default: + return fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable' or an empty string for no filter", usabilityMode) + } + return nil +} diff --git a/autopilot/hostinfo.go b/autopilot/hostinfo.go deleted file mode 100644 index 263cd5d4b..000000000 --- a/autopilot/hostinfo.go +++ /dev/null @@ -1,206 +0,0 @@ -package autopilot - -import ( - "context" - "fmt" - - "go.sia.tech/core/types" - "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" - "go.sia.tech/renterd/worker" -) - -func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api.HostInfoResponse, error) { - state := c.ap.State() - - if state.cfg.Contracts.Allowance.IsZero() { - return api.HostInfoResponse{}, fmt.Errorf("can not score hosts because contracts allowance is zero") - } - if state.cfg.Contracts.Amount == 0 { - return api.HostInfoResponse{}, fmt.Errorf("can not score hosts because contracts amount is zero") - } - if state.cfg.Contracts.Period == 0 { - return api.HostInfoResponse{}, fmt.Errorf("can not score hosts because contract period is zero") - } - - host, err := c.ap.bus.Host(ctx, hostKey) - if err != nil { - return api.HostInfoResponse{}, fmt.Errorf("failed to fetch requested host from bus: %w", err) - } - gs, err := c.ap.bus.GougingSettings(ctx) - if err != nil { - return api.HostInfoResponse{}, fmt.Errorf("failed to fetch gouging settings from bus: %w", err) - } - rs, err := c.ap.bus.RedundancySettings(ctx) - if err != nil { - return api.HostInfoResponse{}, fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) - } - cs, err := c.ap.bus.ConsensusState(ctx) - if err != nil { - return api.HostInfoResponse{}, fmt.Errorf("failed to fetch consensus state from bus: %w", err) - } - fee, err := c.ap.bus.RecommendedFee(ctx) - if err != nil { - return api.HostInfoResponse{}, fmt.Errorf("failed to 
fetch recommended fee from bus: %w", err) - } - c.mu.Lock() - storedData := c.cachedDataStored[hostKey] - minScore := c.cachedMinScore - c.mu.Unlock() - - gc := worker.NewGougingChecker(gs, cs, fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - - // ignore the pricetable's HostBlockHeight by setting it to our own blockheight - host.Host.PriceTable.HostBlockHeight = cs.BlockHeight - - isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host.Host, minScore, storedData) - return api.HostInfoResponse{ - Host: host.Host, - Checks: &api.HostChecks{ - Gouging: unusableResult.gougingBreakdown.Gouging(), - GougingBreakdown: unusableResult.gougingBreakdown, - Score: unusableResult.scoreBreakdown.Score(), - ScoreBreakdown: unusableResult.scoreBreakdown, - Usable: isUsable, - UnusableReasons: unusableResult.reasons(), - }, - }, nil -} - -func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.Host) (hi hostInfo, found bool) { - // grab host details from cache - c.mu.Lock() - hi, found = c.cachedHostInfo[host.PublicKey] - storedData := c.cachedDataStored[host.PublicKey] - minScore := c.cachedMinScore - c.mu.Unlock() - - // return early if the host info is not cached - if !found { - return - } - - // try and refresh the host info if it got scanned in the meantime, this - // inconsistency would resolve itself but trying to update it here improves - // first time user experience - if host.Scanned && hi.UnusableResult.notcompletingscan > 0 { - cs, err := c.ap.bus.ConsensusState(ctx) - if err != nil { - c.logger.Error("failed to fetch consensus state from bus: %v", err) - } else { - state := c.ap.State() - gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, storedData) - hi = hostInfo{ - Usable: isUsable, - UnusableResult: unusableResult, - } - - // update cache - c.mu.Lock() - 
c.cachedHostInfo[host.PublicKey] = hi - c.mu.Unlock() - } - } - - return -} - -func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostInfoResponse, error) { - // declare helper to decide whether to keep a host. - if !isValidUsabilityFilterMode(usabilityMode) { - return nil, fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable' or an empty string for no filter", usabilityMode) - } - - keep := func(usable bool) bool { - switch usabilityMode { - case api.UsabilityFilterModeUsable: - return usable // keep usable - case api.UsabilityFilterModeUnusable: - return !usable // keep unusable - case api.UsabilityFilterModeAll: - return true // keep all - case "": - return true // keep all - default: - panic("unreachable") - } - } - - var hostInfos []api.HostInfoResponse - wanted := limit - for { - // fetch up to 'limit' hosts. - hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{ - Offset: offset, - Limit: limit, - FilterMode: filterMode, - AddressContains: addressContains, - KeyIn: keyIn, - }) - if err != nil { - return nil, err - } - offset += len(hosts) - - // if there are no more hosts, we're done. - if len(hosts) == 0 { - return hostInfos, nil // no more hosts - } - - // decide how many of the returned hosts to keep. - var keptHosts int - for _, host := range hosts { - hi, cached := c.hostInfoFromCache(ctx, host) - if !cached { - // when the filterMode is "all" we include uncached hosts and - // set IsChecked = false. - if usabilityMode == api.UsabilityFilterModeAll { - hostInfos = append(hostInfos, api.HostInfoResponse{ - Host: host, - }) - if wanted > 0 && len(hostInfos) == wanted { - return hostInfos, nil // we're done. 
- } - keptHosts++ - } - continue - } - if !keep(hi.Usable) { - continue - } - hostInfos = append(hostInfos, api.HostInfoResponse{ - Host: host, - Checks: &api.HostChecks{ - Gouging: hi.UnusableResult.gougingBreakdown.Gouging(), - GougingBreakdown: hi.UnusableResult.gougingBreakdown, - Score: hi.UnusableResult.scoreBreakdown.Score(), - ScoreBreakdown: hi.UnusableResult.scoreBreakdown, - Usable: hi.Usable, - UnusableReasons: hi.UnusableResult.reasons(), - }, - }) - if wanted > 0 && len(hostInfos) == wanted { - return hostInfos, nil // we're done. - } - keptHosts++ - } - - // if no hosts were kept from this batch, double the limit. - if limit > 0 && keptHosts == 0 { - limit *= 2 - } - } -} - -func isValidUsabilityFilterMode(usabilityMode string) bool { - switch usabilityMode { - case api.UsabilityFilterModeUsable: - case api.UsabilityFilterModeUnusable: - case api.UsabilityFilterModeAll: - case "": - default: - return false - } - return true -} diff --git a/stores/hostdb.go b/stores/hostdb.go index 8975c7006..a8763f2ca 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -651,6 +651,7 @@ func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string, filterMod // fetch host info var infos []dbHostInfo if err := query. + Debug(). Offset(offset). Limit(limit). Find(&infos). 
diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 750ae65f5..8b154abc2 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -1230,6 +1230,7 @@ func TestHostInfo(t *testing.T) { } else if len(his) != 0 { t.Fatal("unexpected") } + // update info want.Usability.Blocked = false want.Usability.Offline = false From 50b7d76f5ad01ead2668e62ef06a7a488bdeef89 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 09:24:04 +0100 Subject: [PATCH 063/201] stores: remove usability unknonwn --- api/host.go | 12 +++--------- autopilot/autopilot.go | 3 --- autopilot/contractor.go | 4 ++-- autopilot/hostfilter.go | 7 +------ autopilot/hostscore_test.go | 16 ++++++++-------- internal/test/e2e/cluster_test.go | 4 ++-- stores/hostdb.go | 11 ++++------- stores/hostdb_test.go | 2 -- .../mysql/main/migration_00007_host_info.sql | 2 -- stores/migrations/mysql/main/schema.sql | 2 -- .../sqlite/main/migration_00007_host_info.sql | 2 -- stores/migrations/sqlite/main/schema.sql | 3 +-- 12 files changed, 21 insertions(+), 47 deletions(-) diff --git a/api/host.go b/api/host.go index 11767979c..2b198474b 100644 --- a/api/host.go +++ b/api/host.go @@ -40,7 +40,6 @@ var ( ErrUsabilityHostNotAcceptingContracts = errors.New("host is not accepting contracts") ErrUsabilityHostNotCompletingScan = errors.New("host is not completing scan") ErrUsabilityHostNotAnnounced = errors.New("host is not announced") - ErrUsabilityUnknown = errors.New("unknown") ) type ( @@ -196,7 +195,6 @@ type ( NotAcceptingContracts bool `json:"notAcceptingContracts"` NotAnnounced bool `json:"notAnnounced"` NotCompletingScan bool `json:"notCompletingScan"` - Unknown bool `json:"unknown"` } ) @@ -235,7 +233,7 @@ func (hgb HostGougingBreakdown) String() string { return strings.Join(reasons, ";") } -func (sb HostScoreBreakdown) Score() float64 { +func (sb HostScoreBreakdown) TotalScore() float64 { return sb.Age * sb.Collateral * sb.Interactions * sb.StorageRemaining * sb.Uptime * sb.Version * sb.Prices } 
@@ -247,8 +245,7 @@ func (ub HostUsabilityBreakdown) Usable() bool { !ub.Gouging && !ub.NotAcceptingContracts && !ub.NotAnnounced && - !ub.NotCompletingScan && - !ub.Unknown + !ub.NotCompletingScan } func (ub HostUsabilityBreakdown) UnusableReasons() []string { @@ -277,9 +274,6 @@ func (ub HostUsabilityBreakdown) UnusableReasons() []string { if ub.NotCompletingScan { reasons = append(reasons, ErrUsabilityHostNotCompletingScan.Error()) } - if ub.Unknown { - reasons = append(reasons, ErrUsabilityUnknown.Error()) - } return reasons } @@ -289,7 +283,7 @@ func (hi HostInfo) ToHostInfoReponse() HostInfoResponse { Checks: &HostChecks{ Gouging: hi.Usability.Gouging, GougingBreakdown: hi.Gouging, - Score: hi.Score.Score(), + Score: hi.Score.TotalScore(), ScoreBreakdown: hi.Score, Usable: hi.Usability.Usable(), UnusableReasons: hi.Usability.UnusableReasons(), diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index c8df13279..beb32e589 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -809,9 +809,6 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu if hi.Usability.NotCompletingScan { resp.Unusable.NotScanned++ } - if hi.Usability.Unknown { - resp.Unusable.Unknown++ - } if hi.Gouging.ContractErr != "" { resp.Unusable.Gouging.Contract++ } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 814cba2d0..a4504ab29 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -1357,13 +1357,13 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us h.PriceTable.HostBlockHeight = cs.BlockHeight hi := calculateHostInfo(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) if hi.Usability.Usable() { - candidates = append(candidates, scoredHost{h, hi.Score.Score()}) + candidates = append(candidates, scoredHost{h, hi.Score.TotalScore()}) continue } // keep track of unusable host results unusableHosts.track(hi.Usability) - if hi.Score.Score() == 0 { + if 
hi.Score.TotalScore() == 0 { zeros++ } unusable++ diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 462be4588..c1a55380d 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -49,7 +49,6 @@ type unusableHostsBreakdown struct { notacceptingcontracts uint64 notannounced uint64 notcompletingscan uint64 - unknown uint64 } func (u *unusableHostsBreakdown) track(ub api.HostUsabilityBreakdown) { @@ -77,9 +76,6 @@ func (u *unusableHostsBreakdown) track(ub api.HostUsabilityBreakdown) { if ub.NotCompletingScan { u.notcompletingscan++ } - if ub.Unknown { - u.unknown++ - } } func (u *unusableHostsBreakdown) keysAndValues() []interface{} { @@ -92,7 +88,6 @@ func (u *unusableHostsBreakdown) keysAndValues() []interface{} { "notacceptingcontracts", u.notacceptingcontracts, "notcompletingscan", u.notcompletingscan, "notannounced", u.notannounced, - "unknown", u.unknown, } for i := 0; i < len(values); i += 2 { if values[i+1].(uint64) == 0 { @@ -143,7 +138,7 @@ func calculateHostInfo(cfg api.AutopilotConfig, rs api.RedundancySettings, gc wo // checks in its cost calculations needed to calculate the period // cost sb = hostScore(cfg, h, storedData, rs.Redundancy()) - if sb.Score() < minScore { + if sb.TotalScore() < minScore { ub.LowScore = true } } diff --git a/autopilot/hostscore_test.go b/autopilot/hostscore_test.go index e48417235..464369052 100644 --- a/autopilot/hostscore_test.go +++ b/autopilot/hostscore_test.go @@ -48,7 +48,7 @@ func TestHostScore(t *testing.T) { // assert age affects the score h1.KnownSince = time.Now().Add(-1 * day) - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, 0, redundancy).TotalScore() <= hostScore(cfg, h2, 0, redundancy).TotalScore() { t.Fatal("unexpected") } @@ -57,21 +57,21 @@ func TestHostScore(t *testing.T) { settings.Collateral = settings.Collateral.Div64(2) settings.MaxCollateral = settings.MaxCollateral.Div64(2) h1 = newHost(settings) // 
reset - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, 0, redundancy).TotalScore() <= hostScore(cfg, h2, 0, redundancy).TotalScore() { t.Fatal("unexpected") } // assert interactions affect the score h1 = newHost(newTestHostSettings()) // reset h1.Interactions.SuccessfulInteractions++ - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, 0, redundancy).TotalScore() <= hostScore(cfg, h2, 0, redundancy).TotalScore() { t.Fatal("unexpected") } // assert uptime affects the score h2 = newHost(newTestHostSettings()) // reset h2.Interactions.SecondToLastScanSuccess = false - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() || ageScore(h1) != ageScore(h2) { + if hostScore(cfg, h1, 0, redundancy).TotalScore() <= hostScore(cfg, h2, 0, redundancy).TotalScore() || ageScore(h1) != ageScore(h2) { t.Fatal("unexpected") } @@ -79,28 +79,28 @@ func TestHostScore(t *testing.T) { h2Settings := newTestHostSettings() h2Settings.Version = "1.5.6" // lower h2 = newHost(h2Settings) // reset - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, 0, redundancy).TotalScore() <= hostScore(cfg, h2, 0, redundancy).TotalScore() { t.Fatal("unexpected") } // asseret remaining storage affects the score. h1 = newHost(newTestHostSettings()) // reset h2.Settings.RemainingStorage = 100 - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, 0, redundancy).TotalScore() <= hostScore(cfg, h2, 0, redundancy).TotalScore() { t.Fatal("unexpected") } // assert MaxCollateral affects the score. 
h2 = newHost(newTestHostSettings()) // reset h2.PriceTable.MaxCollateral = types.ZeroCurrency - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, 0, redundancy).TotalScore() <= hostScore(cfg, h2, 0, redundancy).TotalScore() { t.Fatal("unexpected") } // assert price affects the score. h2 = newHost(newTestHostSettings()) // reset h2.PriceTable.WriteBaseCost = types.Siacoins(1) - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, 0, redundancy).TotalScore() <= hostScore(cfg, h2, 0, redundancy).TotalScore() { t.Fatal("unexpected") } } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 2346f7019..18296a1f8 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -153,7 +153,7 @@ func TestNewTestCluster(t *testing.T) { if err != nil { t.Fatal(err) } - if hi.Checks.ScoreBreakdown.Score() == 0 { + if hi.Checks.ScoreBreakdown.TotalScore() == 0 { js, _ := json.MarshalIndent(hi.Checks.ScoreBreakdown, "", " ") t.Fatalf("score shouldn't be 0 because that means one of the fields was 0: %s", string(js)) } @@ -175,7 +175,7 @@ func TestNewTestCluster(t *testing.T) { allHosts := make(map[types.PublicKey]struct{}) for _, hi := range hostInfos { - if hi.Checks.ScoreBreakdown.Score() == 0 { + if hi.Checks.ScoreBreakdown.TotalScore() == 0 { js, _ := json.MarshalIndent(hi.Checks.ScoreBreakdown, "", " ") t.Fatalf("score shouldn't be 0 because that means one of the fields was 0: %s", string(js)) } diff --git a/stores/hostdb.go b/stores/hostdb.go index a8763f2ca..1d0b1959d 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -100,7 +100,6 @@ type ( UsabilityNotAcceptingContracts bool `gorm:"index:idx_host_infos_usability_not_accepting_contracts"` UsabilityNotAnnounced bool `gorm:"index:idx_host_infos_usability_not_announced"` UsabilityNotCompletingScan bool 
`gorm:"index:idx_host_infos_usability_not_completing_scan"` - UsabilityUnknown bool `gorm:"index:idx_host_infos_usability_unknown"` // score ScoreAge float64 `gorm:"index:idx_host_infos_score_age"` @@ -370,7 +369,6 @@ func (hi dbHostInfo) convert() api.HostInfo { NotAcceptingContracts: hi.UsabilityNotAcceptingContracts, NotAnnounced: hi.UsabilityNotAnnounced, NotCompletingScan: hi.UsabilityNotCompletingScan, - Unknown: hi.UsabilityUnknown, }, } } @@ -388,7 +386,6 @@ func convertHostInfo(apID, hID uint, gouging api.HostGougingBreakdown, score api UsabilityNotAcceptingContracts: usability.NotAcceptingContracts, UsabilityNotAnnounced: usability.NotAnnounced, UsabilityNotCompletingScan: usability.NotCompletingScan, - UsabilityUnknown: usability.Unknown, ScoreAge: score.Age, ScoreCollateral: score.Collateral, @@ -619,11 +616,11 @@ func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string, filterMod // apply usability filter switch usabilityMode { case api.UsabilityFilterModeUsable: - query = query.Where("usability_blocked = ? AND usability_offline = ? AND usability_low_score = ? AND usability_redundant_ip = ? AND usability_gouging = ? AND usability_not_accepting_contracts = ? AND usability_not_announced = ? AND usability_not_completing_scan = ? AND usability_unknown = ?", - false, false, false, false, false, false, false, false, false) + query = query.Where("usability_blocked = ? AND usability_offline = ? AND usability_low_score = ? AND usability_redundant_ip = ? AND usability_gouging = ? AND usability_not_accepting_contracts = ? AND usability_not_announced = ? AND usability_not_completing_scan = ?", + false, false, false, false, false, false, false, false) case api.UsabilityFilterModeUnusable: - query = query.Where("usability_blocked = ? OR usability_offline = ? OR usability_low_score = ? OR usability_redundant_ip = ? OR usability_gouging = ? OR usability_not_accepting_contracts = ? OR usability_not_announced = ? OR usability_not_completing_scan = ? 
OR usability_unknown = ?", - true, true, true, true, true, true, true, true, true) + query = query.Where("usability_blocked = ? OR usability_offline = ? OR usability_low_score = ? OR usability_redundant_ip = ? OR usability_gouging = ? OR usability_not_accepting_contracts = ? OR usability_not_announced = ? OR usability_not_completing_scan = ?", + true, true, true, true, true, true, true, true) case api.UsabilityFilterModeAll: // nothing to do default: diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 8b154abc2..bf22fb20b 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -1240,7 +1240,6 @@ func TestHostInfo(t *testing.T) { want.Usability.NotAcceptingContracts = false want.Usability.NotAnnounced = false want.Usability.NotCompletingScan = false - want.Usability.Unknown = false err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) if err != nil { t.Fatal(err) @@ -1376,7 +1375,6 @@ func newTestHostInfo(h hostdb.Host) api.HostInfo { NotAcceptingContracts: true, NotAnnounced: true, NotCompletingScan: true, - Unknown: true, }, } } diff --git a/stores/migrations/mysql/main/migration_00007_host_info.sql b/stores/migrations/mysql/main/migration_00007_host_info.sql index 69864b3e1..c13f5c396 100644 --- a/stores/migrations/mysql/main/migration_00007_host_info.sql +++ b/stores/migrations/mysql/main/migration_00007_host_info.sql @@ -14,7 +14,6 @@ CREATE TABLE `host_infos` ( `usability_not_accepting_contracts` boolean NOT NULL DEFAULT false, `usability_not_announced` boolean NOT NULL DEFAULT false, `usability_not_completing_scan` boolean NOT NULL DEFAULT false, - `usability_unknown` boolean NOT NULL DEFAULT false, `score_age` double NOT NULL, `score_collateral` double NOT NULL, @@ -40,7 +39,6 @@ CREATE TABLE `host_infos` ( INDEX `idx_host_infos_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), INDEX `idx_host_infos_usability_not_announced` (`usability_not_announced`), 
INDEX `idx_host_infos_usability_not_completing_scan` (`usability_not_completing_scan`), - INDEX `idx_host_infos_usability_unknown` (`usability_unknown`), INDEX `idx_host_infos_score_age` (`score_age`), INDEX `idx_host_infos_score_collateral` (`score_collateral`), INDEX `idx_host_infos_score_interactions` (`score_interactions`), diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 4eaa91499..e39b7f963 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -438,7 +438,6 @@ CREATE TABLE `host_infos` ( `usability_not_accepting_contracts` boolean NOT NULL DEFAULT false, `usability_not_announced` boolean NOT NULL DEFAULT false, `usability_not_completing_scan` boolean NOT NULL DEFAULT false, - `usability_unknown` boolean NOT NULL DEFAULT false, `score_age` double NOT NULL, `score_collateral` double NOT NULL, @@ -464,7 +463,6 @@ CREATE TABLE `host_infos` ( INDEX `idx_host_infos_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), INDEX `idx_host_infos_usability_not_announced` (`usability_not_announced`), INDEX `idx_host_infos_usability_not_completing_scan` (`usability_not_completing_scan`), - INDEX `idx_host_infos_usability_unknown` (`usability_unknown`), INDEX `idx_host_infos_score_age` (`score_age`), INDEX `idx_host_infos_score_collateral` (`score_collateral`), INDEX `idx_host_infos_score_interactions` (`score_interactions`), diff --git a/stores/migrations/sqlite/main/migration_00007_host_info.sql b/stores/migrations/sqlite/main/migration_00007_host_info.sql index 5d425dfe7..910dd637c 100644 --- a/stores/migrations/sqlite/main/migration_00007_host_info.sql +++ b/stores/migrations/sqlite/main/migration_00007_host_info.sql @@ -14,7 +14,6 @@ CREATE TABLE `host_infos` ( `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, `usability_not_announced` INTEGER NOT NULL DEFAULT 0, `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, - 
`usability_unknown` INTEGER NOT NULL DEFAULT 0, `score_age` REAL NOT NULL, `score_collateral` REAL NOT NULL, @@ -44,7 +43,6 @@ CREATE INDEX `idx_host_infos_usability_gouging` ON `host_infos` (`usability_goug CREATE INDEX `idx_host_infos_usability_not_accepting_contracts` ON `host_infos` (`usability_not_accepting_contracts`); CREATE INDEX `idx_host_infos_usability_not_announced` ON `host_infos` (`usability_not_announced`); CREATE INDEX `idx_host_infos_usability_not_completing_scan` ON `host_infos` (`usability_not_completing_scan`); -CREATE INDEX `idx_host_infos_usability_unknown` ON `host_infos` (`usability_unknown`); CREATE INDEX `idx_host_infos_score_age` ON `host_infos` (`score_age`); CREATE INDEX `idx_host_infos_score_collateral` ON `host_infos` (`score_collateral`); CREATE INDEX `idx_host_infos_score_interactions` ON `host_infos` (`score_interactions`); diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 5ec7a2b0e..791fce1ca 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -150,7 +150,7 @@ CREATE TABLE `object_user_metadata` (`id` integer PRIMARY KEY AUTOINCREMENT,`cre CREATE UNIQUE INDEX `idx_object_user_metadata_key` ON `object_user_metadata`(`db_object_id`,`db_multipart_upload_id`,`key`); -- dbHostInfo -CREATE TABLE `host_infos` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `created_at` datetime, `db_autopilot_id` INTEGER NOT NULL, `db_host_id` INTEGER NOT NULL, `usability_blocked` INTEGER NOT NULL DEFAULT 0, `usability_offline` INTEGER NOT NULL DEFAULT 0, `usability_low_score` INTEGER NOT NULL DEFAULT 0, `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, `usability_gouging` INTEGER NOT NULL DEFAULT 0, `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, `usability_not_announced` INTEGER NOT NULL DEFAULT 0, `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, `usability_unknown` INTEGER NOT NULL DEFAULT 0, `score_age` REAL NOT NULL, 
`score_collateral` REAL NOT NULL, `score_interactions` REAL NOT NULL, `score_storage_remaining` REAL NOT NULL, `score_uptime` REAL NOT NULL, `score_version` REAL NOT NULL, `score_prices` REAL NOT NULL, `gouging_contract_err` TEXT, `gouging_download_err` TEXT, `gouging_gouging_err` TEXT, `gouging_prune_err` TEXT, `gouging_upload_err` TEXT, FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE); +CREATE TABLE `host_infos` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `created_at` datetime, `db_autopilot_id` INTEGER NOT NULL, `db_host_id` INTEGER NOT NULL, `usability_blocked` INTEGER NOT NULL DEFAULT 0, `usability_offline` INTEGER NOT NULL DEFAULT 0, `usability_low_score` INTEGER NOT NULL DEFAULT 0, `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, `usability_gouging` INTEGER NOT NULL DEFAULT 0, `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, `usability_not_announced` INTEGER NOT NULL DEFAULT 0, `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, `score_age` REAL NOT NULL, `score_collateral` REAL NOT NULL, `score_interactions` REAL NOT NULL, `score_storage_remaining` REAL NOT NULL, `score_uptime` REAL NOT NULL, `score_version` REAL NOT NULL, `score_prices` REAL NOT NULL, `gouging_contract_err` TEXT, `gouging_download_err` TEXT, `gouging_gouging_err` TEXT, `gouging_prune_err` TEXT, `gouging_upload_err` TEXT, FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE); CREATE UNIQUE INDEX `idx_host_infos_id` ON `host_infos` (`db_autopilot_id`, `db_host_id`); CREATE INDEX `idx_host_infos_usability_blocked` ON `host_infos` (`usability_blocked`); CREATE INDEX `idx_host_infos_usability_offline` ON `host_infos` (`usability_offline`); @@ -160,7 +160,6 @@ CREATE INDEX `idx_host_infos_usability_gouging` ON `host_infos` (`usability_goug CREATE INDEX 
`idx_host_infos_usability_not_accepting_contracts` ON `host_infos` (`usability_not_accepting_contracts`); CREATE INDEX `idx_host_infos_usability_not_announced` ON `host_infos` (`usability_not_announced`); CREATE INDEX `idx_host_infos_usability_not_completing_scan` ON `host_infos` (`usability_not_completing_scan`); -CREATE INDEX `idx_host_infos_usability_unknown` ON `host_infos` (`usability_unknown`); CREATE INDEX `idx_host_infos_score_age` ON `host_infos` (`score_age`); CREATE INDEX `idx_host_infos_score_collateral` ON `host_infos` (`score_collateral`); CREATE INDEX `idx_host_infos_score_interactions` ON `host_infos` (`score_interactions`); From 82d74327861b6249e68eb4509932094e1d898387 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 10:55:04 +0100 Subject: [PATCH 064/201] bus: add filter mode to HostsOptions --- api/host.go | 12 ++++-- autopilot/autopilot.go | 12 +++--- autopilot/autopilot_test.go | 47 ++++++++++---------- autopilot/contractor.go | 10 ++--- autopilot/hostfilter.go | 6 ++- autopilot/hostinfo.go | 8 ++-- autopilot/scanner.go | 2 +- autopilot/scanner_test.go | 10 +++-- bus/bus.go | 21 +++++++-- bus/client/hosts.go | 4 +- internal/test/e2e/blocklist_test.go | 6 +-- internal/test/e2e/cluster_test.go | 2 +- internal/test/e2e/pruning_test.go | 4 +- stores/hostdb.go | 30 +++++++++---- stores/hostdb_test.go | 67 +++++++++++++++++------------ 15 files changed, 146 insertions(+), 95 deletions(-) diff --git a/api/host.go b/api/host.go index aea80a9fe..ba66ffd58 100644 --- a/api/host.go +++ b/api/host.go @@ -70,9 +70,10 @@ type ( // Option types. 
type ( - GetHostsOptions struct { - Offset int - Limit int + HostsOptions struct { + Offset int + Limit int + FilterMode string } HostsForScanningOptions struct { MaxLastScan TimeRFC3339 @@ -95,13 +96,16 @@ func DefaultSearchHostOptions() SearchHostOptions { } } -func (opts GetHostsOptions) Apply(values url.Values) { +func (opts HostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) } if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } + if opts.FilterMode != "" { + values.Set("filterMode", opts.FilterMode) + } } func (opts HostsForScanningOptions) Apply(values url.Values) { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index c89049286..d8a760265 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -54,10 +54,10 @@ type Bus interface { // hostdb Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb.Host, error) + Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.Host, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) // metrics RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error @@ -196,7 +196,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { state := ap.State() // fetch hosts - hosts, err := ap.bus.Hosts(ctx, api.GetHostsOptions{}) + hosts, err := ap.bus.Hosts(ctx, api.HostsOptions{}) if jc.Check("failed to get hosts", err) != nil { return } @@ -735,7 +735,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { jc.Encode(hosts) } -func countUsableHosts(cfg 
api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (usables uint64) { +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) @@ -749,7 +749,7 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. // evaluateConfig evaluates the given configuration and if the gouging settings // are too strict for the number of contracts required by 'cfg', it will provide // a recommendation on how to loosen it. -func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.Host) (resp api.ConfigEvaluationResponse) { +func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (resp api.ConfigEvaluationResponse) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) resp.Hosts = uint64(len(hosts)) @@ -865,7 +865,7 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu // optimiseGougingSetting tries to optimise one field of the gouging settings to // try and hit the target number of contracts. 
-func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.Host) bool { +func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.HostInfo) bool { if cfg.Contracts.Amount == 0 { return true // nothing to do } diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go index f818c312b..9ebafe675 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/autopilot_test.go @@ -14,31 +14,34 @@ import ( func TestOptimiseGougingSetting(t *testing.T) { // create 10 hosts that should all be usable - var hosts []hostdb.Host + var hosts []hostdb.HostInfo for i := 0; i < 10; i++ { - hosts = append(hosts, hostdb.Host{ - KnownSince: time.Unix(0, 0), - PriceTable: hostdb.HostPriceTable{ - HostPriceTable: rhpv3.HostPriceTable{ - CollateralCost: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), + hosts = append(hosts, hostdb.HostInfo{ + Host: hostdb.Host{ + KnownSince: time.Unix(0, 0), + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{ + CollateralCost: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + }, }, + Settings: rhpv2.HostSettings{ + AcceptingContracts: true, + Collateral: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + Version: "1.6.0", + }, + Interactions: hostdb.Interactions{ + Uptime: time.Hour * 1000, + LastScan: time.Now(), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + TotalScans: 100, + }, + LastAnnouncement: time.Unix(0, 0), + Scanned: true, }, - Settings: rhpv2.HostSettings{ - AcceptingContracts: true, - Collateral: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), - Version: "1.6.0", - }, - Interactions: hostdb.Interactions{ - Uptime: time.Hour * 1000, - LastScan: time.Now(), - 
LastScanSuccess: true, - SecondToLastScanSuccess: true, - TotalScans: 100, - }, - LastAnnouncement: time.Unix(0, 0), - Scanned: true, + Blocked: false, }) } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 83e12a206..47a03480f 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -249,7 +249,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.Hosts(ctx, api.GetHostsOptions{}) + hosts, err := c.ap.bus.Hosts(ctx, api.HostsOptions{}) if err != nil { return false, err } @@ -777,7 +777,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts host.PriceTable.HostBlockHeight = cs.BlockHeight // decide whether the host is still good - usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host.Host, minScore, contract.FileSize()) + usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, contract.FileSize()) if !usable { reasons := unusableResult.reasons() toStopUsing[fcid] = strings.Join(reasons, ",") @@ -1297,7 +1297,7 @@ func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uin return minScore } -func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { +func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { start := time.Now() // fetch consensus state @@ -1311,7 +1311,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) // select unused hosts that passed a scan - var unused []hostdb.Host + var unused 
[]hostdb.HostInfo var excluded, notcompletedscan int for _, h := range hosts { // filter out used hosts @@ -1348,7 +1348,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.Host, us h.PriceTable.HostBlockHeight = cs.BlockHeight usable, result := isUsableHost(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) if usable { - candidates = append(candidates, scoredHost{h, result.scoreBreakdown.Score()}) + candidates = append(candidates, scoredHost{h.Host, result.scoreBreakdown.Score()}) continue } diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 574862a97..6f8e4f747 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -176,7 +176,7 @@ func (u *unusableHostResult) keysAndValues() []interface{} { // isUsableHost returns whether the given host is usable along with a list of // reasons why it was deemed unusable. -func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.Host, minScore float64, storedData uint64) (bool, unusableHostResult) { +func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.HostInfo, minScore float64, storedData uint64) (bool, unusableHostResult) { if rs.Validate() != nil { panic("invalid redundancy settings were supplied - developer error") } @@ -187,6 +187,8 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. if !h.IsAnnounced() { errs = append(errs, errHostNotAnnounced) + } else if h.Blocked { + errs = append(errs, errHostBlocked) } else if !h.Scanned { errs = append(errs, errHostNotCompletingScan) } else { @@ -211,7 +213,7 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. 
// not gouging, this because the core package does not have overflow // checks in its cost calculations needed to calculate the period // cost - scoreBreakdown = hostScore(cfg, h, storedData, rs.Redundancy()) + scoreBreakdown = hostScore(cfg, h.Host, storedData, rs.Redundancy()) if scoreBreakdown.Score() < minScore { errs = append(errs, fmt.Errorf("%w: (%s): %v < %v", errLowScore, scoreBreakdown.String(), scoreBreakdown.Score(), minScore)) } diff --git a/autopilot/hostinfo.go b/autopilot/hostinfo.go index 82efa1d61..e0cbecadc 100644 --- a/autopilot/hostinfo.go +++ b/autopilot/hostinfo.go @@ -53,7 +53,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api // ignore the pricetable's HostBlockHeight by setting it to our own blockheight host.Host.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host.Host, minScore, storedData) + isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host, minScore, storedData) return api.HostHandlerResponse{ Host: host.Host, Checks: &api.HostHandlerResponseChecks{ @@ -67,7 +67,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api }, nil } -func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.Host) (hi hostInfo, found bool) { +func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.HostInfo) (hi hostInfo, found bool) { // grab host details from cache c.mu.Lock() hi, found = c.cachedHostInfo[host.PublicKey] @@ -157,7 +157,7 @@ func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, a // set IsChecked = false. if usabilityMode == api.UsabilityFilterModeAll { hostInfos = append(hostInfos, api.HostHandlerResponse{ - Host: host, + Host: host.Host, }) if wanted > 0 && len(hostInfos) == wanted { return hostInfos, nil // we're done. 
@@ -170,7 +170,7 @@ func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, a continue } hostInfos = append(hostInfos, api.HostHandlerResponse{ - Host: host, + Host: host.Host, Checks: &api.HostHandlerResponseChecks{ Gouging: hi.UnusableResult.gougingBreakdown.Gouging(), GougingBreakdown: hi.UnusableResult.gougingBreakdown, diff --git a/autopilot/scanner.go b/autopilot/scanner.go index bb21e5022..85301822c 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -31,7 +31,7 @@ type ( // a bit, we currently use inline interfaces to avoid having to update the // scanner tests with every interface change bus interface { - Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb.Host, error) + Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 6214ec4a1..481b78046 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -19,7 +19,7 @@ type mockBus struct { reqs []string } -func (b *mockBus) Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb.Host, error) { +func (b *mockBus) Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) start := opts.Offset @@ -32,11 +32,15 @@ func (b *mockBus) Hosts(ctx context.Context, opts api.GetHostsOptions) ([]hostdb end = len(b.hosts) } - return b.hosts[start:end], nil + his := make([]hostdb.HostInfo, len(b.hosts[start:end])) + for i, h := range b.hosts[start:end] { + his[i] = hostdb.HostInfo{Host: h} + } + return his, nil } func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) { - hosts, err 
:= b.Hosts(ctx, api.GetHostsOptions{ + hosts, err := b.Hosts(ctx, api.HostsOptions{ Offset: opts.Offset, Limit: opts.Limit, }) diff --git a/bus/bus.go b/bus/bus.go index d8b3fdfc5..3838a1877 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -92,13 +92,13 @@ type ( // A HostDB stores information about hosts. HostDB interface { Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, offset, limit int) ([]hostdb.Host, error) + Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) ResetLostSectors(ctx context.Context, hk types.PublicKey) error - SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.Host, error) + SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.HostInfo, error) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) HostBlocklist(ctx context.Context) ([]string, error) @@ -758,10 +758,23 @@ func (b *bus) walletPendingHandler(jc jape.Context) { func (b *bus) hostsHandlerGET(jc jape.Context) { offset := 0 limit := -1 - if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil { + filterMode := api.HostFilterModeAllowed + if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil || jc.DecodeForm("filterMode", &filterMode) != nil { return } - hosts, err := b.hdb.Hosts(jc.Request.Context(), offset, limit) + + // validate filterMode + switch filterMode { + case api.HostFilterModeAllowed: + case 
api.HostFilterModeBlocked: + case api.HostFilterModeAll: + default: + jc.Error(errors.New("invalid filter mode"), http.StatusBadRequest) + return + } + + // fetch hosts + hosts, err := b.hdb.Hosts(jc.Request.Context(), filterMode, offset, limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", offset, offset+limit), err) != nil { return } diff --git a/bus/client/hosts.go b/bus/client/hosts.go index ecf44e52b..70c8b3431 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -30,7 +30,7 @@ func (c *Client) HostBlocklist(ctx context.Context) (blocklist []string, err err } // Hosts returns 'limit' hosts at given 'offset'. -func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []hostdb.Host, err error) { +func (c *Client) Hosts(ctx context.Context, opts api.HostsOptions) (hosts []hostdb.HostInfo, err error) { values := url.Values{} opts.Apply(values) err = c.c.WithContext(ctx).GET("/hosts?"+values.Encode(), &hosts) @@ -78,7 +78,7 @@ func (c *Client) ResetLostSectors(ctx context.Context, hostKey types.PublicKey) } // SearchHosts returns all hosts that match certain search criteria. 
-func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []hostdb.Host, err error) { +func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []hostdb.HostInfo, err error) { err = c.c.WithContext(ctx).POST("/search/hosts", api.SearchHostsRequest{ Offset: opts.Offset, Limit: opts.Limit, diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 64acc2fba..e371f01d4 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -117,7 +117,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -142,7 +142,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -152,7 +152,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 2346f7019..65febbbf7 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -146,7 +146,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. 
- hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{}) + hosts, err := cluster.Bus.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) for _, host := range hosts { hi, err := cluster.Autopilot.HostInfo(host.PublicKey) diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index de948c970..b5f6cccd0 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -84,7 +84,7 @@ func TestHostPruning(t *testing.T) { } // assert the host was not pruned - hostss, err := b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err := b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -96,7 +96,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = b.Hosts(context.Background(), api.GetHostsOptions{}) + hostss, err = b.Hosts(context.Background(), api.HostsOptions{}) tt.OK(err) if len(hostss) != 0 { return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions) diff --git a/stores/hostdb.go b/stores/hostdb.go index fd23abf4a..101ee298d 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -461,23 +461,25 @@ func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, return hostAddresses, err } -func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.Host, error) { +func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.HostInfo, error) { if offset < 0 { return nil, ErrNegativeOffset } - var hosts []hostdb.Host - var fullHosts []dbHost - // Apply filter mode. 
+ var blocked bool query := ss.db switch filterMode { case api.HostFilterModeAllowed: query = query.Scopes(ss.excludeBlocked) case api.HostFilterModeBlocked: query = query.Scopes(ss.excludeAllowed) + blocked = true case api.HostFilterModeAll: - // nothing to do + // preload allowlist and blocklist + query = query. + Preload("Allowlist"). + Preload("Blocklist") default: return nil, fmt.Errorf("invalid filter mode: %v", filterMode) } @@ -500,12 +502,24 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains }) } + var hosts []hostdb.HostInfo + var fullHosts []dbHost err := query. Offset(offset). Limit(limit). FindInBatches(&fullHosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { for _, fh := range fullHosts { - hosts = append(hosts, fh.convert()) + if filterMode == api.HostFilterModeAll { + hosts = append(hosts, hostdb.HostInfo{ + Host: fh.convert(), + Blocked: ss.isBlocked(fh), + }) + } else { + hosts = append(hosts, hostdb.HostInfo{ + Host: fh.convert(), + Blocked: blocked, + }) + } } return nil }). @@ -517,8 +531,8 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains } // Hosts returns non-blocked hosts at given offset and limit. 
-func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]hostdb.Host, error) { - return ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, offset, limit) +func (ss *SQLStore) Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) { + return ss.SearchHosts(ctx, filterMode, "", nil, offset, limit) } func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 35872ea2d..528700502 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -53,7 +53,7 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.Hosts(ctx, 0, -1) + allHosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) if err != nil { t.Fatal(err) } @@ -171,27 +171,45 @@ func TestSQLHosts(t *testing.T) { hk1, hk2, hk3 := hks[0], hks[1], hks[2] // assert the hosts method returns the expected hosts - if hosts, err := ss.Hosts(ctx, 0, -1); err != nil || len(hosts) != 3 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.Hosts(ctx, 0, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk1 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, 1, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 1, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk2 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, 3, 1); err != nil || len(hosts) != 0 { + if hosts, err := ss.Hosts(ctx, 
api.HostFilterModeAllowed, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.Hosts(ctx, -1, -1); err != ErrNegativeOffset { + if _, err := ss.Hosts(ctx, api.HostFilterModeAllowed, -1, -1); err != ErrNegativeOffset { t.Fatal("unexpected error", err) } - // Add a scan for each host. + // add a custom host and block it + hk4 := types.PublicKey{4} + if err := ss.addCustomTestHost(hk4, "host4.com"); err != nil { + t.Fatal("unexpected", err) + } + if err := ss.UpdateHostBlocklistEntries(context.Background(), []string{"host4.com"}, nil, false); err != nil { + t.Fatal("unexpected", err) + } + + // assert host filter mode is applied + if hosts, err := ss.Hosts(ctx, api.HostFilterModeAll, 0, -1); err != nil || len(hosts) != 4 { + t.Fatal("unexpected", len(hosts), err) + } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeBlocked, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { + t.Fatal("unexpected", len(hosts), err) + } + + // add a scan for every non-blocked host n := time.Now() if err := ss.addTestScan(hk1, n.Add(-time.Minute), nil, rhpv2.HostSettings{}); err != nil { t.Fatal(err) @@ -203,39 +221,32 @@ func TestSQLHosts(t *testing.T) { t.Fatal(err) } - // Fetch all hosts using the HostsForScanning method. 
- hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 3) + // fetch all hosts using the HostsForScanning method + hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 4) if err != nil { t.Fatal(err) - } - if len(hostAddresses) != 3 { + } else if len(hostAddresses) != 4 { t.Fatal("wrong number of addresses") - } - if hostAddresses[0].PublicKey != hk3 { - t.Fatal("wrong key") - } - if hostAddresses[1].PublicKey != hk2 { - t.Fatal("wrong key") - } - if hostAddresses[2].PublicKey != hk1 { + } else if hostAddresses[0].PublicKey != hk4 || + hostAddresses[1].PublicKey != hk3 || + hostAddresses[2].PublicKey != hk2 || + hostAddresses[3].PublicKey != hk1 { t.Fatal("wrong key") } - // Fetch one host by setting the cutoff exactly to hk2. - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, 3) + // fetch one host by setting the cutoff exactly to hk3 + hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-3*time.Minute), 0, -1) if err != nil { t.Fatal(err) - } - if len(hostAddresses) != 1 { + } else if len(hostAddresses) != 1 { t.Fatal("wrong number of addresses") } - // Fetch no hosts. 
+ // fetch no hosts hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) if err != nil { t.Fatal(err) - } - if len(hostAddresses) != 0 { + } else if len(hostAddresses) != 0 { t.Fatal("wrong number of addresses") } } @@ -595,7 +606,7 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, 0, -1) + hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) if err != nil { t.Fatal(err) } @@ -767,7 +778,7 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, 0, -1) + hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) if err != nil { t.Fatal(err) } From 37c725e3d84f2ad1015dcecffe4263348ee54be9 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 09:39:29 +0100 Subject: [PATCH 065/201] api: fix japecheck errors --- autopilot/client.go | 2 +- bus/client/hosts.go | 2 +- stores/hostdb_test.go | 7 +------ 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/autopilot/client.go b/autopilot/client.go index 41b0b3207..4d491f457 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -41,7 +41,7 @@ func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostInfoResponse, e // HostInfo returns information about all hosts. func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode string, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostInfoResponse, err error) { - err = c.c.POST("/hosts", api.SearchHostsRequest{ + err = c.c.POST("/hosts", api.HostInfosRequest{ Offset: offset, Limit: limit, FilterMode: filterMode, diff --git a/bus/client/hosts.go b/bus/client/hosts.go index beda5950b..858989ec7 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -119,7 +119,7 @@ func (c *Client) UpdateHostInfo(ctx context.Context, autopilotID string, hostKey // HostInfos returns the host info for all hosts known to the autopilot with the given identifier. 
func (c *Client) HostInfos(ctx context.Context, autopilotID string, opts api.HostInfoOptions) (hostInfos []api.HostInfo, err error) { - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/autopilot/%s", autopilotID), api.HostInfosRequest{ + err = c.c.WithContext(ctx).POST(fmt.Sprintf("/autopilot/%s/hosts", autopilotID), api.HostInfosRequest{ Offset: opts.Offset, Limit: opts.Limit, FilterMode: opts.FilterMode, diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index bf22fb20b..8a75caf6d 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -5,7 +5,6 @@ import ( "context" "errors" "fmt" - "os" "reflect" "testing" "time" @@ -1066,11 +1065,7 @@ func TestAnnouncementMaxAge(t *testing.T) { } func TestHostInfo(t *testing.T) { - cfg := defaultTestSQLStoreConfig - cfg.persistent = true - cfg.dir = "/Users/peterjan/testing" - os.RemoveAll(cfg.dir) - ss := newTestSQLStore(t, cfg) + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() // fetch info for a non-existing autopilot From dabe9838bf48351cdbfe376d2ce370fe1ed4db0e Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 11:21:01 +0100 Subject: [PATCH 066/201] autopilot: update host filter --- autopilot/hostfilter.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 6f8e4f747..8de37221a 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -182,13 +182,14 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. 
} var errs []error + if h.Blocked { + errs = append(errs, errHostBlocked) + } + var gougingBreakdown api.HostGougingBreakdown var scoreBreakdown api.HostScoreBreakdown - if !h.IsAnnounced() { errs = append(errs, errHostNotAnnounced) - } else if h.Blocked { - errs = append(errs, errHostBlocked) } else if !h.Scanned { errs = append(errs, errHostNotCompletingScan) } else { From c011dc835a4899cc3df38f72173d9de81944ef69 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 11:26:09 +0100 Subject: [PATCH 067/201] autopilot: update host filter --- autopilot/hostfilter.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 33a6daa85..7113fe5bc 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -106,15 +106,18 @@ func calculateHostInfo(cfg api.AutopilotConfig, rs api.RedundancySettings, gc wo } // prepare host breakdown fields - var ub api.HostUsabilityBreakdown var gb api.HostGougingBreakdown var sb api.HostScoreBreakdown + var ub api.HostUsabilityBreakdown + + // blocked status does not influence what host info is calculated + if h.Blocked { + ub.Blocked = true + } - // populate host info fields + // calculate remaining host info fields if !h.IsAnnounced() { ub.NotAnnounced = true - } else if h.Blocked { - ub.Blocked = true } else if !h.Scanned { ub.NotCompletingScan = true } else { From 61c7c971a28bd50adb1a8cee847728b57df3e6a5 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 11:34:23 +0100 Subject: [PATCH 068/201] contractor: remove cached fields --- autopilot/contractor.go | 76 ++++++----------------------------------- 1 file changed, 11 insertions(+), 65 deletions(-) diff --git a/autopilot/contractor.go b/autopilot/contractor.go index ef4ff6e28..ba6c6a5d9 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -100,9 +100,6 @@ type ( pruning bool pruningLastStart time.Time - - cachedDataStored map[types.PublicKey]uint64 - cachedMinScore float64 
} scoredHost struct { @@ -275,20 +272,14 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( c.logger.Warn("could not calculate min score, no hosts found") } - // update cache. - c.mu.Lock() - c.cachedDataStored = hostData - c.cachedMinScore = minScore - c.mu.Unlock() - // run host checks - err = c.runHostChecks(ctx, hosts) + err = c.runHostChecks(ctx, hosts, hostData, minScore) if err != nil { return false, fmt.Errorf("failed to run host checks, err: %v", err) } // run contract checks - updatedSet, toArchive, toStopUsing, toRefresh, toRenew, err := c.runContractChecks(ctx, w, contracts, isInCurrentSet, minScore) + updatedSet, toArchive, toStopUsing, toRefresh, toRenew, err := c.runContractChecks(ctx, contracts, isInCurrentSet) if err != nil { return false, fmt.Errorf("failed to run contract checks, err: %v", err) } @@ -637,7 +628,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error { return nil } -func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { +func (c *contractor) runContractChecks(ctx context.Context, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { if c.ap.isStopped() { return } @@ -695,6 +686,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts // convenience variables fcid := contract.ID + hk := contract.HostKey // check if contract is ready to be archived. 
if cs.BlockHeight > contract.EndHeight()-c.revisionSubmissionBuffer { @@ -712,8 +704,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts } // fetch host from hostdb - hk := contract.HostKey - host, err := c.ap.bus.Host(ctx, hk) + host, err := c.ap.bus.HostInfo(ctx, c.ap.id, hk) if err != nil { c.logger.Errorw(fmt.Sprintf("missing host, err: %v", err), "hk", hk) toStopUsing[fcid] = api.ErrUsabilityHostNotFound.Error() @@ -722,42 +713,15 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts } // if the host is blocked we ignore it, it might be unblocked later - if host.Blocked { + if host.Usability.Blocked { c.logger.Infow("unusable host", "hk", hk, "fcid", fcid, "reasons", api.ErrUsabilityHostBlocked.Error()) toStopUsing[fcid] = api.ErrUsabilityHostBlocked.Error() continue } - // if the host doesn't have a valid pricetable, update it if we were - // able to obtain a revision - invalidPT := contract.Revision == nil - if contract.Revision != nil { - if err := refreshPriceTable(ctx, w, &host.Host); err != nil { - c.logger.Errorf("could not fetch price table for host %v: %v", host.PublicKey, err) - invalidPT = true - } - } - - // refresh the consensus state - if css, err := c.ap.bus.ConsensusState(ctx); err != nil { - c.logger.Errorf("could not fetch consensus state, err: %v", err) - } else { - cs = css - } - - // use a new gouging checker for every contract - gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - - // set the host's block height to ours to disable the height check in - // the gouging checks, in certain edge cases the renter might unsync and - // would therefor label all hosts as unusable and go on to create a - // whole new set of contracts with new hosts - host.PriceTable.HostBlockHeight = cs.BlockHeight - - // decide whether the host is still good - hi := calculateHostInfo(state.cfg, state.rs, gc, host, minScore, 
contract.FileSize()) - if !hi.Usability.Usable() { - reasons := hi.Usability.UnusableReasons() + // check if the host is still usable + if !host.Usability.Usable() { + reasons := host.Usability.UnusableReasons() toStopUsing[fcid] = strings.Join(reasons, ",") c.logger.Infow("unusable host", "hk", hk, "fcid", fcid, "reasons", reasons) continue @@ -778,20 +742,8 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts continue // can't perform contract checks without revision } - // if we were not able to get a valid price table for the host, but we - // did pass the host checks, we only want to be lenient if this contract - // is in the current set and only for a certain number of times, - // controlled by maxKeepLeeway - if invalidPT { - if _, found := inCurrentSet[fcid]; !found || remainingKeepLeeway == 0 { - toStopUsing[fcid] = "no valid price table" - continue - } - remainingKeepLeeway-- // we let it slide - } - // decide whether the contract is still good - ci := contractInfo{contract: contract, priceTable: host.PriceTable.HostPriceTable, settings: host.Settings} + ci := contractInfo{contract: contract, priceTable: host.Host.PriceTable.HostPriceTable, settings: host.Host.Settings} usable, recoverable, refresh, renew, reasons := c.isUsableContract(state.cfg, state, ci, cs.BlockHeight, ipFilter) ci.usable = usable ci.recoverable = recoverable @@ -822,7 +774,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts return toKeep, toArchive, toStopUsing, toRefresh, toRenew, nil } -func (c *contractor) runHostChecks(ctx context.Context, hosts []hostdb.HostInfo) error { +func (c *contractor) runHostChecks(ctx context.Context, hosts []hostdb.HostInfo, hostData map[types.PublicKey]uint64, minScore float64) error { // convenience variables state := c.ap.State() @@ -835,12 +787,6 @@ func (c *contractor) runHostChecks(ctx context.Context, hosts []hostdb.HostInfo) // create gouging checker gc := 
worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - // grab min score and host data from cache - c.mu.Lock() - minScore := c.cachedMinScore - hostData := c.cachedDataStored - c.mu.Unlock() - // update host info for _, h := range hosts { h.PriceTable.HostBlockHeight = cs.BlockHeight // ignore HostBlockHeight From 06070ac03ab1e9ad529ecba101672fb3346f82f7 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 11:37:21 +0100 Subject: [PATCH 069/201] autopilot: remove TODO --- autopilot/autopilot.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 80c50e1a1..a39818c26 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -696,15 +696,7 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { } hi, err := ap.bus.HostInfo(jc.Request.Context(), ap.id, hk) - if utils.IsErr(err, api.ErrHostInfoNotFound) { - // TODO PJ: we used to calculate the host info here on the fly, maybe we - // should keep doing that but maybe we can get away with this too... 
- jc.Encode(api.HostInfoResponse{ - Host: h.Host, - Checks: nil, - }) - return - } else if jc.Check("failed to get host info", err) != nil { + if jc.Check("failed to get host info", err) != nil { return } From 0d438a7184e9a121e6b9f738b1862465dd3dc969 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 11:41:54 +0100 Subject: [PATCH 070/201] lint: fix --- autopilot/autopilot.go | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index a39818c26..9b82e20ee 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -690,8 +690,7 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { } // TODO: remove on next major release - h, err := compatV105HostInfo(jc.Request.Context(), ap.State(), ap.bus, hk) - if jc.Check("failed to get host info", err) != nil { + if jc.Check("failed to get host info", compatV105HostInfo(jc.Request.Context(), ap.State(), ap.bus, hk)) != nil { return } @@ -937,42 +936,42 @@ func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg // compatV105HostInfo performs some state checks and bus calls we no longer // need, but are necessary checks to make sure our API is consistent. This // should be considered for removal when releasing a new major version. 
-func compatV105HostInfo(ctx context.Context, s state, b Bus, hk types.PublicKey) (*hostdb.HostInfo, error) { +func compatV105HostInfo(ctx context.Context, s state, b Bus, hk types.PublicKey) error { // state checks if s.cfg.Contracts.Allowance.IsZero() { - return nil, fmt.Errorf("can not score hosts because contracts allowance is zero") + return fmt.Errorf("can not score hosts because contracts allowance is zero") } if s.cfg.Contracts.Amount == 0 { - return nil, fmt.Errorf("can not score hosts because contracts amount is zero") + return fmt.Errorf("can not score hosts because contracts amount is zero") } if s.cfg.Contracts.Period == 0 { - return nil, fmt.Errorf("can not score hosts because contract period is zero") + return fmt.Errorf("can not score hosts because contract period is zero") } // fetch host - host, err := b.Host(ctx, hk) + _, err := b.Host(ctx, hk) if err != nil { - return nil, fmt.Errorf("failed to fetch requested host from bus: %w", err) + return fmt.Errorf("failed to fetch requested host from bus: %w", err) } // other checks _, err = b.GougingSettings(ctx) if err != nil { - return nil, fmt.Errorf("failed to fetch gouging settings from bus: %w", err) + return fmt.Errorf("failed to fetch gouging settings from bus: %w", err) } _, err = b.RedundancySettings(ctx) if err != nil { - return nil, fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) + return fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) } _, err = b.ConsensusState(ctx) if err != nil { - return nil, fmt.Errorf("failed to fetch consensus state from bus: %w", err) + return fmt.Errorf("failed to fetch consensus state from bus: %w", err) } _, err = b.RecommendedFee(ctx) if err != nil { - return nil, fmt.Errorf("failed to fetch recommended fee from bus: %w", err) + return fmt.Errorf("failed to fetch recommended fee from bus: %w", err) } - return &host, nil + return nil } func compatV105UsabilityFilterModeCheck(usabilityMode string) error { From 
fb444908d6ded2003f3abbae38f48214e325ec35 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 12:59:24 +0100 Subject: [PATCH 071/201] autopilot: update comment --- autopilot/autopilot.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 9b82e20ee..57876a6ad 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -933,9 +933,9 @@ func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg } } -// compatV105HostInfo performs some state checks and bus calls we no longer -// need, but are necessary checks to make sure our API is consistent. This -// should be considered for removal when releasing a new major version. +// compatV105HostInfo performs some state checks and bus calls we no longer need +// but are necessary checks to make sure our API is consistent. This should be +// removed in the next major release. func compatV105HostInfo(ctx context.Context, s state, b Bus, hk types.PublicKey) error { // state checks if s.cfg.Contracts.Allowance.IsZero() { From 041eb4247593be7808335ecff7cfc6db849996a9 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 14:23:47 +0100 Subject: [PATCH 072/201] build: remove MinMaxCollateral --- README.md | 1 - build/env_testnet.go | 1 - 2 files changed, 2 deletions(-) diff --git a/README.md b/README.md index a4ccc8681..1585854f6 100644 --- a/README.md +++ b/README.md @@ -250,7 +250,6 @@ updated using the settings API: "maxUploadPrice": "3000000000000000000000000000", // 3000 SC per 1 TiB "migrationSurchargeMultiplier": 10, // overpay up to 10x for sectors migrations on critical slabs "minAccountExpiry": 86400000000000, // 1 day - "minMaxCollateral": "10000000000000000000000000", // at least up to 10 SC per contract "minMaxEphemeralAccountBalance": "1000000000000000000000000", // 1 SC "minPriceTableValidity": 300000000000 // 5 minutes } diff --git a/build/env_testnet.go b/build/env_testnet.go index 
1bc40d287..0bdef28f2 100644 --- a/build/env_testnet.go +++ b/build/env_testnet.go @@ -24,7 +24,6 @@ var ( // // NOTE: default gouging settings for testnet are identical to mainnet. DefaultGougingSettings = api.GougingSettings{ - MinMaxCollateral: types.Siacoins(10), // at least up to 10 SC per contract MaxRPCPrice: types.Siacoins(1).Div64(1000), // 1mS per RPC MaxContractPrice: types.Siacoins(15), // 15 SC per contract MaxDownloadPrice: types.Siacoins(3000), // 3000 SC per 1 TiB From 90f454808b8ad411b30b3d55d11d57616d2eb835 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 14:37:07 +0100 Subject: [PATCH 073/201] autopilot: return affected object ids in migration alerts --- autopilot/alerts.go | 50 +++++++++------ autopilot/autopilot.go | 3 + autopilot/migrator.go | 45 +++++++++++++- internal/test/e2e/migrations_test.go | 93 +++++++++++++++++++++------- 4 files changed, 147 insertions(+), 44 deletions(-) diff --git a/autopilot/alerts.go b/autopilot/alerts.go index f4762c4d4..7a98f8918 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -194,22 +194,37 @@ func newCriticalMigrationSucceededAlert(slabKey object.EncryptionKey) alerts.Ale } } -func newCriticalMigrationFailedAlert(slabKey object.EncryptionKey, health float64, err error) alerts.Alert { +func newCriticalMigrationFailedAlert(slabKey object.EncryptionKey, health float64, objectIds map[string][]string, err error) alerts.Alert { + data := map[string]interface{}{ + "error": err.Error(), + "health": health, + "slabKey": slabKey.String(), + "hint": "If migrations of low-health slabs fail, it might be necessary to increase the MigrationSurchargeMultiplier in the gouging settings to ensure it has every chance of succeeding.", + } + if objectIds != nil { + data["objectIDs"] = objectIds + } + return alerts.Alert{ - ID: alertIDForSlab(alertMigrationID, slabKey), - Severity: alerts.SeverityCritical, - Message: "Critical migration failed", - Data: map[string]interface{}{ - "error": err.Error(), - 
"health": health, - "slabKey": slabKey.String(), - "hint": "If migrations of low-health slabs fail, it might be necessary to increase the MigrationSurchargeMultiplier in the gouging settings to ensure it has every chance of succeeding.", - }, + ID: alertIDForSlab(alertMigrationID, slabKey), + Severity: alerts.SeverityCritical, + Message: "Critical migration failed", + Data: data, Timestamp: time.Now(), } } -func newMigrationFailedAlert(slabKey object.EncryptionKey, health float64, err error) alerts.Alert { +func newMigrationFailedAlert(slabKey object.EncryptionKey, health float64, objectIds map[string][]string, err error) alerts.Alert { + data := map[string]interface{}{ + "error": err.Error(), + "health": health, + "slabKey": slabKey.String(), + "hint": "Migration failures can be temporary, but if they persist it can eventually lead to data loss and should therefor be taken very seriously.", + } + if objectIds != nil { + data["objectIDs"] = objectIds + } + severity := alerts.SeverityError if health < 0.25 { severity = alerts.SeverityCritical @@ -218,15 +233,10 @@ func newMigrationFailedAlert(slabKey object.EncryptionKey, health float64, err e } return alerts.Alert{ - ID: alertIDForSlab(alertMigrationID, slabKey), - Severity: severity, - Message: "Slab migration failed", - Data: map[string]interface{}{ - "error": err.Error(), - "health": health, - "slabKey": slabKey.String(), - "hint": "Migration failures can be temporary, but if they persist it can eventually lead to data loss and should therefor be taken very seriously.", - }, + ID: alertIDForSlab(alertMigrationID, slabKey), + Severity: severity, + Message: "Slab migration failed", + Data: data, Timestamp: time.Now(), } } diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index c89049286..69b7bc458 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -63,6 +63,9 @@ type Bus interface { RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error 
RecordContractPruneMetric(ctx context.Context, metrics ...api.ContractPruneMetric) error + // buckets + ListBuckets(ctx context.Context) ([]api.Bucket, error) + // objects ObjectsBySlabKey(ctx context.Context, bucket string, key object.EncryptionKey) (objects []api.ObjectMetadata, err error) RefreshHealth(ctx context.Context) error diff --git a/autopilot/migrator.go b/autopilot/migrator.go index 89ab16a28..d389a051b 100644 --- a/autopilot/migrator.go +++ b/autopilot/migrator.go @@ -154,15 +154,23 @@ func (m *migrator) performMigrations(p *workerPool) { start := time.Now() res, err := j.execute(ctx, w) m.statsSlabMigrationSpeedMS.Track(float64(time.Since(start).Milliseconds())) - if err != nil { m.logger.Errorf("%v: migration %d/%d failed, key: %v, health: %v, overpaid: %v, err: %v", id, j.slabIdx+1, j.batchSize, j.Key, j.Health, res.SurchargeApplied, err) skipAlert := utils.IsErr(err, api.ErrSlabNotFound) if !skipAlert { + // fetch all object IDs for the slab we failed to migrate + var objectIds map[string][]string + if res, err := m.objectIDsForSlabKey(ctx, j.Key); err != nil { + m.logger.Errorf("failed to fetch object ids for slab key; %w", err) + } else { + objectIds = res + } + + // register the alert if res.SurchargeApplied { - m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.Key, j.Health, err)) + m.ap.RegisterAlert(ctx, newCriticalMigrationFailedAlert(j.Key, j.Health, objectIds, err)) } else { - m.ap.RegisterAlert(ctx, newMigrationFailedAlert(j.Key, j.Health, err)) + m.ap.RegisterAlert(ctx, newMigrationFailedAlert(j.Key, j.Health, objectIds, err)) } } } else { @@ -274,3 +282,34 @@ OUTER: return } } + +func (m *migrator) objectIDsForSlabKey(ctx context.Context, key object.EncryptionKey) (map[string][]string, error) { + // fetch all buckets + // + // NOTE:at the time of writing the bus does not support fetching objects by + // slab key across all buckets at once, therefor we have to list all buckets + // and loop over them, revisit on the next major 
release + buckets, err := m.ap.bus.ListBuckets(ctx) + if err != nil { + return nil, fmt.Errorf("%w; failed to list buckets", err) + } + + // fetch all objects per bucket + idsPerBucket := make(map[string][]string) + for _, bucket := range buckets { + objects, err := m.ap.bus.ObjectsBySlabKey(ctx, bucket.Name, key) + if err != nil { + m.logger.Errorf("failed to fetch objects for slab key in bucket %v; %w", bucket, err) + continue + } else if len(objects) == 0 { + continue + } + + idsPerBucket[bucket.Name] = make([]string, len(objects)) + for i, object := range objects { + idsPerBucket[bucket.Name][i] = object.Name + } + } + + return idsPerBucket, nil +} diff --git a/internal/test/e2e/migrations_test.go b/internal/test/e2e/migrations_test.go index 91bcc20b7..7b8e7b072 100644 --- a/internal/test/e2e/migrations_test.go +++ b/internal/test/e2e/migrations_test.go @@ -4,11 +4,14 @@ import ( "bytes" "context" "errors" + "fmt" + "reflect" "testing" "time" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/test" "lukechampine.com/frand" @@ -19,27 +22,29 @@ func TestMigrations(t *testing.T) { t.SkipNow() } - // create a new test cluster + // configure the cluster to use one extra host + rs := test.RedundancySettings cfg := test.AutopilotConfig - cfg.Contracts.Amount = uint64(test.RedundancySettings.TotalShards) + 1 + cfg.Contracts.Amount = uint64(rs.TotalShards) + 1 + + // create a new test cluster cluster := newTestCluster(t, testClusterOptions{ - // configure the cluster to use 1 more host than the total shards in the - // redundancy settings. 
autopilotSettings: &cfg, - hosts: int(test.RedundancySettings.TotalShards) + 1, + hosts: int(cfg.Contracts.Amount), }) defer cluster.Shutdown() + // convenience variables + b := cluster.Bus + w := cluster.Worker + tt := cluster.tt + // create a helper to fetch used hosts usedHosts := func(path string) map[types.PublicKey]struct{} { - // fetch used hosts - res, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{}) - if err != nil { - t.Fatal(err) - } else if res.Object == nil { + res, _ := b.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{}) + if res.Object == nil { t.Fatal("object not found") } - used := make(map[types.PublicKey]struct{}) for _, slab := range res.Object.Slabs { for _, sector := range slab.Shards { @@ -49,18 +54,13 @@ func TestMigrations(t *testing.T) { return used } - // convenience variables - w := cluster.Worker - tt := cluster.tt - // add an object data := make([]byte, rhpv2.SectorSize) frand.Read(data) - path := "foo" - tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{})) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, t.Name(), api.UploadObjectOptions{})) // assert amount of hosts used - used := usedHosts(path) + used := usedHosts(t.Name()) if len(used) != test.RedundancySettings.TotalShards { t.Fatal("unexpected amount of hosts used", len(used), test.RedundancySettings.TotalShards) } @@ -77,13 +77,12 @@ func TestMigrations(t *testing.T) { // assert we migrated away from the bad host tt.Retry(300, 100*time.Millisecond, func() error { - if _, used := usedHosts(path)[removed]; used { + if _, used := usedHosts(t.Name())[removed]; used { return errors.New("host is still used") } return nil }) - - res, err := cluster.Bus.Object(context.Background(), api.DefaultBucketName, path, api.GetObjectOptions{}) + res, err := cluster.Bus.Object(context.Background(), 
api.DefaultBucketName, t.Name(), api.GetObjectOptions{}) tt.OK(err) // check slabs @@ -109,8 +108,60 @@ func TestMigrations(t *testing.T) { shardHosts += len(shard.Contracts) } } + // all shards should have 1 host except for 1. So we end up with 4 in total. if shardHosts != 4 { t.Fatalf("expected 4 shard hosts, got %v", shardHosts) } + + // create another bucket and upload an object into it + tt.OK(b.CreateBucket(context.Background(), "newbucket", api.CreateBucketOptions{})) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), "newbucket", t.Name(), api.UploadObjectOptions{})) + + // assert we currently don't have any error/crit alerts + ress, _ := b.Alerts(context.Background(), alerts.AlertsOpts{}) + if ress.Totals.Error+ress.Totals.Critical != 0 { + t.Fatal("unexpected", ress) + } + + // remove all hosts to ensure migrations fail + for _, h := range cluster.hosts { + cluster.RemoveHost(h) + } + + // fetch alerts and collect object ids until we found two + seen := make(map[types.Hash256]struct{}) + got := make(map[string][]string) + tt.Retry(100, 100*time.Millisecond, func() error { + ress, _ := b.Alerts(context.Background(), alerts.AlertsOpts{}) + if ress.Totals.Error+ress.Totals.Critical == 0 { + return errors.New("no migration alerts") + } + for _, alert := range ress.Alerts { + if _, skip := seen[alert.ID]; !skip { + seen[alert.ID] = struct{}{} + if data, ok := alert.Data["objectIDs"]; ok { + if data, ok := data.(map[string]interface{}); ok { + for bucket, ids := range data { + if objectIDs, ok := ids.([]interface{}); ok { + for _, id := range objectIDs { + got[bucket] = append(got[bucket], id.(string)) + } + } + } + } + } + } + } + if len(got) < 2 { + return errors.New("not enought object ids") + } + return nil + }) + if !reflect.DeepEqual(map[string][]string{ + api.DefaultBucketName: {fmt.Sprintf("/%s", t.Name())}, + "newbucket": {fmt.Sprintf("/%s", t.Name())}, + }, got) { + t.Fatal("unexpected", got) + } } From 
def6135059770c731149e9db9e7b04c6ad8a085a Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 14:48:47 +0100 Subject: [PATCH 074/201] testing: cleanup TestMigrations --- internal/test/e2e/migrations_test.go | 49 +++++++++++++++++----------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/internal/test/e2e/migrations_test.go b/internal/test/e2e/migrations_test.go index 7b8e7b072..3e99f30a7 100644 --- a/internal/test/e2e/migrations_test.go +++ b/internal/test/e2e/migrations_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" @@ -114,16 +115,18 @@ func TestMigrations(t *testing.T) { t.Fatalf("expected 4 shard hosts, got %v", shardHosts) } - // create another bucket and upload an object into it + // create another bucket and add an object tt.OK(b.CreateBucket(context.Background(), "newbucket", api.CreateBucketOptions{})) tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), "newbucket", t.Name(), api.UploadObjectOptions{})) - // assert we currently don't have any error/crit alerts + // assert we currently don't have any alerts ress, _ := b.Alerts(context.Background(), alerts.AlertsOpts{}) if ress.Totals.Error+ress.Totals.Critical != 0 { t.Fatal("unexpected", ress) } + // prepare + // remove all hosts to ensure migrations fail for _, h := range cluster.hosts { cluster.RemoveHost(h) @@ -138,30 +141,38 @@ func TestMigrations(t *testing.T) { return errors.New("no migration alerts") } for _, alert := range ress.Alerts { - if _, skip := seen[alert.ID]; !skip { - seen[alert.ID] = struct{}{} - if data, ok := alert.Data["objectIDs"]; ok { - if data, ok := data.(map[string]interface{}); ok { - for bucket, ids := range data { - if objectIDs, ok := ids.([]interface{}); ok { - for _, id := range objectIDs { - got[bucket] = append(got[bucket], id.(string)) - } - } + // skip if already seen + if _, skip := seen[alert.ID]; skip { + 
continue + } + seen[alert.ID] = struct{}{} + + // skip if not a migration alert + data, ok := alert.Data["objectIDs"].(map[string]interface{}) + if !ok { + continue + } + + // collect all object ids per bucket + for bucket, ids := range data { + if objectIDs, ok := ids.([]interface{}); ok { + for _, id := range objectIDs { + got[bucket] = append(got[bucket], id.(string)) + if len(got) == 2 { + return nil } } } } } - if len(got) < 2 { - return errors.New("not enought object ids") - } - return nil + return errors.New("haven't found two migration alerts yet") }) - if !reflect.DeepEqual(map[string][]string{ + + // assert we found our two objects across two buckets + if want := map[string][]string{ api.DefaultBucketName: {fmt.Sprintf("/%s", t.Name())}, "newbucket": {fmt.Sprintf("/%s", t.Name())}, - }, got) { - t.Fatal("unexpected", got) + }; !reflect.DeepEqual(want, got) { + t.Fatal("unexpected", cmp.Diff(want, got)) } } From 8da8b4f48b7859c570247fe0928ccb31e07ec690 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 14:50:39 +0100 Subject: [PATCH 075/201] testing: remove TestContractMetricsQueryPlan --- stores/sql_test.go | 93 +++++++++++----------------------------------- 1 file changed, 22 insertions(+), 71 deletions(-) diff --git a/stores/sql_test.go b/stores/sql_test.go index 170edbf87..842f3c9df 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -107,8 +107,8 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { conn = NewMySQLConnection(dbUser, dbPassword, dbURI, dbName) connMetrics = NewMySQLConnection(dbUser, dbPassword, dbURI, dbMetricsName) } else if cfg.persistent { - conn = NewSQLiteConnection(filepath.Join(cfg.dir, "db.sqlite")) - connMetrics = NewSQLiteConnection(filepath.Join(cfg.dir, "metrics.sqlite")) + conn = NewSQLiteConnection(filepath.Join(dir, "db.sqlite")) + connMetrics = NewSQLiteConnection(filepath.Join(dir, "metrics.sqlite")) } else { conn = NewEphemeralSQLiteConnection(dbName) connMetrics = 
NewEphemeralSQLiteConnection(dbMetricsName) @@ -292,7 +292,7 @@ func TestConsensusReset(t *testing.T) { }) // Reset the consensus. - if err := ss.ResetConsensusSubscription(); err != nil { + if err := ss.ResetConsensusSubscription(context.Background()); err != nil { t.Fatal(err) } @@ -332,27 +332,20 @@ type sqliteQueryPlan struct { Detail string `json:"detail"` } -func (p sqliteQueryPlan) usesIndex(index string) bool { +func (p sqliteQueryPlan) usesIndex() bool { d := strings.ToLower(p.Detail) - if index == "" { - return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") - } - return strings.Contains(d, fmt.Sprintf("using index %s", index)) + return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") } //nolint:tagliatelle type mysqlQueryPlan struct { Extra string `json:"Extra"` PossibleKeys string `json:"possible_keys"` - Key string `json:"key"` } -func (p mysqlQueryPlan) usesIndex(index string) bool { - if index == "" { - d := strings.ToLower(p.Extra) - return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") - } - return p.Key == index +func (p mysqlQueryPlan) usesIndex() bool { + d := strings.ToLower(p.Extra) + return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") } func TestQueryPlan(t *testing.T) { @@ -388,66 +381,24 @@ func TestQueryPlan(t *testing.T) { } for _, query := range queries { - plan := queryPlan(ss.db) - if err := explainQuery(ss.db, query, plan); err != nil { - t.Fatal(err) - } else if !plan.usesIndex("") { - t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, plan) - } - } -} - -func TestContractMetricsQueryPlan(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - db := ss.dbMetrics - - query := "SELECT * FROM contracts c WHERE c.timestamp >= 1 AND c.timestamp < 2 AND c.fcid = '' LIMIT 1" - plan := queryPlan(db) - if err := explainQuery(db, query, plan); err != nil 
{ - t.Fatal(err) - } - - if isSQLite(db) { - // SQLite uses the index by default - if !plan.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("unexpected query plan %+v", plan) - } - } else { - // MySQL uses an index, but not 'idx_contracts_fcid_timestamp' - if !plan.usesIndex("") || plan.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("unexpected query plan %+v", plan) - } - - // redo the query with hint - queryWithHint := strings.Replace(query, "WHERE", "USE INDEX (idx_contracts_fcid_timestamp) WHERE", 1) - if err := explainQuery(db, queryWithHint, plan); err != nil { - t.Fatal(err) - } - - // assert it uses 'idx_contracts_fcid_timestamp' now - if !plan.usesIndex("idx_contracts_fcid_timestamp") { - t.Fatalf("unexpected query plan %+v", plan) + if isSQLite(ss.db) { + var explain sqliteQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } + } else { + var explain mysqlQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } } } } -func queryPlan(db *gorm.DB) interface{ usesIndex(index string) bool } { - if isSQLite(db) { - return &sqliteQueryPlan{} - } - return &mysqlQueryPlan{} -} - -func explainQuery(db *gorm.DB, query string, res interface{}) (err error) { - if isSQLite(db) { - err = db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&res).Error - } else { - err = db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&res).Error - } - return -} - func TestApplyUpdatesErr(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() From a32e7e24b1f07ff19f1767a22ce3c3ccea233856 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 
15:13:50 +0100 Subject: [PATCH 076/201] logger: update encoding for json encoder time/duration --- cmd/renterd/logger.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/renterd/logger.go b/cmd/renterd/logger.go index 22308d88b..4b21a1925 100644 --- a/cmd/renterd/logger.go +++ b/cmd/renterd/logger.go @@ -91,7 +91,10 @@ func NewLogger(dir string, cfg config.Log) (*zap.Logger, func(context.Context) e // jsonEncoder returns a zapcore.Encoder that encodes logs as JSON intended for // parsing. func jsonEncoder() zapcore.Encoder { - return zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()) + cfg := zap.NewProductionEncoderConfig() + cfg.EncodeTime = zapcore.RFC3339TimeEncoder + cfg.EncodeDuration = zapcore.StringDurationEncoder + return zapcore.NewJSONEncoder(cfg) } // humanEncoder returns a zapcore.Encoder that encodes logs as human-readable From 36021ab5deef1755269106ba6b7ca0a3afb7c695 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 19 Mar 2024 21:28:33 +0100 Subject: [PATCH 077/201] bus: deprecate /hosts endpoint infavour of /search/hosts --- api/host.go | 15 ++-- autopilot/autopilot.go | 3 +- autopilot/contractor.go | 2 +- autopilot/scanner.go | 2 +- autopilot/scanner_test.go | 4 +- bus/bus.go | 24 ++---- bus/client/hosts.go | 2 +- internal/test/e2e/blocklist_test.go | 8 +- internal/test/e2e/cluster_test.go | 2 +- internal/test/e2e/pruning_test.go | 4 +- stores/hostdb.go | 5 -- stores/hostdb_test.go | 125 +++++++++++++++------------- 12 files changed, 93 insertions(+), 103 deletions(-) diff --git a/api/host.go b/api/host.go index ba66ffd58..2a9df5f6b 100644 --- a/api/host.go +++ b/api/host.go @@ -70,16 +70,16 @@ type ( // Option types. 
type ( - HostsOptions struct { - Offset int - Limit int - FilterMode string + GetHostsOptions struct { + Offset int + Limit int } HostsForScanningOptions struct { MaxLastScan TimeRFC3339 Limit int Offset int } + SearchHostOptions struct { AddressContains string FilterMode string @@ -92,20 +92,17 @@ type ( func DefaultSearchHostOptions() SearchHostOptions { return SearchHostOptions{ Limit: -1, - FilterMode: HostFilterModeAll, + FilterMode: HostFilterModeAllowed, } } -func (opts HostsOptions) Apply(values url.Values) { +func (opts GetHostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) } if opts.Limit != 0 { values.Set("limit", fmt.Sprint(opts.Limit)) } - if opts.FilterMode != "" { - values.Set("filterMode", opts.FilterMode) - } } func (opts HostsForScanningOptions) Apply(values url.Values) { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index d8a760265..3b90a8329 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -54,7 +54,6 @@ type Bus interface { // hostdb Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) @@ -196,7 +195,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { state := ap.State() // fetch hosts - hosts, err := ap.bus.Hosts(ctx, api.HostsOptions{}) + hosts, err := ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) if jc.Check("failed to get hosts", err) != nil { return } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 47a03480f..ef64f630b 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -249,7 +249,7 @@ func (c 
*contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.Hosts(ctx, api.HostsOptions{}) + hosts, err := c.ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) if err != nil { return false, err } diff --git a/autopilot/scanner.go b/autopilot/scanner.go index 85301822c..76643e5b5 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -31,7 +31,7 @@ type ( // a bit, we currently use inline interfaces to avoid having to update the // scanner tests with every interface change bus interface { - Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 481b78046..1cdd096d2 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -19,7 +19,7 @@ type mockBus struct { reqs []string } -func (b *mockBus) Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.HostInfo, error) { +func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) start := opts.Offset @@ -40,7 +40,7 @@ func (b *mockBus) Hosts(ctx context.Context, opts api.HostsOptions) ([]hostdb.Ho } func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) { - hosts, err := b.Hosts(ctx, api.HostsOptions{ + hosts, err := b.SearchHosts(ctx, api.SearchHostOptions{ Offset: opts.Offset, Limit: opts.Limit, }) diff --git a/bus/bus.go b/bus/bus.go index 3838a1877..0a0614cbf 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -92,7 +92,6 @@ type ( // A HostDB 
stores information about hosts. HostDB interface { Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) - Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error @@ -285,7 +284,7 @@ func (b *bus) Handler() http.Handler { "GET /contract/:id/roots": b.contractIDRootsHandlerGET, "GET /contract/:id/size": b.contractSizeHandlerGET, - "GET /hosts": b.hostsHandlerGET, + "GET /hosts": b.hostsHandlerGETDeprecated, "GET /hosts/allowlist": b.hostsAllowlistHandlerGET, "PUT /hosts/allowlist": b.hostsAllowlistHandlerPUT, "GET /hosts/blocklist": b.hostsBlocklistHandlerGET, @@ -755,26 +754,15 @@ func (b *bus) walletPendingHandler(jc jape.Context) { jc.Encode(relevant) } -func (b *bus) hostsHandlerGET(jc jape.Context) { +func (b *bus) hostsHandlerGETDeprecated(jc jape.Context) { offset := 0 limit := -1 - filterMode := api.HostFilterModeAllowed - if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil || jc.DecodeForm("filterMode", &filterMode) != nil { - return - } - - // validate filterMode - switch filterMode { - case api.HostFilterModeAllowed: - case api.HostFilterModeBlocked: - case api.HostFilterModeAll: - default: - jc.Error(errors.New("invalid filter mode"), http.StatusBadRequest) + if jc.DecodeForm("offset", &offset) != nil || jc.DecodeForm("limit", &limit) != nil { return } // fetch hosts - hosts, err := b.hdb.Hosts(jc.Request.Context(), filterMode, offset, limit) + hosts, err := b.hdb.SearchHosts(jc.Request.Context(), api.HostFilterModeAllowed, "", nil, offset, limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", offset, offset+limit), err) != nil { return } @@ -786,6 +774,10 @@ func (b *bus) 
searchHostsHandlerPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } + + // TODO: on the next major release we should: + // - remove api.DefaultSearchHostOptions and set defaults in the handler + // - validate the filter mode here and return a 400 hosts, err := b.hdb.SearchHosts(jc.Request.Context(), req.FilterMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return diff --git a/bus/client/hosts.go b/bus/client/hosts.go index 70c8b3431..1ebf14e1f 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -30,7 +30,7 @@ func (c *Client) HostBlocklist(ctx context.Context) (blocklist []string, err err } // Hosts returns 'limit' hosts at given 'offset'. -func (c *Client) Hosts(ctx context.Context, opts api.HostsOptions) (hosts []hostdb.HostInfo, err error) { +func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []hostdb.HostInfo, err error) { values := url.Values{} opts.Apply(values) err = c.c.WithContext(ctx).GET("/hosts?"+values.Encode(), &hosts) diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index e371f01d4..06f7e133d 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -23,6 +23,8 @@ func TestBlocklist(t *testing.T) { hosts: 3, }) defer cluster.Shutdown() + + // convenience variables b := cluster.Bus tt := cluster.tt @@ -117,7 +119,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.Hosts(context.Background(), api.HostsOptions{}) + hosts, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -142,7 +144,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) + hosts, err = b.SearchHosts(context.Background(), 
api.DefaultSearchHostOptions()) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -152,7 +154,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.Hosts(context.Background(), api.HostsOptions{}) + hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 65febbbf7..69ac90391 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -146,7 +146,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. - hosts, err := cluster.Bus.Hosts(context.Background(), api.HostsOptions{}) + hosts, err := cluster.Bus.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) for _, host := range hosts { hi, err := cluster.Autopilot.HostInfo(host.PublicKey) diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index b5f6cccd0..060152167 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -84,7 +84,7 @@ func TestHostPruning(t *testing.T) { } // assert the host was not pruned - hostss, err := b.Hosts(context.Background(), api.HostsOptions{}) + hostss, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -96,7 +96,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = b.Hosts(context.Background(), api.HostsOptions{}) + hostss, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) tt.OK(err) if len(hostss) != 0 { return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions) diff --git a/stores/hostdb.go b/stores/hostdb.go index 
101ee298d..0a6fb00f6 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -530,11 +530,6 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains return hosts, err } -// Hosts returns non-blocked hosts at given offset and limit. -func (ss *SQLStore) Hosts(ctx context.Context, filterMode string, offset, limit int) ([]hostdb.HostInfo, error) { - return ss.SearchHosts(ctx, filterMode, "", nil, offset, limit) -} - func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { // sanity check 'maxDowntime' if maxDowntime < 0 { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 528700502..735ea4190 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -53,7 +53,7 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) + allHosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) if err != nil { t.Fatal(err) } @@ -158,39 +158,65 @@ func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, setti }) } -// TestSQLHosts tests the Hosts method of the SQLHostDB type. -func TestSQLHosts(t *testing.T) { +// TestSearchHosts is a unit tests for the SearchHosts method. 
+func TestSearchHosts(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() ctx := context.Background() - hks, err := ss.addTestHosts(3) - if err != nil { - t.Fatal(err) + // add 3 hosts + var hks []types.PublicKey + for i := 1; i <= 3; i++ { + if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i)); err != nil { + t.Fatal(err) + } + hks = append(hks, types.PublicKey{byte(i)}) } hk1, hk2, hk3 := hks[0], hks[1], hks[2] - // assert the hosts method returns the expected hosts - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { + // assert defaults return all hosts + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, 1); err != nil || len(hosts) != 1 { + + // assert we can search using offset and limit + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk1 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 1, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 1, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk2 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 3, 1); err != nil || len(hosts) != 0 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.Hosts(ctx, api.HostFilterModeAllowed, -1, -1); err != ErrNegativeOffset { + if _, err := ss.SearchHosts(ctx, 
api.HostFilterModeAllowed, "", nil, -1, -1); err != ErrNegativeOffset { t.Fatal("unexpected error", err) } + // assert we can search by address + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + + // assert we can search by key + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { + t.Fatal("unexpected", len(hosts), err) + } + + // assert we can search by address and key + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + + // assert we can search by key and limit + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + // add a custom host and block it hk4 := types.PublicKey{4} if err := ss.addCustomTestHost(hk4, "host4.com"); err != nil { @@ -201,13 +227,27 @@ func TestSQLHosts(t *testing.T) { } // assert host filter mode is applied - if hosts, err := ss.Hosts(ctx, api.HostFilterModeAll, 0, -1); err != nil || len(hosts) != 4 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", nil, 0, -1); err != nil || len(hosts) != 4 { t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeBlocked, 0, -1); err != nil || len(hosts) != 1 { + } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeBlocked, "", nil, 0, -1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1); err != nil || len(hosts) != 3 { + } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } +} + +// TestHostsForScanning 
is a unit test for the HostsForScanning method. +func TestHostsForScanning(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + ctx := context.Background() + + // add 3 hosts + hks, err := ss.addTestHosts(3) + if err != nil { + t.Fatal(err) + } + hk1, hk2, hk3 := hks[0], hks[1], hks[2] // add a scan for every non-blocked host n := time.Now() @@ -222,20 +262,19 @@ func TestSQLHosts(t *testing.T) { } // fetch all hosts using the HostsForScanning method - hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 4) + hostAddresses, err := ss.HostsForScanning(ctx, n, 0, -1) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 4 { + } else if len(hostAddresses) != 3 { t.Fatal("wrong number of addresses") - } else if hostAddresses[0].PublicKey != hk4 || - hostAddresses[1].PublicKey != hk3 || - hostAddresses[2].PublicKey != hk2 || - hostAddresses[3].PublicKey != hk1 { + } else if hostAddresses[0].PublicKey != hk3 || + hostAddresses[1].PublicKey != hk2 || + hostAddresses[2].PublicKey != hk1 { t.Fatal("wrong key") } - // fetch one host by setting the cutoff exactly to hk3 - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-3*time.Minute), 0, -1) + // fetch one host by setting the cutoff exactly to hk2 + hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, -1) if err != nil { t.Fatal(err) } else if len(hostAddresses) != 1 { @@ -243,7 +282,7 @@ func TestSQLHosts(t *testing.T) { } // fetch no hosts - hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) + hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, -1) if err != nil { t.Fatal(err) } else if len(hostAddresses) != 0 { @@ -251,40 +290,6 @@ func TestSQLHosts(t *testing.T) { } } -// TestSearchHosts is a unit test for SearchHosts. 
-func TestSearchHosts(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - ctx := context.Background() - - // add 3 hosts - var hks []types.PublicKey - for i := 0; i < 3; i++ { - if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i+1)); err != nil { - t.Fatal(err) - } - hks = append(hks, types.PublicKey{byte(i)}) - } - hk1, hk2, hk3 := hks[0], hks[1], hks[2] - - // Search by address. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - // Filter by key. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { - t.Fatal("unexpected", len(hosts), err) - } - // Filter by address and key. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - // Filter by key and limit results - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } -} - // TestRecordScan is a test for recording scans. 
func TestRecordScan(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) @@ -606,7 +611,7 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) + hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) if err != nil { t.Fatal(err) } @@ -778,7 +783,7 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.Hosts(ctx, api.HostFilterModeAllowed, 0, -1) + hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) if err != nil { t.Fatal(err) } From ef357f6e14a7b02c610cfedf5be89caaf041cc5a Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 09:34:57 +0100 Subject: [PATCH 078/201] stores: revert changes --- api/host.go | 7 -- autopilot/autopilot.go | 2 +- autopilot/contractor.go | 2 +- internal/test/e2e/blocklist_test.go | 8 +- internal/test/e2e/cluster_test.go | 2 +- internal/test/e2e/pruning_test.go | 4 +- stores/hostdb.go | 5 + stores/hostdb_test.go | 154 +++++++++++++--------------- 8 files changed, 82 insertions(+), 102 deletions(-) diff --git a/api/host.go b/api/host.go index 2a9df5f6b..5536f755c 100644 --- a/api/host.go +++ b/api/host.go @@ -89,13 +89,6 @@ type ( } ) -func DefaultSearchHostOptions() SearchHostOptions { - return SearchHostOptions{ - Limit: -1, - FilterMode: HostFilterModeAllowed, - } -} - func (opts GetHostsOptions) Apply(values url.Values) { if opts.Offset != 0 { values.Set("offset", fmt.Sprint(opts.Offset)) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 3b90a8329..111dadb6c 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -195,7 +195,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { state := ap.State() // fetch hosts - hosts, err := ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) + hosts, err := ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) if 
jc.Check("failed to get hosts", err) != nil { return } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index ef64f630b..43ac5e629 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -249,7 +249,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.SearchHosts(ctx, api.DefaultSearchHostOptions()) + hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) if err != nil { return false, err } diff --git a/internal/test/e2e/blocklist_test.go b/internal/test/e2e/blocklist_test.go index 06f7e133d..64acc2fba 100644 --- a/internal/test/e2e/blocklist_test.go +++ b/internal/test/e2e/blocklist_test.go @@ -23,8 +23,6 @@ func TestBlocklist(t *testing.T) { hosts: 3, }) defer cluster.Shutdown() - - // convenience variables b := cluster.Bus tt := cluster.tt @@ -119,7 +117,7 @@ func TestBlocklist(t *testing.T) { } // assert we have 4 hosts - hosts, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err := b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hosts) != 4 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -144,7 +142,7 @@ func TestBlocklist(t *testing.T) { } // assert all others are blocked - hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hosts) != 1 { t.Fatal("unexpected number of hosts", len(hosts)) @@ -154,7 +152,7 @@ func TestBlocklist(t *testing.T) { tt.OK(b.UpdateHostAllowlist(context.Background(), nil, nil, true)) // assert no hosts are blocked - hosts, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err = b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hosts) != 5 { t.Fatal("unexpected number of hosts", len(hosts)) diff --git a/internal/test/e2e/cluster_test.go 
b/internal/test/e2e/cluster_test.go index 69ac90391..2346f7019 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -146,7 +146,7 @@ func TestNewTestCluster(t *testing.T) { }) // Get host info for every host. - hosts, err := cluster.Bus.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) for _, host := range hosts { hi, err := cluster.Autopilot.HostInfo(host.PublicKey) diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index 060152167..de948c970 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -84,7 +84,7 @@ func TestHostPruning(t *testing.T) { } // assert the host was not pruned - hostss, err := b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hostss, err := b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hostss) != 1 { t.Fatal("host was pruned") @@ -96,7 +96,7 @@ func TestHostPruning(t *testing.T) { // assert the host was pruned tt.Retry(10, time.Second, func() error { - hostss, err = b.SearchHosts(context.Background(), api.DefaultSearchHostOptions()) + hostss, err = b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hostss) != 0 { return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions) diff --git a/stores/hostdb.go b/stores/hostdb.go index 0a6fb00f6..95e37a26c 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -530,6 +530,11 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains return hosts, err } +// Hosts returns non-blocked hosts at given offset and limit. 
+func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]hostdb.HostInfo, error) { + return ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, offset, limit) +} + func (ss *SQLStore) RemoveOfflineHosts(ctx context.Context, minRecentFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { // sanity check 'maxDowntime' if maxDowntime < 0 { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 735ea4190..35872ea2d 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -53,7 +53,7 @@ func TestSQLHostDB(t *testing.T) { } // Assert it's returned - allHosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) + allHosts, err := ss.Hosts(ctx, 0, -1) if err != nil { t.Fatal(err) } @@ -158,98 +158,40 @@ func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, setti }) } -// TestSearchHosts is a unit tests for the SearchHosts method. -func TestSearchHosts(t *testing.T) { +// TestSQLHosts tests the Hosts method of the SQLHostDB type. 
+func TestSQLHosts(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() ctx := context.Background() - // add 3 hosts - var hks []types.PublicKey - for i := 1; i <= 3; i++ { - if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i)); err != nil { - t.Fatal(err) - } - hks = append(hks, types.PublicKey{byte(i)}) + hks, err := ss.addTestHosts(3) + if err != nil { + t.Fatal(err) } hk1, hk2, hk3 := hks[0], hks[1], hks[2] - // assert defaults return all hosts - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { + // assert the hosts method returns the expected hosts + if hosts, err := ss.Hosts(ctx, 0, -1); err != nil || len(hosts) != 3 { t.Fatal("unexpected", len(hosts), err) } - - // assert we can search using offset and limit - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, 0, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk1 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 1, 1); err != nil || len(hosts) != 1 { + if hosts, err := ss.Hosts(ctx, 1, 1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } else if host := hosts[0]; host.PublicKey != hk2 { t.Fatal("unexpected host", hk1, hk2, hk3, host.PublicKey) } - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 3, 1); err != nil || len(hosts) != 0 { + if hosts, err := ss.Hosts(ctx, 3, 1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } - if _, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, -1, -1); err != ErrNegativeOffset { + if _, err := ss.Hosts(ctx, -1, -1); err != ErrNegativeOffset { t.Fatal("unexpected error", err) } - // assert we can search by address - if 
hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - - // assert we can search by key - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { - t.Fatal("unexpected", len(hosts), err) - } - - // assert we can search by address and key - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - - // assert we can search by key and limit - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } - - // add a custom host and block it - hk4 := types.PublicKey{4} - if err := ss.addCustomTestHost(hk4, "host4.com"); err != nil { - t.Fatal("unexpected", err) - } - if err := ss.UpdateHostBlocklistEntries(context.Background(), []string{"host4.com"}, nil, false); err != nil { - t.Fatal("unexpected", err) - } - - // assert host filter mode is applied - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", nil, 0, -1); err != nil || len(hosts) != 4 { - t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeBlocked, "", nil, 0, -1); err != nil || len(hosts) != 1 { - t.Fatal("unexpected", len(hosts), err) - } else if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1); err != nil || len(hosts) != 3 { - t.Fatal("unexpected", len(hosts), err) - } -} - -// TestHostsForScanning is a unit test for the HostsForScanning method. 
-func TestHostsForScanning(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - ctx := context.Background() - - // add 3 hosts - hks, err := ss.addTestHosts(3) - if err != nil { - t.Fatal(err) - } - hk1, hk2, hk3 := hks[0], hks[1], hks[2] - - // add a scan for every non-blocked host + // Add a scan for each host. n := time.Now() if err := ss.addTestScan(hk1, n.Add(-time.Minute), nil, rhpv2.HostSettings{}); err != nil { t.Fatal(err) @@ -261,35 +203,77 @@ func TestHostsForScanning(t *testing.T) { t.Fatal(err) } - // fetch all hosts using the HostsForScanning method - hostAddresses, err := ss.HostsForScanning(ctx, n, 0, -1) + // Fetch all hosts using the HostsForScanning method. + hostAddresses, err := ss.HostsForScanning(ctx, n, 0, 3) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 3 { + } + if len(hostAddresses) != 3 { t.Fatal("wrong number of addresses") - } else if hostAddresses[0].PublicKey != hk3 || - hostAddresses[1].PublicKey != hk2 || - hostAddresses[2].PublicKey != hk1 { + } + if hostAddresses[0].PublicKey != hk3 { + t.Fatal("wrong key") + } + if hostAddresses[1].PublicKey != hk2 { + t.Fatal("wrong key") + } + if hostAddresses[2].PublicKey != hk1 { t.Fatal("wrong key") } - // fetch one host by setting the cutoff exactly to hk2 - hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, -1) + // Fetch one host by setting the cutoff exactly to hk2. + hostAddresses, err = ss.HostsForScanning(ctx, n.Add(-2*time.Minute), 0, 3) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 1 { + } + if len(hostAddresses) != 1 { t.Fatal("wrong number of addresses") } - // fetch no hosts - hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, -1) + // Fetch no hosts. 
+ hostAddresses, err = ss.HostsForScanning(ctx, time.Time{}, 0, 3) if err != nil { t.Fatal(err) - } else if len(hostAddresses) != 0 { + } + if len(hostAddresses) != 0 { t.Fatal("wrong number of addresses") } } +// TestSearchHosts is a unit test for SearchHosts. +func TestSearchHosts(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + ctx := context.Background() + + // add 3 hosts + var hks []types.PublicKey + for i := 0; i < 3; i++ { + if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i+1)); err != nil { + t.Fatal(err) + } + hks = append(hks, types.PublicKey{byte(i)}) + } + hk1, hk2, hk3 := hks[0], hks[1], hks[2] + + // Search by address. + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + // Filter by key. + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { + t.Fatal("unexpected", len(hosts), err) + } + // Filter by address and key. + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } + // Filter by key and limit results + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { + t.Fatal("unexpected", len(hosts), err) + } +} + // TestRecordScan is a test for recording scans. 
func TestRecordScan(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) @@ -611,7 +595,7 @@ func TestSQLHostAllowlist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) + hosts, err := ss.Hosts(ctx, 0, -1) if err != nil { t.Fatal(err) } @@ -783,7 +767,7 @@ func TestSQLHostBlocklist(t *testing.T) { numHosts := func() int { t.Helper() - hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, 0, -1) + hosts, err := ss.Hosts(ctx, 0, -1) if err != nil { t.Fatal(err) } From e71862eef3798fe646fdb5060d87c3d1b3f17541 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 09:37:36 +0100 Subject: [PATCH 079/201] all: cleanup PR --- bus/bus.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index 0a0614cbf..7d33964be 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -775,9 +775,9 @@ func (b *bus) searchHostsHandlerPOST(jc jape.Context) { return } - // TODO: on the next major release we should: - // - remove api.DefaultSearchHostOptions and set defaults in the handler - // - validate the filter mode here and return a 400 + // TODO: on the next major release + // - set defaults in handler + // - validate request params and return 400 if invalid hosts, err := b.hdb.SearchHosts(jc.Request.Context(), req.FilterMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return From f3e69a5715a73136944165fd6c8c97681e8988a7 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 09:44:57 +0100 Subject: [PATCH 080/201] linter: reconfigure gocritic --- .github/workflows/test.yml | 14 ++--- .golangci.yml | 86 ++++++++++++++++++++++--------- api/object.go | 4 +- autopilot/hosts.go | 2 +- internal/test/e2e/cluster_test.go | 11 +++- stores/metadata_test.go | 2 +- worker/mocks_test.go | 2 +- worker/rhpv2.go | 10 +++- worker/rhpv3.go | 7 
++- worker/worker.go | 3 +- 10 files changed, 97 insertions(+), 44 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bb56bbdb2..cc6c5c1f3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,13 +17,6 @@ jobs: - name: Configure Windows if: matrix.os == 'windows-latest' run: git config --global core.autocrlf false # fixes go lint fmt error - - name: Configure MySQL - if: matrix.os == 'ubuntu-latest' - uses: mirromutth/mysql-action@v1.1 - with: - host port: 3800 - mysql version: '8' - mysql root password: test - name: Checkout uses: actions/checkout@v3 - name: Setup Go @@ -43,6 +36,13 @@ jobs: autopilot bus bus/client worker worker/client + - name: Configure MySQL + if: matrix.os == 'ubuntu-latest' + uses: mirromutth/mysql-action@v1.1 + with: + host port: 3800 + mysql version: '8' + mysql root password: test - name: Test uses: n8maninger/action-golang-test@v1 with: diff --git a/.golangci.yml b/.golangci.yml index ace11db65..ad9bf1f64 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,13 +17,6 @@ run: # list of build tags, all linters use it. Default is empty list. build-tags: [] - # which dirs to skip: issues from them won't be reported; - # can use regexp here: generated.*, regexp is applied on full path; - # default value is empty list, but default dirs are skipped independently - # from this option's value (see skip-dirs-use-default). - skip-dirs: - - cover - # default is true. 
Enables skipping of directories: # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ skip-dirs-use-default: true @@ -37,7 +30,7 @@ run: # output configuration options output: # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - format: colored-line-number + formats: colored-line-number # print lines of code with issue, default is true print-issued-lines: true @@ -61,23 +54,66 @@ linters-settings: # See https://go-critic.github.io/overview#checks-overview # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run` # By default list of stable checks is used. - enabled-checks: - - argOrder # Diagnostic options - - badCond - - caseOrder - - dupArg - - dupBranchBody - - dupCase - - dupSubExpr - - nilValReturn - - offBy1 - - weakCond - - boolExprSimplify # Style options here and below. - - builtinShadow - - emptyFallthrough - - hexLiteral - - underef - - equalFold + enabled-tags: + - diagnostic + - style + disabled-checks: + # diagnostic + - badRegexp + - badSorting + - badSyncOnceFunc + - builtinShadowDecl + - commentedOutCode + - dynamicFmtString + - emptyDecl + - evalOrder + - externalErrorReassign + - filepathJoin + - regexpPattern + - returnAfterHttpError + - sloppyReassign + - sortSlice + - sprintfQuotedString + - sqlQuery + - syncMapLoadAndDelete + - truncateCmp + - uncheckedInlineErr + - unnecessaryDefer + + # style + - commentedOutImport + - deferUnlambda + - docStub + - dupImport + - emptyStringTest + - exitAfterDefer + - exposedSyncMutex + - httpNoBody + - ifElseChain + - importShadow + - initClause + - methodExprCall + - nestingReduce + - octalLiteral + - paramTypeCombine + - preferFilepathJoin + - ptrToRefParam + - redundantSprint + - regexpSimplify + - ruleguard + - stringConcatSimplify + - stringsCompare + - timeExprSimplify + - todoCommentWithoutDetail + - tooManyResultsChecker + - typeAssertChain + - typeDefFirst + - typeUnparen + - unlabelStmt + - unnamedResult + - 
unnecessaryBlock + - whyNoLint + - yodaStyleExpr revive: ignore-generated-header: true rules: diff --git a/api/object.go b/api/object.go index 36cea9db8..b55191873 100644 --- a/api/object.go +++ b/api/object.go @@ -367,8 +367,8 @@ func (opts SearchObjectOptions) Apply(values url.Values) { } } -func FormatETag(ETag string) string { - return fmt.Sprintf("\"%s\"", ETag) +func FormatETag(eTag string) string { + return fmt.Sprintf("\"%s\"", eTag) } func ObjectPathEscape(path string) string { diff --git a/autopilot/hosts.go b/autopilot/hosts.go index 69fcf9776..aba45ee87 100644 --- a/autopilot/hosts.go +++ b/autopilot/hosts.go @@ -33,7 +33,7 @@ func (hosts scoredHosts) randSelectByScore(n int) (selected []scoredHost) { total += h.score } for i := range candidates { - candidates[i].score = candidates[i].score / total + candidates[i].score /= total } // select diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 2346f7019..e081c2a5d 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -2062,6 +2062,9 @@ func TestMultipartUploads(t *testing.T) { etag1 := putPart(1, 0, data1) etag3 := putPart(3, len(data1)+len(data2), data3) size := int64(len(data1) + len(data2) + len(data3)) + expectedData := data1 + expectedData = append(expectedData, data2...) + expectedData = append(expectedData, data3...) 
// List parts mup, err := b.MultipartUploadParts(context.Background(), api.DefaultBucketName, objPath, mpr.UploadID, 0, 0) @@ -2118,7 +2121,7 @@ func TestMultipartUploads(t *testing.T) { t.Fatal("unexpected size:", gor.Size) } else if data, err := io.ReadAll(gor.Content); err != nil { t.Fatal(err) - } else if expectedData := append(data1, append(data2, data3...)...); !bytes.Equal(data, expectedData) { + } else if !bytes.Equal(data, expectedData) { t.Fatal("unexpected data:", cmp.Diff(data, expectedData)) } @@ -2417,6 +2420,11 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { }) tt.OK(err) + // combine all parts data + expectedData := part1Data + expectedData = append(expectedData, part2Data...) + expectedData = append(expectedData, part3Data...) + // finish the upload tt.OKAll(b.CompleteMultipartUpload(context.Background(), api.DefaultBucketName, objPath, mpr.UploadID, []api.MultipartCompletedPart{ { @@ -2436,7 +2444,6 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { // download the object and verify its integrity dst := new(bytes.Buffer) tt.OK(w.DownloadObject(context.Background(), dst, api.DefaultBucketName, objPath, api.DownloadObjectOptions{})) - expectedData := append(part1Data, append(part2Data, part3Data...)...) receivedData := dst.Bytes() if len(receivedData) != len(expectedData) { t.Fatalf("expected %v bytes, got %v", len(expectedData), len(receivedData)) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index c6ac1cd52..c16f927d1 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4538,7 +4538,7 @@ func TestTypeCurrency(t *testing.T) { var result bool query := fmt.Sprintf("SELECT ? 
%s ?", test.cmp) if !isSQLite(ss.db) { - query = strings.Replace(query, "?", "HEX(?)", -1) + query = strings.ReplaceAll(query, "?", "HEX(?)") } if err := ss.db.Raw(query, test.a, test.b).Scan(&result).Error; err != nil { t.Fatal(err) diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 4f7c24b8f..7b3609c0b 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -375,7 +375,7 @@ func newObjectStoreMock(bucket string) *objectStoreMock { return os } -func (os *objectStoreMock) AddMultipartPart(ctx context.Context, bucket, path, contractSet, ETag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { +func (os *objectStoreMock) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices []object.SlabSlice) (err error) { return nil } diff --git a/worker/rhpv2.go b/worker/rhpv2.go index 9f05904a4..beab25a65 100644 --- a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -217,8 +217,14 @@ func RPCFormContract(ctx context.Context, t *rhpv2.Transport, renterKey types.Pr return rhpv2.ContractRevision{}, nil, err } - txn.Signatures = append(renterContractSignatures, hostSigs.ContractSignatures...) - signedTxnSet := append(resp.Parents, append(parents, txn)...) + txn.Signatures = make([]types.TransactionSignature, 0, len(renterContractSignatures)+len(hostSigs.ContractSignatures)) + txn.Signatures = append(txn.Signatures, renterContractSignatures...) + txn.Signatures = append(txn.Signatures, hostSigs.ContractSignatures...) + + signedTxnSet := make([]types.Transaction, 0, len(resp.Parents)+len(parents)+1) + signedTxnSet = append(signedTxnSet, resp.Parents...) + signedTxnSet = append(signedTxnSet, parents...) 
+ signedTxnSet = append(signedTxnSet, txn) return rhpv2.ContractRevision{ Revision: initRevision, Signatures: [2]types.TransactionSignature{ diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 8db6dc9d5..c0404b128 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -511,7 +511,9 @@ func (a *accounts) deriveAccountKey(hostKey types.PublicKey) types.PrivateKey { // Append the host for which to create it and the index to the // corresponding sub-key. subKey := a.key - data := append(subKey, hostKey[:]...) + data := make([]byte, 0, len(subKey)+len(hostKey)+1) + data = append(data, subKey[:]...) + data = append(data, hostKey[:]...) data = append(data, index) seed := types.HashBytes(data) @@ -1078,7 +1080,8 @@ func RPCRenew(ctx context.Context, rrr api.RHPRenewRequest, bus Bus, t *transpor txn.Signatures = append(txn.Signatures, hostSigs.TransactionSignatures...) // Add the parents to get the full txnSet. - txnSet = append(parents, txn) + txnSet = parents + txnSet = append(txnSet, txn) return rhpv2.ContractRevision{ Revision: noOpRevision, diff --git a/worker/worker.go b/worker/worker.go index 0868c347c..707a6a7f1 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -506,7 +506,8 @@ func (w *worker) rhpBroadcastHandler(jc jape.Context) { return } // Broadcast the txn. 
- txnSet := append(parents, txn) + txnSet := parents + txnSet = append(txnSet, txn) err = w.bus.BroadcastTransaction(ctx, txnSet) if jc.Check("failed to broadcast transaction", err) != nil { _ = w.bus.WalletDiscard(ctx, txn) From 89850c8cb18cb8bb807e5d19132817c7b1720485 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 11:09:27 +0100 Subject: [PATCH 081/201] testing: cleanup TestMigrations --- internal/test/e2e/migrations_test.go | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/internal/test/e2e/migrations_test.go b/internal/test/e2e/migrations_test.go index 3e99f30a7..b049da908 100644 --- a/internal/test/e2e/migrations_test.go +++ b/internal/test/e2e/migrations_test.go @@ -125,28 +125,18 @@ func TestMigrations(t *testing.T) { t.Fatal("unexpected", ress) } - // prepare - // remove all hosts to ensure migrations fail for _, h := range cluster.hosts { cluster.RemoveHost(h) } // fetch alerts and collect object ids until we found two - seen := make(map[types.Hash256]struct{}) - got := make(map[string][]string) + var got map[string][]string tt.Retry(100, 100*time.Millisecond, func() error { - ress, _ := b.Alerts(context.Background(), alerts.AlertsOpts{}) - if ress.Totals.Error+ress.Totals.Critical == 0 { - return errors.New("no migration alerts") - } + got = make(map[string][]string) + ress, err := b.Alerts(context.Background(), alerts.AlertsOpts{}) + tt.OK(err) for _, alert := range ress.Alerts { - // skip if already seen - if _, skip := seen[alert.ID]; skip { - continue - } - seen[alert.ID] = struct{}{} - // skip if not a migration alert data, ok := alert.Data["objectIDs"].(map[string]interface{}) if !ok { @@ -158,14 +148,14 @@ func TestMigrations(t *testing.T) { if objectIDs, ok := ids.([]interface{}); ok { for _, id := range objectIDs { got[bucket] = append(got[bucket], id.(string)) - if len(got) == 2 { - return nil - } } } } } - return errors.New("haven't found two migration alerts yet") + if len(got) != 2 { + 
return errors.New("unexpected number of buckets") + } + return nil }) // assert we found our two objects across two buckets From d5ae293472795e65ddfabbcfb036a81850c665e7 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 11:20:06 +0100 Subject: [PATCH 082/201] linter: drop default format --- .golangci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ad9bf1f64..017b68431 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -29,9 +29,6 @@ run: # output configuration options output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - formats: colored-line-number - # print lines of code with issue, default is true print-issued-lines: true From 1252ed455b79774b5fd8f7b414af94f5b64c5b97 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 11:31:55 +0100 Subject: [PATCH 083/201] linter: enable all the things --- .golangci.yml | 38 -------------------------------------- api/host.go | 2 +- api/object.go | 2 +- api/param.go | 2 +- api/setting.go | 4 ++-- bus/client/metrics.go | 4 ++-- bus/client/slabs.go | 2 +- bus/client/wallet.go | 2 +- cmd/renterd/config.go | 2 +- cmd/renterd/main.go | 4 ++-- hostdb/hostdb.go | 2 +- worker/client/client.go | 4 ++-- 12 files changed, 15 insertions(+), 53 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 017b68431..d439ef177 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -56,61 +56,23 @@ linters-settings: - style disabled-checks: # diagnostic - - badRegexp - - badSorting - - badSyncOnceFunc - - builtinShadowDecl - commentedOutCode - - dynamicFmtString - - emptyDecl - - evalOrder - - externalErrorReassign - - filepathJoin - - regexpPattern - - returnAfterHttpError - - sloppyReassign - - sortSlice - - sprintfQuotedString - - sqlQuery - - syncMapLoadAndDelete - - truncateCmp - uncheckedInlineErr - - unnecessaryDefer # style - - commentedOutImport - - deferUnlambda - - docStub - - dupImport - - emptyStringTest - exitAfterDefer - 
- exposedSyncMutex - - httpNoBody - ifElseChain - importShadow - - initClause - - methodExprCall - - nestingReduce - octalLiteral - paramTypeCombine - - preferFilepathJoin - ptrToRefParam - - redundantSprint - - regexpSimplify - - ruleguard - - stringConcatSimplify - stringsCompare - - timeExprSimplify - - todoCommentWithoutDetail - tooManyResultsChecker - - typeAssertChain - typeDefFirst - typeUnparen - unlabelStmt - unnamedResult - - unnecessaryBlock - whyNoLint - - yodaStyleExpr revive: ignore-generated-header: true rules: diff --git a/api/host.go b/api/host.go index aea80a9fe..e1618397a 100644 --- a/api/host.go +++ b/api/host.go @@ -112,6 +112,6 @@ func (opts HostsForScanningOptions) Apply(values url.Values) { values.Set("limit", fmt.Sprint(opts.Limit)) } if !opts.MaxLastScan.IsZero() { - values.Set("lastScan", fmt.Sprint(TimeRFC3339(opts.MaxLastScan))) + values.Set("lastScan", TimeRFC3339(opts.MaxLastScan).String()) } } diff --git a/api/object.go b/api/object.go index b55191873..0382f69a7 100644 --- a/api/object.go +++ b/api/object.go @@ -368,7 +368,7 @@ func (opts SearchObjectOptions) Apply(values url.Values) { } func FormatETag(eTag string) string { - return fmt.Sprintf("\"%s\"", eTag) + return fmt.Sprintf("%q", eTag) } func ObjectPathEscape(path string) string { diff --git a/api/param.go b/api/param.go index 7e9ef6e75..c2268ca30 100644 --- a/api/param.go +++ b/api/param.go @@ -105,7 +105,7 @@ func (t *TimeRFC3339) UnmarshalText(b []byte) error { // MarshalJSON implements json.Marshaler. func (t TimeRFC3339) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, (time.Time)(t).UTC().Format(time.RFC3339Nano))), nil + return []byte(fmt.Sprintf("%q", (time.Time)(t).UTC().Format(time.RFC3339Nano))), nil } // String implements fmt.Stringer. diff --git a/api/setting.go b/api/setting.go index 47785c9aa..02efe6b5d 100644 --- a/api/setting.go +++ b/api/setting.go @@ -155,11 +155,11 @@ func (rs RedundancySettings) Validate() error { // valid. 
func (s3as S3AuthenticationSettings) Validate() error { for accessKeyID, secretAccessKey := range s3as.V4Keypairs { - if len(accessKeyID) == 0 { + if accessKeyID == "" { return fmt.Errorf("AccessKeyID cannot be empty") } else if len(accessKeyID) < S3MinAccessKeyLen || len(accessKeyID) > S3MaxAccessKeyLen { return fmt.Errorf("AccessKeyID must be between %d and %d characters long but was %d", S3MinAccessKeyLen, S3MaxAccessKeyLen, len(accessKeyID)) - } else if len(secretAccessKey) == 0 { + } else if secretAccessKey == "" { return fmt.Errorf("SecretAccessKey cannot be empty") } else if len(secretAccessKey) != S3SecretKeyLen { return fmt.Errorf("SecretAccessKey must be %d characters long but was %d", S3SecretKeyLen, len(secretAccessKey)) diff --git a/bus/client/metrics.go b/bus/client/metrics.go index dce120ca8..10bc2fbca 100644 --- a/bus/client/metrics.go +++ b/bus/client/metrics.go @@ -125,7 +125,7 @@ func (c *Client) PruneMetrics(ctx context.Context, metric string, cutoff time.Ti panic(err) } u.RawQuery = values.Encode() - req, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), nil) + req, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), http.NoBody) if err != nil { panic(err) } @@ -180,7 +180,7 @@ func (c *Client) metric(ctx context.Context, key string, values url.Values, res panic(err) } u.RawQuery = values.Encode() - req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), http.NoBody) if err != nil { panic(err) } diff --git a/bus/client/slabs.go b/bus/client/slabs.go index e407e0360..db5c0023a 100644 --- a/bus/client/slabs.go +++ b/bus/client/slabs.go @@ -63,7 +63,7 @@ func (c *Client) FetchPartialSlab(ctx context.Context, key object.EncryptionKey, panic(err) } u.RawQuery = values.Encode() - req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), http.NoBody) if err != nil { panic(err) } 
diff --git a/bus/client/wallet.go b/bus/client/wallet.go index 0d4761e51..db9ab4239 100644 --- a/bus/client/wallet.go +++ b/bus/client/wallet.go @@ -149,7 +149,7 @@ func (c *Client) WalletTransactions(ctx context.Context, opts ...api.WalletTrans panic(err) } u.RawQuery = values.Encode() - req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), http.NoBody) if err != nil { panic(err) } diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index f9008a4d5..ec153f452 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -376,7 +376,7 @@ func cmdBuildConfig() { // write the config file configPath := "renterd.yml" - if str := os.Getenv("RENTERD_CONFIG_FILE"); len(str) != 0 { + if str := os.Getenv("RENTERD_CONFIG_FILE"); str != "" { configPath = str } diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 093747796..ab1cc67e0 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -139,7 +139,7 @@ func check(context string, err error) { } func mustLoadAPIPassword() { - if len(cfg.HTTP.Password) != 0 { + if cfg.HTTP.Password != "" { return } @@ -192,7 +192,7 @@ func mustParseWorkers(workers, password string) { // loaded. 
func tryLoadConfig() { configPath := "renterd.yml" - if str := os.Getenv("RENTERD_CONFIG_FILE"); len(str) != 0 { + if str := os.Getenv("RENTERD_CONFIG_FILE"); str != "" { configPath = str } diff --git a/hostdb/hostdb.go b/hostdb/hostdb.go index c1b4769d6..69ed80989 100644 --- a/hostdb/hostdb.go +++ b/hostdb/hostdb.go @@ -38,7 +38,7 @@ func ForEachAnnouncement(b types.Block, height uint64, fn func(types.PublicKey, // verify signature var hostKey types.PublicKey copy(hostKey[:], ha.PublicKey.Key) - annHash := types.Hash256(crypto.HashObject(ha.HostAnnouncement)) // TODO + annHash := types.Hash256(crypto.HashObject(ha.HostAnnouncement)) if !hostKey.VerifyHash(annHash, ha.Signature) { continue } diff --git a/worker/client/client.go b/worker/client/client.go index 6ef70f338..d658ac027 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -88,7 +88,7 @@ func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.H path += "?" + values.Encode() // TODO: support HEAD in jape client - req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), nil) + req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), http.NoBody) if err != nil { panic(err) } @@ -271,7 +271,7 @@ func (c *Client) object(ctx context.Context, bucket, path string, opts api.Downl path += "?" 
+ values.Encode() c.c.Custom("GET", fmt.Sprintf("/objects/%s", path), nil, (*[]api.ObjectMetadata)(nil)) - req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), nil) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/objects/%s", c.c.BaseURL, path), http.NoBody) if err != nil { panic(err) } From 0d0fe4c7671d113c4757705dc245de277cb124c3 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 11:38:06 +0100 Subject: [PATCH 084/201] contractor: extract contractor into its own package --- alerts/alerts.go | 23 + api/autopilot.go | 6 + autopilot/accounts.go | 11 +- autopilot/alerts.go | 92 +--- autopilot/autopilot.go | 438 +++++++--------- autopilot/contract_pruning.go | 108 ++-- autopilot/contractor/alerts.go | 49 ++ autopilot/{ => contractor}/churn.go | 2 +- .../{ => contractor}/contract_spending.go | 18 +- autopilot/{ => contractor}/contractor.go | 494 ++++++++---------- autopilot/{ => contractor}/contractor_test.go | 4 +- autopilot/contractor/evaluate.go | 176 +++++++ .../evaluate_test.go} | 2 +- autopilot/{ => contractor}/hostfilter.go | 6 +- autopilot/{ => contractor}/hostfilter_test.go | 2 +- autopilot/{ => contractor}/hostinfo.go | 47 +- autopilot/{ => contractor}/hosts.go | 2 +- autopilot/{ => contractor}/hosts_test.go | 2 +- autopilot/{ => contractor}/hostscore.go | 2 +- autopilot/{ => contractor}/hostscore_test.go | 23 +- autopilot/{ => contractor}/ipfilter.go | 11 +- autopilot/{ => contractor}/ipfilter_test.go | 12 +- autopilot/contractor/state.go | 53 ++ autopilot/host_test.go | 103 +--- autopilot/migrator.go | 10 +- autopilot/scanner.go | 10 +- autopilot/scanner_test.go | 3 +- internal/test/host.go | 103 ++++ internal/utils/errors.go | 9 + 29 files changed, 989 insertions(+), 832 deletions(-) create mode 100644 autopilot/contractor/alerts.go rename autopilot/{ => contractor}/churn.go (98%) rename autopilot/{ => contractor}/contract_spending.go (69%) rename autopilot/{ => 
contractor}/contractor.go (76%) rename autopilot/{ => contractor}/contractor_test.go (96%) create mode 100644 autopilot/contractor/evaluate.go rename autopilot/{autopilot_test.go => contractor/evaluate_test.go} (99%) rename autopilot/{ => contractor}/hostfilter.go (97%) rename autopilot/{ => contractor}/hostfilter_test.go (99%) rename autopilot/{ => contractor}/hostinfo.go (72%) rename autopilot/{ => contractor}/hosts.go (98%) rename autopilot/{ => contractor}/hosts_test.go (98%) rename autopilot/{ => contractor}/hostscore.go (99%) rename autopilot/{ => contractor}/hostscore_test.go (92%) rename autopilot/{ => contractor}/ipfilter.go (94%) rename autopilot/{ => contractor}/ipfilter_test.go (97%) create mode 100644 autopilot/contractor/state.go create mode 100644 internal/test/host.go diff --git a/alerts/alerts.go b/alerts/alerts.go index 6b009360d..cc8f205f2 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -9,8 +9,11 @@ import ( "sync" "time" + rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + "go.sia.tech/renterd/object" "go.sia.tech/renterd/webhooks" + "lukechampine.com/frand" ) const ( @@ -83,6 +86,26 @@ type ( } ) +func IDForAccount(alertID [32]byte, id rhpv3.Account) types.Hash256 { + return types.HashBytes(append(alertID[:], id[:]...)) +} + +func IDForContract(alertID [32]byte, fcid types.FileContractID) types.Hash256 { + return types.HashBytes(append(alertID[:], fcid[:]...)) +} + +func IDForHost(alertID [32]byte, hk types.PublicKey) types.Hash256 { + return types.HashBytes(append(alertID[:], hk[:]...)) +} + +func IDForSlab(alertID [32]byte, slabKey object.EncryptionKey) types.Hash256 { + return types.HashBytes(append(alertID[:], []byte(slabKey.String())...)) +} + +func RandomAlertID() types.Hash256 { + return frand.Entropy256() +} + func (ar AlertsResponse) Total() int { return ar.Totals.Info + ar.Totals.Warning + ar.Totals.Error + ar.Totals.Critical } diff --git a/api/autopilot.go b/api/autopilot.go index fdd6c4942..c177c571a 100644 --- 
a/api/autopilot.go +++ b/api/autopilot.go @@ -63,6 +63,12 @@ type ( } ) +// EndHeight of a contract formed using the AutopilotConfig given the current +// period. +func (ap *Autopilot) EndHeight() uint64 { + return ap.CurrentPeriod + ap.Config.Contracts.Period + ap.Config.Contracts.RenewWindow +} + type ( // AutopilotTriggerRequest is the request object used by the /trigger // endpoint diff --git a/autopilot/accounts.go b/autopilot/accounts.go index 690c2b35d..2081961f8 100644 --- a/autopilot/accounts.go +++ b/autopilot/accounts.go @@ -10,6 +10,7 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.uber.org/zap" ) @@ -103,7 +104,11 @@ func (a *accounts) refillWorkersAccountsLoop(ctx context.Context) { // until the previously launched goroutine returns. func (a *accounts) refillWorkerAccounts(ctx context.Context, w Worker) { // fetch config - state := a.ap.State() + cfg, err := a.ap.Config(ctx) + if err != nil { + a.l.Errorw(fmt.Sprintf("failed to fetch config for refill: %v", err)) + return + } // fetch worker id workerID, err := w.ID(ctx) @@ -122,7 +127,7 @@ func (a *accounts) refillWorkerAccounts(ctx context.Context, w Worker) { } // fetch all contract set contracts - contractSetContracts, err := a.c.Contracts(ctx, api.ContractsOpts{ContractSet: state.cfg.Contracts.Set}) + contractSetContracts, err := a.c.Contracts(ctx, api.ContractsOpts{ContractSet: cfg.Config.Contracts.Set}) if err != nil { a.l.Errorw(fmt.Sprintf("failed to fetch contract set contracts: %v", err)) return @@ -150,7 +155,7 @@ func (a *accounts) refillWorkerAccounts(ctx context.Context, w Worker) { a.l.Errorw(rerr.err.Error(), rerr.keysAndValues...) 
} else { // dismiss alerts on success - a.ap.DismissAlert(ctx, alertIDForAccount(alertAccountRefillID, accountID)) + a.ap.DismissAlert(ctx, alerts.IDForAccount(alertAccountRefillID, accountID)) // log success if refilled { diff --git a/autopilot/alerts.go b/autopilot/alerts.go index 7a98f8918..51ed31a87 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -10,39 +10,15 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/object" - "lukechampine.com/frand" ) var ( - alertAccountRefillID = randomAlertID() // constant until restarted - alertChurnID = randomAlertID() // constant until restarted - alertLostSectorsID = randomAlertID() // constant until restarted - alertLowBalanceID = randomAlertID() // constant until restarted - alertMigrationID = randomAlertID() // constant until restarted - alertPruningID = randomAlertID() // constant until restarted - alertRenewalFailedID = randomAlertID() // constant until restarted + alertAccountRefillID = alerts.RandomAlertID() // constant until restarted + alertLowBalanceID = alerts.RandomAlertID() // constant until restarted + alertMigrationID = alerts.RandomAlertID() // constant until restarted + alertPruningID = alerts.RandomAlertID() // constant until restarted ) -func alertIDForAccount(alertID [32]byte, id rhpv3.Account) types.Hash256 { - return types.HashBytes(append(alertID[:], id[:]...)) -} - -func alertIDForContract(alertID [32]byte, fcid types.FileContractID) types.Hash256 { - return types.HashBytes(append(alertID[:], fcid[:]...)) -} - -func alertIDForHost(alertID [32]byte, hk types.PublicKey) types.Hash256 { - return types.HashBytes(append(alertID[:], hk[:]...)) -} - -func alertIDForSlab(alertID [32]byte, slabKey object.EncryptionKey) types.Hash256 { - return types.HashBytes(append(alertID[:], []byte(slabKey.String())...)) -} - -func randomAlertID() types.Hash256 { - return frand.Entropy256() -} - func (ap *Autopilot) RegisterAlert(ctx context.Context, a alerts.Alert) { if err 
:= ap.alerts.RegisterAlert(ctx, a); err != nil { ap.logger.Errorf("failed to register alert: %v", err) @@ -55,20 +31,6 @@ func (ap *Autopilot) DismissAlert(ctx context.Context, ids ...types.Hash256) { } } -func (ap *Autopilot) HasAlert(ctx context.Context, id types.Hash256) bool { - ar, err := ap.alerts.Alerts(ctx, alerts.AlertsOpts{Offset: 0, Limit: -1}) - if err != nil { - ap.logger.Errorf("failed to fetch alerts: %v", err) - return false - } - for _, alert := range ar.Alerts { - if alert.ID == id { - return true - } - } - return false -} - func newAccountLowBalanceAlert(address types.Address, balance, allowance types.Currency, bh, renewWindow, endHeight uint64) alerts.Alert { severity := alerts.SeverityInfo if bh+renewWindow/2 >= endHeight { @@ -103,7 +65,7 @@ func newAccountRefillAlert(id rhpv3.Account, contract api.ContractMetadata, err } return alerts.Alert{ - ID: alertIDForAccount(alertAccountRefillID, id), + ID: alerts.IDForAccount(alertAccountRefillID, id), Severity: alerts.SeverityError, Message: "Ephemeral account refill failed", Data: data, @@ -111,26 +73,6 @@ func newAccountRefillAlert(id rhpv3.Account, contract api.ContractMetadata, err } } -func newContractRenewalFailedAlert(contract api.ContractMetadata, interrupted bool, err error) alerts.Alert { - severity := alerts.SeverityWarning - if interrupted { - severity = alerts.SeverityCritical - } - - return alerts.Alert{ - ID: alertIDForContract(alertRenewalFailedID, contract.ID), - Severity: severity, - Message: "Contract renewal failed", - Data: map[string]interface{}{ - "error": err.Error(), - "renewalsInterrupted": interrupted, - "contractID": contract.ID.String(), - "hostKey": contract.HostKey.String(), - }, - Timestamp: time.Now(), - } -} - func newContractPruningFailedAlert(hk types.PublicKey, version string, fcid types.FileContractID, err error) *alerts.Alert { data := map[string]interface{}{"error": err.Error()} if hk != (types.PublicKey{}) { @@ -144,7 +86,7 @@ func 
newContractPruningFailedAlert(hk types.PublicKey, version string, fcid type } return &alerts.Alert{ - ID: alertIDForContract(alertPruningID, fcid), + ID: alerts.IDForContract(alertPruningID, fcid), Severity: alerts.SeverityWarning, Message: "Contract pruning failed", Data: data, @@ -152,20 +94,6 @@ func newContractPruningFailedAlert(hk types.PublicKey, version string, fcid type } } -func newLostSectorsAlert(hk types.PublicKey, lostSectors uint64) alerts.Alert { - return alerts.Alert{ - ID: alertIDForHost(alertLostSectorsID, hk), - Severity: alerts.SeverityWarning, - Message: "Host has lost sectors", - Data: map[string]interface{}{ - "lostSectors": lostSectors, - "hostKey": hk.String(), - "hint": "The host has reported that it can't serve at least one sector. Consider blocking this host through the blocklist feature. If you think this was a mistake and you want to ignore this warning for now you can reset the lost sector count", - }, - Timestamp: time.Now(), - } -} - func newOngoingMigrationsAlert(n int, estimate time.Duration) alerts.Alert { data := make(map[string]interface{}) if rounded := estimate.Round(time.Minute); rounded > 0 { @@ -183,7 +111,7 @@ func newOngoingMigrationsAlert(n int, estimate time.Duration) alerts.Alert { func newCriticalMigrationSucceededAlert(slabKey object.EncryptionKey) alerts.Alert { return alerts.Alert{ - ID: alertIDForSlab(alertMigrationID, slabKey), + ID: alerts.IDForSlab(alertMigrationID, slabKey), Severity: alerts.SeverityInfo, Message: "Critical migration succeeded", Data: map[string]interface{}{ @@ -206,7 +134,7 @@ func newCriticalMigrationFailedAlert(slabKey object.EncryptionKey, health float6 } return alerts.Alert{ - ID: alertIDForSlab(alertMigrationID, slabKey), + ID: alerts.IDForSlab(alertMigrationID, slabKey), Severity: alerts.SeverityCritical, Message: "Critical migration failed", Data: data, @@ -233,7 +161,7 @@ func newMigrationFailedAlert(slabKey object.EncryptionKey, health float64, objec } return alerts.Alert{ - ID: 
alertIDForSlab(alertMigrationID, slabKey), + ID: alerts.IDForSlab(alertMigrationID, slabKey), Severity: severity, Message: "Slab migration failed", Data: data, @@ -243,7 +171,7 @@ func newMigrationFailedAlert(slabKey object.EncryptionKey, health float64, objec func newRefreshHealthFailedAlert(err error) alerts.Alert { return alerts.Alert{ - ID: randomAlertID(), + ID: alerts.RandomAlertID(), Severity: alerts.SeverityCritical, Message: "Health refresh failed", Data: map[string]interface{}{ diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 1038f4378..6d8d4c3d3 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -16,13 +16,13 @@ import ( "go.sia.tech/jape" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/autopilot/contractor" "go.sia.tech/renterd/build" "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" - "go.sia.tech/renterd/worker" "go.uber.org/zap" ) @@ -100,33 +100,25 @@ type Autopilot struct { workers *workerPool a *accounts - c *contractor + c *contractor.Contractor m *migrator s *scanner tickerDuration time.Duration wg sync.WaitGroup - stateMu sync.Mutex - state state - startStopMu sync.Mutex startTime time.Time shutdownCtx context.Context shutdownCtxCancel context.CancelFunc ticker *time.Ticker triggerChan chan bool -} -// state holds a bunch of variables that are used by the autopilot and updated -type state struct { - gs api.GougingSettings - rs api.RedundancySettings - cfg api.AutopilotConfig + mu sync.Mutex + pruning bool + pruningLastStart time.Time - address types.Address - fee types.Currency - period uint64 + maintenanceTxnIDs []types.TransactionID } // New initializes an Autopilot. 
@@ -137,7 +129,7 @@ func New(id string, bus Bus, workers []Worker, logger *zap.Logger, heartbeat tim alerts: alerts.WithOrigin(bus, fmt.Sprintf("autopilot.%s", id)), id: id, bus: bus, - logger: logger.Sugar().Named(api.DefaultAutopilotID), + logger: logger.Sugar().Named("autopilot").Named(id), workers: newWorkerPool(workers), shutdownCtx: shutdownCtx, @@ -158,13 +150,17 @@ func New(id string, bus Bus, workers []Worker, logger *zap.Logger, heartbeat tim } ap.s = scanner - ap.c = newContractor(ap, revisionSubmissionBuffer, revisionBroadcastInterval) + ap.c = contractor.New(bus, ap.logger, revisionSubmissionBuffer, revisionBroadcastInterval) ap.m = newMigrator(ap, migrationHealthCutoff, migratorParallelSlabsPerWorker) ap.a = newAccounts(ap, ap.bus, ap.bus, ap.workers, ap.logger, accountsRefillInterval) return ap, nil } +func (ap *Autopilot) Config(ctx context.Context) (api.Autopilot, error) { + return ap.bus.Autopilot(ctx, ap.id) +} + // Handler returns an HTTP handler that serves the autopilot api. 
func (ap *Autopilot) Handler() http.Handler { return jape.Mux(map[string]jape.Handler{ @@ -188,14 +184,21 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { } // fetch necessary information - cfg := req.AutopilotConfig + reqCfg := req.AutopilotConfig gs := req.GougingSettings rs := req.RedundancySettings cs, err := ap.bus.ConsensusState(ctx) if jc.Check("failed to get consensus state", err) != nil { return } - state := ap.State() + fee, err := ap.bus.RecommendedFee(ctx) + if jc.Check("failed to get recommended fee", err) != nil { + return + } + cfg, err := ap.Config(ctx) + if jc.Check("failed to get autopilot config", err) != nil { + return + } // fetch hosts hosts, err := ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) @@ -204,7 +207,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { } // evaluate the config - jc.Encode(evaluateConfig(cfg, cs, state.fee, state.period, rs, gs, hosts)) + jc.Encode(contractor.EvaluateConfig(reqCfg, cs, fee, cfg.CurrentPeriod, rs, gs, hosts)) } func (ap *Autopilot) Run() error { @@ -274,6 +277,13 @@ func (ap *Autopilot) Run() error { return } + // fetch configuration + autopilot, err := ap.Config(ap.shutdownCtx) + if err != nil { + ap.logger.Errorf("aborting maintenance, failed to fetch autopilot config", zap.Error(err)) + return + } + // Log worker id chosen for this maintenance iteration. 
workerID, err := w.ID(ap.shutdownCtx) if err != nil { @@ -282,26 +292,21 @@ func (ap *Autopilot) Run() error { } ap.logger.Infof("using worker %s for iteration", workerID) - // update the loop state - // - // NOTE: it is important this is the first action we perform in this - // iteration of the loop, keeping a state object ensures we use the - // same state throughout the entire iteration and we don't needless - // fetch the same information twice - err = ap.updateState(ap.shutdownCtx) + // perform wallet maintenance + err = ap.performWalletMaintenance(ap.shutdownCtx) if err != nil { - ap.logger.Errorf("failed to update state, err: %v", err) - return + ap.logger.Errorf("wallet maintenance failed, err: %v", err) } - // perform wallet maintenance - err = ap.c.performWalletMaintenance(ap.shutdownCtx) + // build maintenance state + state, err := ap.buildState(ap.shutdownCtx) if err != nil { - ap.logger.Errorf("wallet maintenance failed, err: %v", err) + ap.logger.Errorf("aborting maintenance, failed to build state, err: %v", err) + return } // perform maintenance - setChanged, err := ap.c.performContractMaintenance(ap.shutdownCtx, w) + setChanged, err := ap.c.PerformContractMaintenance(ap.shutdownCtx, state) if err != nil && utils.IsErr(err, context.Canceled) { return } else if err != nil { @@ -327,8 +332,8 @@ func (ap *Autopilot) Run() error { ap.m.tryPerformMigrations(ap.workers) // pruning - if ap.state.cfg.Contracts.Prune { - ap.c.tryPerformPruning(ap.workers) + if autopilot.Config.Contracts.Prune { + ap.tryPerformPruning(ap.workers) } else { ap.logger.Debug("pruning disabled") } @@ -368,12 +373,6 @@ func (ap *Autopilot) StartTime() time.Time { return ap.startTime } -func (ap *Autopilot) State() state { - ap.stateMu.Lock() - defer ap.stateMu.Unlock() - return ap.state -} - func (ap *Autopilot) Trigger(forceScan bool) bool { ap.startStopMu.Lock() defer ap.startStopMu.Unlock() @@ -550,86 +549,110 @@ func (ap *Autopilot) isRunning() bool { return 
!ap.startTime.IsZero() } -func (ap *Autopilot) updateState(ctx context.Context) error { - // fetch the autopilot from the bus - autopilot, err := ap.bus.Autopilot(ctx, ap.id) - if err != nil { - return err +func (ap *Autopilot) isStopped() bool { + select { + case <-ap.shutdownCtx.Done(): + return true + default: + return false } +} - // fetch consensus state - cs, err := ap.bus.ConsensusState(ctx) - if err != nil { - return fmt.Errorf("could not fetch consensus state, err: %v", err) +func (ap *Autopilot) performWalletMaintenance(ctx context.Context) error { + if ap.isStopped() { + return nil // skip contract maintenance if we're not synced } - // fetch redundancy settings - rs, err := ap.bus.RedundancySettings(ctx) + ap.logger.Info("performing wallet maintenance") + + autopilot, err := ap.Config(ctx) if err != nil { - return fmt.Errorf("could not fetch redundancy settings, err: %v", err) + return fmt.Errorf("failed to fetch autopilot config: %w", err) } - - // fetch gouging settings - gs, err := ap.bus.GougingSettings(ctx) + w, err := ap.bus.Wallet(ctx) if err != nil { - return fmt.Errorf("could not fetch gouging settings, err: %v", err) + return fmt.Errorf("failed to fetch wallet: %w", err) } - // fetch recommended transaction fee - fee, err := ap.bus.RecommendedFee(ctx) + // convenience variables + b := ap.bus + l := ap.logger + cfg := autopilot.Config + renewWindow := cfg.Contracts.RenewWindow + + // no contracts - nothing to do + if cfg.Contracts.Amount == 0 { + l.Warn("wallet maintenance skipped, no contracts wanted") + return nil + } + + // no allowance - nothing to do + if cfg.Contracts.Allowance.IsZero() { + l.Warn("wallet maintenance skipped, no allowance set") + return nil + } + + // fetch consensus state + cs, err := ap.bus.ConsensusState(ctx) if err != nil { - return fmt.Errorf("could not fetch fee, err: %v", err) + l.Warnf("wallet maintenance skipped, fetching consensus state failed with err: %v", err) + return err } - // fetch our wallet address - 
wi, err := ap.bus.Wallet(ctx) + // fetch wallet balance + wallet, err := b.Wallet(ctx) if err != nil { - return fmt.Errorf("could not fetch wallet address, err: %v", err) + l.Warnf("wallet maintenance skipped, fetching wallet balance failed with err: %v", err) + return err } - address := wi.Address + balance := wallet.Confirmed - // update current period if necessary - if cs.Synced { - if autopilot.CurrentPeriod == 0 { - autopilot.CurrentPeriod = cs.BlockHeight - err := ap.bus.UpdateAutopilot(ctx, autopilot) - if err != nil { - return err - } - ap.logger.Infof("initialised current period to %d", autopilot.CurrentPeriod) - } else if nextPeriod := autopilot.CurrentPeriod + autopilot.Config.Contracts.Period; cs.BlockHeight >= nextPeriod { - prevPeriod := autopilot.CurrentPeriod - autopilot.CurrentPeriod = nextPeriod - err := ap.bus.UpdateAutopilot(ctx, autopilot) - if err != nil { - return err + // register an alert if balance is low + if balance.Cmp(cfg.Contracts.Allowance) < 0 { + ap.RegisterAlert(ctx, newAccountLowBalanceAlert(w.Address, balance, cfg.Contracts.Allowance, cs.BlockHeight, renewWindow, autopilot.EndHeight())) + } else { + ap.DismissAlert(ctx, alertLowBalanceID) + } + + // pending maintenance transaction - nothing to do + pending, err := b.WalletPending(ctx) + if err != nil { + return nil + } + for _, txn := range pending { + for _, mTxnID := range ap.maintenanceTxnIDs { + if mTxnID == txn.ID() { + l.Debugf("wallet maintenance skipped, pending transaction found with id %v", mTxnID) + return nil } - ap.logger.Infof("updated current period from %d to %d", prevPeriod, nextPeriod) } } - // update the state - ap.stateMu.Lock() - ap.state = state{ - gs: gs, - rs: rs, - cfg: autopilot.Config, + wantedNumOutputs := 10 - address: address, - fee: fee, - period: autopilot.CurrentPeriod, + // enough outputs - nothing to do + available, err := b.WalletOutputs(ctx) + if err != nil { + return err } - ap.stateMu.Unlock() - return nil -} + if uint64(len(available)) >= 
uint64(wantedNumOutputs) { + l.Debugf("no wallet maintenance needed, plenty of outputs available (%v>=%v)", len(available), uint64(wantedNumOutputs)) + return nil + } + wantedNumOutputs -= len(available) -func (ap *Autopilot) isStopped() bool { - select { - case <-ap.shutdownCtx.Done(): - return true - default: - return false + // figure out the amount per output + amount := cfg.Contracts.Allowance.Div64(uint64(wantedNumOutputs)) + + // redistribute outputs + ids, err := b.WalletRedistribute(ctx, wantedNumOutputs, amount) + if err != nil { + return fmt.Errorf("failed to redistribute wallet into %d outputs of amount %v, balance %v, err %v", wantedNumOutputs, amount, balance, err) } + + l.Debugf("wallet maintenance succeeded, txns %v", ids) + ap.maintenanceTxnIDs = ids + return nil } func (ap *Autopilot) configHandlerGET(jc jape.Context) { @@ -687,7 +710,12 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { return } - host, err := ap.c.HostInfo(jc.Request.Context(), hostKey) + state, err := ap.buildState(jc.Request.Context()) + if jc.Check("failed to build state", err) != nil { + return + } + + host, err := ap.c.HostInfo(jc.Request.Context(), hostKey, state) if jc.Check("failed to get host info", err) != nil { return } @@ -695,7 +723,9 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { } func (ap *Autopilot) stateHandlerGET(jc jape.Context) { - pruning, pLastStart := ap.c.Status() + ap.mu.Lock() + pruning, pLastStart := ap.pruning, ap.pruningLastStart // TODO: move to a 'pruner' type + ap.mu.Unlock() migrating, mLastStart := ap.m.Status() scanning, sLastStart := ap.s.Status() _, err := ap.bus.Autopilot(jc.Request.Context(), ap.id) @@ -730,177 +760,81 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - hosts, err := ap.c.HostInfos(jc.Request.Context(), req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) + state, err := ap.buildState(jc.Request.Context()) + if 
jc.Check("failed to build state", err) != nil { + return + } + hosts, err := ap.c.HostInfos(jc.Request.Context(), state, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check("failed to get host info", err) != nil { return } jc.Encode(hosts) } -func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (usables uint64) { - gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) - for _, host := range hosts { - usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) - if usable { - usables++ - } +func (ap *Autopilot) buildState(ctx context.Context) (*contractor.State, error) { + // fetch the autopilot from the bus + autopilot, err := ap.Config(ctx) + if err != nil { + return nil, err } - return -} -// evaluateConfig evaluates the given configuration and if the gouging settings -// are too strict for the number of contracts required by 'cfg', it will provide -// a recommendation on how to loosen it. 
-func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (resp api.ConfigEvaluationResponse) { - gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) - - resp.Hosts = uint64(len(hosts)) - for _, host := range hosts { - usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) - if usable { - resp.Usable++ - continue - } - if usableBreakdown.blocked > 0 { - resp.Unusable.Blocked++ - } - if usableBreakdown.notacceptingcontracts > 0 { - resp.Unusable.NotAcceptingContracts++ - } - if usableBreakdown.notcompletingscan > 0 { - resp.Unusable.NotScanned++ - } - if usableBreakdown.unknown > 0 { - resp.Unusable.Unknown++ - } - if usableBreakdown.gougingBreakdown.ContractErr != "" { - resp.Unusable.Gouging.Contract++ - } - if usableBreakdown.gougingBreakdown.DownloadErr != "" { - resp.Unusable.Gouging.Download++ - } - if usableBreakdown.gougingBreakdown.GougingErr != "" { - resp.Unusable.Gouging.Gouging++ - } - if usableBreakdown.gougingBreakdown.PruneErr != "" { - resp.Unusable.Gouging.Pruning++ - } - if usableBreakdown.gougingBreakdown.UploadErr != "" { - resp.Unusable.Gouging.Upload++ - } + // fetch consensus state + cs, err := ap.bus.ConsensusState(ctx) + if err != nil { + return nil, fmt.Errorf("could not fetch consensus state, err: %v", err) } - if resp.Usable >= cfg.Contracts.Amount { - return // no recommendation needed + // fetch redundancy settings + rs, err := ap.bus.RedundancySettings(ctx) + if err != nil { + return nil, fmt.Errorf("could not fetch redundancy settings, err: %v", err) } - // optimise gouging settings - maxGS := func() api.GougingSettings { - return api.GougingSettings{ - // these are the fields we optimise one-by-one - MaxRPCPrice: types.MaxCurrency, - MaxContractPrice: types.MaxCurrency, - MaxDownloadPrice: types.MaxCurrency, - MaxUploadPrice: types.MaxCurrency, - MaxStoragePrice: 
types.MaxCurrency, - - // these are not optimised, so we keep the same values as the user - // provided - HostBlockHeightLeeway: gs.HostBlockHeightLeeway, - MinPriceTableValidity: gs.MinPriceTableValidity, - MinAccountExpiry: gs.MinAccountExpiry, - MinMaxEphemeralAccountBalance: gs.MinMaxEphemeralAccountBalance, - MigrationSurchargeMultiplier: gs.MigrationSurchargeMultiplier, - } + // fetch gouging settings + gs, err := ap.bus.GougingSettings(ctx) + if err != nil { + return nil, fmt.Errorf("could not fetch gouging settings, err: %v", err) } - // use the input gouging settings as the starting point and try to optimise - // each field independent of the other fields we want to optimise - optimisedGS := gs - success := false - - // MaxRPCPrice - tmpGS := maxGS() - tmpGS.MaxRPCPrice = gs.MaxRPCPrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxRPCPrice, cfg, cs, fee, currentPeriod, rs, hosts) { - optimisedGS.MaxRPCPrice = tmpGS.MaxRPCPrice - success = true - } - // MaxContractPrice - tmpGS = maxGS() - tmpGS.MaxContractPrice = gs.MaxContractPrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxContractPrice, cfg, cs, fee, currentPeriod, rs, hosts) { - optimisedGS.MaxContractPrice = tmpGS.MaxContractPrice - success = true - } - // MaxDownloadPrice - tmpGS = maxGS() - tmpGS.MaxDownloadPrice = gs.MaxDownloadPrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxDownloadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { - optimisedGS.MaxDownloadPrice = tmpGS.MaxDownloadPrice - success = true - } - // MaxUploadPrice - tmpGS = maxGS() - tmpGS.MaxUploadPrice = gs.MaxUploadPrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxUploadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { - optimisedGS.MaxUploadPrice = tmpGS.MaxUploadPrice - success = true - } - // MaxStoragePrice - tmpGS = maxGS() - tmpGS.MaxStoragePrice = gs.MaxStoragePrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxStoragePrice, cfg, cs, fee, currentPeriod, rs, hosts) { - optimisedGS.MaxStoragePrice = 
tmpGS.MaxStoragePrice - success = true - } - // If one of the optimisations was successful, we return the optimised - // gouging settings - if success { - resp.Recommendation = &api.ConfigRecommendation{ - GougingSettings: optimisedGS, - } + // fetch recommended transaction fee + fee, err := ap.bus.RecommendedFee(ctx) + if err != nil { + return nil, fmt.Errorf("could not fetch fee, err: %v", err) } - return -} -// optimiseGougingSetting tries to optimise one field of the gouging settings to -// try and hit the target number of contracts. -func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.HostInfo) bool { - if cfg.Contracts.Amount == 0 { - return true // nothing to do + // fetch our wallet address + wi, err := ap.bus.Wallet(ctx) + if err != nil { + return nil, fmt.Errorf("could not fetch wallet address, err: %v", err) } - stepSize := []uint64{200, 150, 125, 110, 105} - maxSteps := 12 - - stepIdx := 0 - nSteps := 0 - prevVal := *field // to keep accurate value - for { - nUsable := countUsableHosts(cfg, cs, fee, currentPeriod, rs, *gs, hosts) - targetHit := nUsable >= cfg.Contracts.Amount - - if targetHit && nSteps == 0 { - return true // target already hit without optimising - } else if targetHit && stepIdx == len(stepSize)-1 { - return true // target hit after optimising - } else if targetHit { - // move one step back and decrease step size - stepIdx++ - nSteps-- - *field = prevVal - } else if nSteps >= maxSteps { - return false // ran out of steps - } + address := wi.Address - // apply next step - prevVal = *field - newValue, overflow := prevVal.Mul64WithOverflow(stepSize[stepIdx]) - if overflow { - return false + // update current period if necessary + if cs.Synced { + if autopilot.CurrentPeriod == 0 { + autopilot.CurrentPeriod = cs.BlockHeight + err := ap.bus.UpdateAutopilot(ctx, autopilot) + if err != nil { 
+ return nil, err + } + ap.logger.Infof("initialised current period to %d", autopilot.CurrentPeriod) + } else if nextPeriod := autopilot.CurrentPeriod + autopilot.Config.Contracts.Period; cs.BlockHeight >= nextPeriod { + prevPeriod := autopilot.CurrentPeriod + autopilot.CurrentPeriod = nextPeriod + err := ap.bus.UpdateAutopilot(ctx, autopilot) + if err != nil { + return nil, err + } + ap.logger.Infof("updated current period from %d to %d", prevPeriod, nextPeriod) } - newValue = newValue.Div64(100) - *field = newValue - nSteps++ } + + return &contractor.State{ + GS: gs, + RS: rs, + AP: autopilot, + + Address: address, + Fee: fee, + }, nil } diff --git a/autopilot/contract_pruning.go b/autopilot/contract_pruning.go index aa0eb505f..df2abdf93 100644 --- a/autopilot/contract_pruning.go +++ b/autopilot/contract_pruning.go @@ -9,18 +9,20 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/utils" "go.sia.tech/siad/build" ) var ( - errConnectionRefused = errors.New("connection refused") - errConnectionTimedOut = errors.New("connection timed out") - errConnectionResetByPeer = errors.New("connection reset by peer") errInvalidHandshakeSignature = errors.New("host's handshake signature was invalid") errInvalidMerkleProof = errors.New("host supplied invalid Merkle proof") - errNoRouteToHost = errors.New("no route to host") - errNoSuchHost = errors.New("no such host") +) + +const ( + // timeoutPruneContract is the amount of time we wait for a contract to get + // pruned. 
+ timeoutPruneContract = 10 * time.Minute ) type ( @@ -64,16 +66,16 @@ func (pm pruneMetrics) String() string { } func (pr pruneResult) toAlert() (id types.Hash256, alert *alerts.Alert) { - id = alertIDForContract(alertPruningID, pr.fcid) + id = alerts.IDForContract(alertPruningID, pr.fcid) if shouldTrigger := pr.err != nil && !((utils.IsErr(pr.err, errInvalidMerkleProof) && build.VersionCmp(pr.version, "1.6.0") < 0) || utils.IsErr(pr.err, api.ErrContractNotFound) || // contract got archived - utils.IsErr(pr.err, errConnectionRefused) || - utils.IsErr(pr.err, errConnectionTimedOut) || - utils.IsErr(pr.err, errConnectionResetByPeer) || + utils.IsErr(pr.err, utils.ErrConnectionRefused) || + utils.IsErr(pr.err, utils.ErrConnectionTimedOut) || + utils.IsErr(pr.err, utils.ErrConnectionResetByPeer) || utils.IsErr(pr.err, errInvalidHandshakeSignature) || - utils.IsErr(pr.err, errNoRouteToHost) || - utils.IsErr(pr.err, errNoSuchHost)); shouldTrigger { + utils.IsErr(pr.err, utils.ErrNoRouteToHost) || + utils.IsErr(pr.err, utils.ErrNoSuchHost)); shouldTrigger { alert = newContractPruningFailedAlert(pr.hk, pr.version, pr.fcid, pr.err) } return @@ -90,21 +92,27 @@ func (pr pruneResult) toMetric() api.ContractPruneMetric { } } -func (c *contractor) fetchPrunableContracts() (prunable []api.ContractPrunableData, _ error) { +func (ap *Autopilot) fetchPrunableContracts() (prunable []api.ContractPrunableData, _ error) { // use a sane timeout - ctx, cancel := context.WithTimeout(c.ap.shutdownCtx, time.Minute) + ctx, cancel := context.WithTimeout(ap.shutdownCtx, time.Minute) defer cancel() // fetch prunable data - res, err := c.ap.bus.PrunableData(ctx) + res, err := ap.bus.PrunableData(ctx) if err != nil { return nil, err } else if res.TotalPrunable == 0 { return nil, nil } + // fetch autopilot + autopilot, err := ap.bus.Autopilot(ctx, ap.id) + if err != nil { + return nil, err + } + // fetch contract set contracts - csc, err := c.ap.bus.Contracts(ctx, api.ContractsOpts{ContractSet: 
c.ap.state.cfg.Contracts.Set}) + csc, err := ap.bus.Contracts(ctx, api.ContractsOpts{ContractSet: autopilot.Config.Contracts.Set}) if err != nil { return nil, err } @@ -124,16 +132,28 @@ func (c *contractor) fetchPrunableContracts() (prunable []api.ContractPrunableDa return } -func (c *contractor) performContractPruning(wp *workerPool) { - c.logger.Info("performing contract pruning") +func (ap *Autopilot) hostForContract(ctx context.Context, fcid types.FileContractID) (host hostdb.HostInfo, metadata api.ContractMetadata, err error) { + // fetch the contract + metadata, err = ap.bus.Contract(ctx, fcid) + if err != nil { + return + } + + // fetch the host + host, err = ap.bus.Host(ctx, metadata.HostKey) + return +} + +func (ap *Autopilot) performContractPruning(wp *workerPool) { + ap.logger.Info("performing contract pruning") // fetch prunable contracts - prunable, err := c.fetchPrunableContracts() + prunable, err := ap.fetchPrunableContracts() if err != nil { - c.logger.Error(err) + ap.logger.Error(err) return } else if len(prunable) == 0 { - c.logger.Info("no contracts to prune") + ap.logger.Info("no contracts to prune") return } @@ -144,24 +164,24 @@ func (c *contractor) performContractPruning(wp *workerPool) { wp.withWorker(func(w Worker) { for _, contract := range prunable { // return if we're stopped - if c.ap.isStopped() { + if ap.isStopped() { return } // prune contract - result := c.pruneContract(w, contract.ID) + result := ap.pruneContract(w, contract.ID) if result.err != nil { - c.logger.Error(result) + ap.logger.Error(result) } else { - c.logger.Info(result) + ap.logger.Info(result) } // handle alert - ctx, cancel := context.WithTimeout(c.ap.shutdownCtx, time.Minute) + ctx, cancel := context.WithTimeout(ap.shutdownCtx, time.Minute) if id, alert := result.toAlert(); alert != nil { - c.ap.RegisterAlert(ctx, *alert) + ap.RegisterAlert(ctx, *alert) } else { - c.ap.DismissAlert(ctx, id) + ap.DismissAlert(ctx, id) } cancel() @@ -171,23 +191,23 @@ func (c 
*contractor) performContractPruning(wp *workerPool) { }) // record metrics - ctx, cancel := context.WithTimeout(c.ap.shutdownCtx, time.Minute) - if err := c.ap.bus.RecordContractPruneMetric(ctx, metrics...); err != nil { - c.logger.Error(err) + ctx, cancel := context.WithTimeout(ap.shutdownCtx, time.Minute) + if err := ap.bus.RecordContractPruneMetric(ctx, metrics...); err != nil { + ap.logger.Error(err) } cancel() // log metrics - c.logger.Info(metrics) + ap.logger.Info(metrics) } -func (c *contractor) pruneContract(w Worker, fcid types.FileContractID) pruneResult { +func (ap *Autopilot) pruneContract(w Worker, fcid types.FileContractID) pruneResult { // create a sane timeout - ctx, cancel := context.WithTimeout(c.ap.shutdownCtx, 2*timeoutPruneContract) + ctx, cancel := context.WithTimeout(ap.shutdownCtx, 2*timeoutPruneContract) defer cancel() // fetch the host - host, _, err := c.hostForContract(ctx, fcid) + host, _, err := ap.hostForContract(ctx, fcid) if err != nil { return pruneResult{fcid: fcid, err: err} } @@ -229,3 +249,23 @@ func humanReadableSize(b int) string { return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp]) } + +func (ap *Autopilot) tryPerformPruning(wp *workerPool) { + ap.mu.Lock() + if ap.pruning || ap.isStopped() { + ap.mu.Unlock() + return + } + ap.pruning = true + ap.pruningLastStart = time.Now() + ap.mu.Unlock() + + ap.wg.Add(1) + go func() { + defer ap.wg.Done() + ap.performContractPruning(wp) + ap.mu.Lock() + ap.pruning = false + ap.mu.Unlock() + }() +} diff --git a/autopilot/contractor/alerts.go b/autopilot/contractor/alerts.go new file mode 100644 index 000000000..d242bc8f5 --- /dev/null +++ b/autopilot/contractor/alerts.go @@ -0,0 +1,49 @@ +package contractor + +import ( + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" + "go.sia.tech/renterd/api" +) + +var ( + alertChurnID = alerts.RandomAlertID() // constant until restarted + alertLostSectorsID = alerts.RandomAlertID() // constant until restarted + 
alertRenewalFailedID = alerts.RandomAlertID() // constant until restarted +) + +func newContractRenewalFailedAlert(contract api.ContractMetadata, interrupted bool, err error) alerts.Alert { + severity := alerts.SeverityWarning + if interrupted { + severity = alerts.SeverityCritical + } + + return alerts.Alert{ + ID: alerts.IDForContract(alertRenewalFailedID, contract.ID), + Severity: severity, + Message: "Contract renewal failed", + Data: map[string]interface{}{ + "error": err.Error(), + "renewalsInterrupted": interrupted, + "contractID": contract.ID.String(), + "hostKey": contract.HostKey.String(), + }, + Timestamp: time.Now(), + } +} + +func newLostSectorsAlert(hk types.PublicKey, lostSectors uint64) alerts.Alert { + return alerts.Alert{ + ID: alerts.IDForHost(alertLostSectorsID, hk), + Severity: alerts.SeverityWarning, + Message: "Host has lost sectors", + Data: map[string]interface{}{ + "lostSectors": lostSectors, + "hostKey": hk.String(), + "hint": "The host has reported that it can't serve at least one sector. Consider blocking this host through the blocklist feature. 
If you think this was a mistake and you want to ignore this warning for now you can reset the lost sector count", + }, + Timestamp: time.Now(), + } +} diff --git a/autopilot/churn.go b/autopilot/contractor/churn.go similarity index 98% rename from autopilot/churn.go rename to autopilot/contractor/churn.go index 31a1073cf..b6d1d6505 100644 --- a/autopilot/churn.go +++ b/autopilot/contractor/churn.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "time" diff --git a/autopilot/contract_spending.go b/autopilot/contractor/contract_spending.go similarity index 69% rename from autopilot/contract_spending.go rename to autopilot/contractor/contract_spending.go index cbd10f86c..0938e2755 100644 --- a/autopilot/contract_spending.go +++ b/autopilot/contractor/contract_spending.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "context" @@ -7,8 +7,8 @@ import ( "go.sia.tech/renterd/api" ) -func (c *contractor) contractSpending(ctx context.Context, contract api.Contract, currentPeriod uint64) (api.ContractSpending, error) { - ancestors, err := c.ap.bus.AncestorContracts(ctx, contract.ID, currentPeriod) +func (c *Contractor) contractSpending(ctx context.Context, contract api.Contract, currentPeriod uint64) (api.ContractSpending, error) { + ancestors, err := c.bus.AncestorContracts(ctx, contract.ID, currentPeriod) if err != nil { return api.ContractSpending{}, err } @@ -20,7 +20,7 @@ func (c *contractor) contractSpending(ctx context.Context, contract api.Contract return total, nil } -func (c *contractor) currentPeriodSpending(contracts []api.Contract, currentPeriod uint64) types.Currency { +func (c *Contractor) currentPeriodSpending(contracts []api.Contract, currentPeriod uint64) types.Currency { totalCosts := make(map[types.FileContractID]types.Currency) for _, c := range contracts { totalCosts[c.ID] = c.TotalCost @@ -44,16 +44,14 @@ func (c *contractor) currentPeriodSpending(contracts []api.Contract, currentPeri return totalAllocated } -func (c 
*contractor) remainingFunds(contracts []api.Contract) types.Currency { - state := c.ap.State() - +func (c *Contractor) remainingFunds(contracts []api.Contract, state *State) types.Currency { // find out how much we spent in the current period - spent := c.currentPeriodSpending(contracts, state.period) + spent := c.currentPeriodSpending(contracts, state.Period()) // figure out remaining funds var remaining types.Currency - if state.cfg.Contracts.Allowance.Cmp(spent) > 0 { - remaining = state.cfg.Contracts.Allowance.Sub(spent) + if state.Allowance().Cmp(spent) > 0 { + remaining = state.Allowance().Sub(spent) } return remaining } diff --git a/autopilot/contractor.go b/autopilot/contractor/contractor.go similarity index 76% rename from autopilot/contractor.go rename to autopilot/contractor/contractor.go index 43ac5e629..0eee7e7ef 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor/contractor.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "context" @@ -14,6 +14,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/utils" @@ -77,33 +78,53 @@ const ( // timeoutBroadcastRevision is the amount of time we wait for the broadcast // of a revision to succeed. timeoutBroadcastRevision = time.Minute - - // timeoutPruneContract is the amount of time we wait for a contract to get - // pruned. 
- timeoutPruneContract = 10 * time.Minute ) +type Bus interface { + AddContract(ctx context.Context, c rhpv2.ContractRevision, contractPrice, totalCost types.Currency, startHeight uint64, state string) (api.ContractMetadata, error) + AddRenewedContract(ctx context.Context, c rhpv2.ContractRevision, contractPrice, totalCost types.Currency, startHeight uint64, renewedFrom types.FileContractID, state string) (api.ContractMetadata, error) + AncestorContracts(ctx context.Context, id types.FileContractID, minStartHeight uint64) ([]api.ArchivedContract, error) + ArchiveContracts(ctx context.Context, toArchive map[types.FileContractID]string) error + ConsensusState(ctx context.Context) (api.ConsensusState, error) + Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error) + Contracts(ctx context.Context, opts api.ContractsOpts) (contracts []api.ContractMetadata, err error) + FileContractTax(ctx context.Context, payout types.Currency) (types.Currency, error) + Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) + RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) + SetContractSet(ctx context.Context, set string, contracts []types.FileContractID) error + Wallet(ctx context.Context) (api.WalletResponse, error) +} + +type Worker interface { + Contracts(ctx context.Context, hostTimeout time.Duration) (api.ContractsResponse, error) + RHPBroadcast(ctx context.Context, fcid types.FileContractID) (err error) + RHPForm(ctx context.Context, endHeight uint64, hk types.PublicKey, hostIP string, renterAddress types.Address, renterFunds types.Currency, hostCollateral types.Currency) (rhpv2.ContractRevision, []types.Transaction, error) + RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (hostdb.HostPriceTable, error) + RHPRenew(ctx context.Context, fcid 
types.FileContractID, endHeight uint64, hk types.PublicKey, hostIP string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral types.Currency, expectedStorage, windowSize uint64) (api.RHPRenewResponse, error) + RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP string, timeout time.Duration) (api.RHPScanResponse, error) +} + type ( - contractor struct { - ap *Autopilot + Contractor struct { + alerter alerts.Alerter + bus Bus churn *accumulatedChurn resolver *ipResolver logger *zap.SugaredLogger - maintenanceTxnIDs []types.TransactionID - revisionBroadcastInterval time.Duration revisionLastBroadcast map[types.FileContractID]time.Time revisionSubmissionBuffer uint64 mu sync.Mutex - pruning bool - pruningLastStart time.Time - cachedHostInfo map[types.PublicKey]hostInfo cachedDataStored map[types.PublicKey]uint64 cachedMinScore float64 + + shutdownCtx context.Context + shutdownCtxCancel context.CancelFunc } hostInfo struct { @@ -122,6 +143,7 @@ type ( priceTable rhpv3.HostPriceTable usable bool recoverable bool + InSet bool } contractSetAdditions struct { @@ -152,59 +174,71 @@ type ( } ) -func newContractor(ap *Autopilot, revisionSubmissionBuffer uint64, revisionBroadcastInterval time.Duration) *contractor { - return &contractor{ - ap: ap, - churn: newAccumulatedChurn(), - logger: ap.logger.Named("contractor"), +func New(alerter alerts.Alerter, logger *zap.SugaredLogger, revisionSubmissionBuffer uint64, revisionBroadcastInterval time.Duration) *Contractor { + logger = logger.Named("contractor") + ctx, cancel := context.WithCancel(context.Background()) + return &Contractor{ + alerter: alerter, + churn: newAccumulatedChurn(), + logger: logger, revisionBroadcastInterval: revisionBroadcastInterval, revisionLastBroadcast: make(map[types.FileContractID]time.Time), revisionSubmissionBuffer: revisionSubmissionBuffer, - resolver: newIPResolver(ap.shutdownCtx, resolverLookupTimeout, ap.logger.Named("resolver")), + resolver: newIPResolver(ctx, 
resolverLookupTimeout, logger.Named("resolver")), + + shutdownCtx: ctx, + shutdownCtxCancel: cancel, } } -func (c *contractor) Status() (bool, time.Time) { - c.mu.Lock() - defer c.mu.Unlock() - return c.pruning, c.pruningLastStart +func (c *Contractor) Close() error { + c.shutdownCtxCancel() + return nil } -func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) (bool, error) { - // skip contract maintenance if we're stopped or not synced - if c.ap.isStopped() { - return false, nil +func canSkipContractMaintenance(ctx context.Context, cfg api.ContractsConfig) (string, bool) { + select { + case <-ctx.Done(): + return "", true + default: } - c.logger.Info("performing contract maintenance") - - // convenience variables - state := c.ap.State() // no maintenance if no hosts are requested // // NOTE: this is an important check because we assume Contracts.Amount is // not zero in several places - if state.cfg.Contracts.Amount == 0 { - c.logger.Warn("contracts is set to zero, skipping contract maintenance") - return false, nil + if cfg.Amount == 0 { + return "contracts is set to zero, skipping contract maintenance", true } // no maintenance if no allowance was set - if state.cfg.Contracts.Allowance.IsZero() { - c.logger.Warn("allowance is set to zero, skipping contract maintenance") - return false, nil + if cfg.Allowance.IsZero() { + return "allowance is set to zero, skipping contract maintenance", true } // no maintenance if no period was set - if state.cfg.Contracts.Period == 0 { - c.logger.Warn("period is set to zero, skipping contract maintenance") - return false, nil + if cfg.Period == 0 { + return "period is set to zero, skipping contract maintenance", true } + return "", false +} + +func (c *Contractor) PerformContractMaintenance(ctx context.Context, state *State) (bool, error) { + // convenience variables + w := state.Worker + + // check if we can skip maintenance + if reason, skip := canSkipContractMaintenance(ctx, state.Config().Contracts); 
skip { + if reason != "" { + c.logger.Warn(reason) + } + } + c.logger.Info("performing contract maintenance") // fetch current contract set - currentSet, err := c.ap.bus.Contracts(ctx, api.ContractsOpts{ContractSet: state.cfg.Contracts.Set}) + currentSet, err := c.bus.Contracts(ctx, api.ContractsOpts{ContractSet: state.ContractSet()}) if err != nil && !strings.Contains(err.Error(), api.ErrContractSetNotFound.Error()) { return false, err } @@ -212,7 +246,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( for _, c := range currentSet { isInCurrentSet[c.ID] = struct{}{} } - c.logger.Debugf("contract set '%s' holds %d contracts", state.cfg.Contracts.Set, len(currentSet)) + c.logger.Debugf("contract set '%s' holds %d contracts", state.ContractSet(), len(currentSet)) // fetch all contracts from the worker. start := time.Now() @@ -249,7 +283,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) + hosts, err := c.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) if err != nil { return false, err } @@ -258,17 +292,17 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( var toDismiss []types.Hash256 for _, h := range hosts { if h.Interactions.LostSectors > 0 { - c.ap.RegisterAlert(ctx, newLostSectorsAlert(h.PublicKey, h.Interactions.LostSectors)) + c.alerter.RegisterAlert(ctx, newLostSectorsAlert(h.PublicKey, h.Interactions.LostSectors)) } else { - toDismiss = append(toDismiss, alertIDForHost(alertLostSectorsID, h.PublicKey)) + toDismiss = append(toDismiss, alerts.IDForHost(alertLostSectorsID, h.PublicKey)) } } if len(toDismiss) > 0 { - c.ap.DismissAlert(ctx, toDismiss...) + c.alerter.DismissAlerts(ctx, toDismiss...) 
} // fetch candidate hosts - candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, smallestValidScore) // avoid 0 score hosts + candidates, unusableHosts, err := c.candidateHosts(ctx, state, hosts, usedHosts, hostData, smallestValidScore) // avoid 0 score hosts if err != nil { return false, err } @@ -276,26 +310,26 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( // min score to pass checks var minScore float64 if len(hosts) > 0 { - minScore = c.calculateMinScore(candidates, state.cfg.Contracts.Amount) + minScore = c.calculateMinScore(candidates, state.WantedContracts()) } else { c.logger.Warn("could not calculate min score, no hosts found") } // fetch consensus state - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) if err != nil { return false, err } // create gouging checker - gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) + gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) // prepare hosts for cache hostInfos := make(map[types.PublicKey]hostInfo) for _, h := range hosts { // ignore the pricetable's HostBlockHeight by setting it to our own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, h, minScore, hostData[h.PublicKey]) + isUsable, unusableResult := isUsableHost(state.Config(), state.RS, gc, h, minScore, hostData[h.PublicKey]) hostInfos[h.PublicKey] = hostInfo{ Usable: isUsable, UnusableResult: unusableResult, @@ -310,7 +344,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( c.mu.Unlock() // run checks - updatedSet, toArchive, toStopUsing, toRefresh, toRenew, err := c.runContractChecks(ctx, w, contracts, isInCurrentSet, minScore) + updatedSet, toArchive, toStopUsing, toRefresh, toRenew, err := c.runContractChecks(ctx, state, contracts, 
isInCurrentSet, minScore) if err != nil { return false, fmt.Errorf("failed to run contract checks, err: %v", err) } @@ -318,13 +352,13 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( // archive contracts if len(toArchive) > 0 { c.logger.Debugf("archiving %d contracts: %+v", len(toArchive), toArchive) - if err := c.ap.bus.ArchiveContracts(ctx, toArchive); err != nil { + if err := c.bus.ArchiveContracts(ctx, toArchive); err != nil { c.logger.Errorf("failed to archive contracts, err: %v", err) // continue } } // calculate remaining funds - remaining := c.remainingFunds(contracts) + remaining := c.remainingFunds(contracts, state) // calculate 'limit' amount of contracts we want to renew var limit int @@ -341,7 +375,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } return toRenew[i].contract.FileSize() > toRenew[j].contract.FileSize() }) - for len(updatedSet)+limit < int(state.cfg.Contracts.Amount) && limit < len(toRenew) { + for len(updatedSet)+limit < int(state.WantedContracts()) && limit < len(toRenew) { // as long as we're missing contracts, increase the renewal limit limit++ } @@ -353,7 +387,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( var renewed []renewal if limit > 0 { var toKeep []api.ContractMetadata - renewed, toKeep = c.runContractRenewals(ctx, w, toRenew, &remaining, limit) + renewed, toKeep = c.runContractRenewals(ctx, state, toRenew, &remaining, limit) for _, ri := range renewed { if ri.ci.usable || ri.ci.recoverable { updatedSet = append(updatedSet, ri.to) @@ -364,7 +398,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // run contract refreshes - refreshed, err := c.runContractRefreshes(ctx, w, toRefresh, &remaining) + refreshed, err := c.runContractRefreshes(ctx, state, toRefresh, &remaining) if err != nil { c.logger.Errorf("failed to refresh contracts, err: %v", err) // continue } else { @@ -379,8 
+413,8 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( // to avoid forming new contracts as soon as we dip below // 'Contracts.Amount', we define a threshold but only if we have more // contracts than 'Contracts.Amount' already - threshold := state.cfg.Contracts.Amount - if uint64(len(contracts)) > state.cfg.Contracts.Amount { + threshold := state.WantedContracts() + if uint64(len(contracts)) > state.WantedContracts() { threshold = addLeeway(threshold, leewayPctRequiredContracts) } @@ -388,14 +422,14 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( var formed []api.ContractMetadata if uint64(len(updatedSet)) < threshold { // no need to try and form contracts if wallet is completely empty - wallet, err := c.ap.bus.Wallet(ctx) + wallet, err := c.bus.Wallet(ctx) if err != nil { c.logger.Errorf("failed to fetch wallet, err: %v", err) return false, err } else if wallet.Confirmed.IsZero() && wallet.Unconfirmed.IsZero() { c.logger.Warn("contract formations skipped, wallet is empty") } else { - formed, err = c.runContractFormations(ctx, w, candidates, usedHosts, unusableHosts, state.cfg.Contracts.Amount-uint64(len(updatedSet)), &remaining) + formed, err = c.runContractFormations(ctx, state, candidates, usedHosts, unusableHosts, state.WantedContracts()-uint64(len(updatedSet)), &remaining) if err != nil { c.logger.Errorf("failed to form contracts, err: %v", err) // continue } else { @@ -413,15 +447,15 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( c.logger.Errorf("contract %v not found in contractData", contract.ID) } } - if len(updatedSet) > int(state.cfg.Contracts.Amount) { + if len(updatedSet) > int(state.WantedContracts()) { // sort by contract size sort.Slice(updatedSet, func(i, j int) bool { return contractData[updatedSet[i].ID] > contractData[updatedSet[j].ID] }) - for _, contract := range updatedSet[state.cfg.Contracts.Amount:] { + for _, contract := range 
updatedSet[state.WantedContracts():] { toStopUsing[contract.ID] = "truncated" } - updatedSet = updatedSet[:state.cfg.Contracts.Amount] + updatedSet = updatedSet[:state.WantedContracts()] } // convert to set of file contract ids @@ -431,19 +465,18 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // update contract set - if c.ap.isStopped() { - return false, errors.New("autopilot stopped before maintenance could be completed") - } - err = c.ap.bus.SetContractSet(ctx, state.cfg.Contracts.Set, newSet) + err = c.bus.SetContractSet(ctx, state.ContractSet(), newSet) if err != nil { return false, err } // return whether the maintenance changed the contract set - return c.computeContractSetChanged(ctx, state.cfg.Contracts.Set, currentSet, updatedSet, formed, refreshed, renewed, toStopUsing, contractData), nil + return c.computeContractSetChanged(ctx, state, currentSet, updatedSet, formed, refreshed, renewed, toStopUsing, contractData), nil } -func (c *contractor) computeContractSetChanged(ctx context.Context, name string, oldSet, newSet []api.ContractMetadata, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { +func (c *Contractor) computeContractSetChanged(ctx context.Context, state *State, oldSet, newSet []api.ContractMetadata, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { + name := state.ContractSet() + // build set lookups inOldSet := make(map[types.FileContractID]struct{}) for _, c := range oldSet { @@ -519,7 +552,7 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, // log a warning if the contract set does not contain enough contracts logFn := c.logger.Debugw - if len(newSet) < int(c.ap.State().rs.TotalShards) { + if len(newSet) < int(state.RS.TotalShards) { logFn = c.logger.Warnw } 
@@ -527,7 +560,7 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, var metrics []api.ContractSetChurnMetric for fcid := range setAdditions { metrics = append(metrics, api.ContractSetChurnMetric{ - Name: c.ap.state.cfg.Contracts.Set, + Name: state.ContractSet(), ContractID: fcid, Direction: api.ChurnDirAdded, Timestamp: now, @@ -535,7 +568,7 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, } for fcid, removal := range setRemovals { metrics = append(metrics, api.ContractSetChurnMetric{ - Name: c.ap.state.cfg.Contracts.Set, + Name: state.ContractSet(), ContractID: fcid, Direction: api.ChurnDirRemoved, Reason: removal.Removals[0].Reason, @@ -543,7 +576,7 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, }) } if len(metrics) > 0 { - if err := c.ap.bus.RecordContractSetChurnMetric(ctx, metrics...); err != nil { + if err := c.bus.RecordContractSetChurnMetric(ctx, metrics...); err != nil { c.logger.Error("failed to record contract set churn metric:", err) } } @@ -560,116 +593,28 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, ) hasChanged := len(setAdditions)+len(setRemovals) > 0 if hasChanged { - if !c.ap.HasAlert(ctx, alertChurnID) { + if !c.HasAlert(ctx, alertChurnID) { c.churn.Reset() } c.churn.Apply(setAdditions, setRemovals) - c.ap.RegisterAlert(ctx, c.churn.Alert(name)) + c.alerter.RegisterAlert(ctx, c.churn.Alert(name)) } return hasChanged } -func (c *contractor) performWalletMaintenance(ctx context.Context) error { - if c.ap.isStopped() { - return nil // skip contract maintenance if we're not synced - } - - c.logger.Info("performing wallet maintenance") - - // convenience variables - b := c.ap.bus - l := c.logger - state := c.ap.State() - cfg := state.cfg - period := state.period - renewWindow := cfg.Contracts.RenewWindow - - // no contracts - nothing to do - if cfg.Contracts.Amount == 0 { - l.Warn("wallet maintenance skipped, 
no contracts wanted") - return nil - } - - // no allowance - nothing to do - if cfg.Contracts.Allowance.IsZero() { - l.Warn("wallet maintenance skipped, no allowance set") - return nil - } - - // fetch consensus state - cs, err := c.ap.bus.ConsensusState(ctx) - if err != nil { - l.Warnf("wallet maintenance skipped, fetching consensus state failed with err: %v", err) - return err - } - - // fetch wallet balance - wallet, err := b.Wallet(ctx) - if err != nil { - l.Warnf("wallet maintenance skipped, fetching wallet balance failed with err: %v", err) - return err - } - balance := wallet.Confirmed - - // register an alert if balance is low - if balance.Cmp(cfg.Contracts.Allowance) < 0 { - c.ap.RegisterAlert(ctx, newAccountLowBalanceAlert(state.address, balance, cfg.Contracts.Allowance, cs.BlockHeight, renewWindow, endHeight(cfg, period))) - } else { - c.ap.DismissAlert(ctx, alertLowBalanceID) - } - - // pending maintenance transaction - nothing to do - pending, err := b.WalletPending(ctx) - if err != nil { - return nil - } - for _, txn := range pending { - for _, mTxnID := range c.maintenanceTxnIDs { - if mTxnID == txn.ID() { - l.Debugf("wallet maintenance skipped, pending transaction found with id %v", mTxnID) - return nil - } - } - } - - wantedNumOutputs := 10 - - // enough outputs - nothing to do - available, err := b.WalletOutputs(ctx) - if err != nil { - return err - } - if uint64(len(available)) >= uint64(wantedNumOutputs) { - l.Debugf("no wallet maintenance needed, plenty of outputs available (%v>=%v)", len(available), uint64(wantedNumOutputs)) - return nil - } - wantedNumOutputs -= len(available) - - // figure out the amount per output - amount := cfg.Contracts.Allowance.Div64(uint64(wantedNumOutputs)) - - // redistribute outputs - ids, err := b.WalletRedistribute(ctx, wantedNumOutputs, amount) - if err != nil { - return fmt.Errorf("failed to redistribute wallet into %d outputs of amount %v, balance %v, err %v", wantedNumOutputs, amount, balance, err) - } - - 
l.Debugf("wallet maintenance succeeded, txns %v", ids) - c.maintenanceTxnIDs = ids - return nil -} - -func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { - if c.ap.isStopped() { +func (c *Contractor) runContractChecks(ctx context.Context, state *State, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { + select { + case <-ctx.Done(): return + default: } c.logger.Debug("running contract checks") // convenience variables - state := c.ap.State() + w := state.Worker // fetch consensus state - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) if err != nil { return nil, nil, nil, nil, nil, err } @@ -680,7 +625,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts // calculate 'maxKeepLeeway' which defines the amount of contracts we'll be // lenient towards when we fail to either fetch a valid price table or the // contract's revision - maxKeepLeeway := addLeeway(state.cfg.Contracts.Amount, 1-leewayPctRequiredContracts) + maxKeepLeeway := addLeeway(state.WantedContracts(), 1-leewayPctRequiredContracts) remainingKeepLeeway := maxKeepLeeway var notfound int @@ -709,10 +654,13 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts }) // check all contracts +LOOP: for _, contract := range contracts { - // break if autopilot is stopped - if c.ap.isStopped() { - break + // break if interrupted + select { + case <-ctx.Done(): + break LOOP + default: } // convenience variables @@ -735,7 +683,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts // 
fetch host from hostdb hk := contract.HostKey - host, err := c.ap.bus.Host(ctx, hk) + host, err := c.bus.Host(ctx, hk) if err != nil { c.logger.Errorw(fmt.Sprintf("missing host, err: %v", err), "hk", hk) toStopUsing[fcid] = errHostNotFound.Error() @@ -761,14 +709,14 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts } // refresh the consensus state - if css, err := c.ap.bus.ConsensusState(ctx); err != nil { + if css, err := c.bus.ConsensusState(ctx); err != nil { c.logger.Errorf("could not fetch consensus state, err: %v", err) } else { cs = css } // use a new gouging checker for every contract - gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) + gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) // set the host's block height to ours to disable the height check in // the gouging checks, in certain edge cases the renter might unsync and @@ -777,7 +725,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts host.PriceTable.HostBlockHeight = cs.BlockHeight // decide whether the host is still good - usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, contract.FileSize()) + usable, unusableResult := isUsableHost(state.Config(), state.RS, gc, host, minScore, contract.FileSize()) if !usable { reasons := unusableResult.reasons() toStopUsing[fcid] = strings.Join(reasons, ",") @@ -791,7 +739,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts if contract.Revision == nil { if _, found := inCurrentSet[fcid]; !found || remainingKeepLeeway == 0 { toStopUsing[fcid] = errContractNoRevision.Error() - } else if !state.cfg.Hosts.AllowRedundantIPs && ipFilter.IsRedundantIP(contract.HostIP, contract.HostKey) { + } else if !state.AllowRedundantIPs() && ipFilter.IsRedundantIP(contract.HostIP, contract.HostKey) { toStopUsing[fcid] = fmt.Sprintf("%v; %v", 
errHostRedundantIP, errContractNoRevision) } else { toKeep = append(toKeep, contract.ContractMetadata) @@ -814,7 +762,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts // decide whether the contract is still good ci := contractInfo{contract: contract, priceTable: host.PriceTable.HostPriceTable, settings: host.Settings} - usable, recoverable, refresh, renew, reasons := c.isUsableContract(state.cfg, state, ci, cs.BlockHeight, ipFilter) + usable, recoverable, refresh, renew, reasons := c.isUsableContract(state.Config(), state.RS, ci, cs.BlockHeight, ipFilter) ci.usable = usable ci.recoverable = recoverable if !usable { @@ -844,19 +792,21 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts return toKeep, toArchive, toStopUsing, toRefresh, toRenew, nil } -func (c *contractor) runContractFormations(ctx context.Context, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { - if c.ap.isStopped() { +func (c *Contractor) runContractFormations(ctx context.Context, state *State, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { + select { + case <-ctx.Done(): return nil, nil + default: } // convenience variables - state := c.ap.State() - shouldFilter := !state.cfg.Hosts.AllowRedundantIPs + w := state.Worker + shouldFilter := !state.AllowRedundantIPs() c.logger.Debugw( "run contract formations", "usedHosts", len(usedHosts), - "required", state.cfg.Contracts.Amount, + "required", state.WantedContracts(), "missing", missing, "budget", budget, ) @@ -887,14 +837,14 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid } // fetch consensus state - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) 
if err != nil { return nil, err } lastStateUpdate := time.Now() // prepare a gouging checker - gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) + gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) // prepare an IP filter that contains all used hosts ipFilter := c.newIPFilter() @@ -907,14 +857,17 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid } // calculate min/max contract funds - minInitialContractFunds, maxInitialContractFunds := initialContractFundingMinMax(state.cfg) + minInitialContractFunds, maxInitialContractFunds := initialContractFundingMinMax(state.Config()) +LOOP: for h := 0; missing > 0 && h < len(selected); h++ { host := selected[h].host // break if the autopilot is stopped - if c.ap.isStopped() { - break + select { + case <-ctx.Done(): + break LOOP + default: } // fetch a new price table if necessary @@ -927,11 +880,11 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid // frequently to ensure we're not performing gouging checks with old // consensus state if time.Since(lastStateUpdate) > time.Minute { - if css, err := c.ap.bus.ConsensusState(ctx); err != nil { + if css, err := c.bus.ConsensusState(ctx); err != nil { c.logger.Errorf("could not fetch consensus state, err: %v", err) } else { cs = css - gc = worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) + gc = worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) } } @@ -946,7 +899,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid continue } - formedContract, proceed, err := c.formContract(ctx, w, host, minInitialContractFunds, maxInitialContractFunds, budget) + formedContract, proceed, err := c.formContract(ctx, state, host, minInitialContractFunds, maxInitialContractFunds, budget) if err == nil 
{ // add contract to contract set formed = append(formed, formedContract) @@ -964,12 +917,12 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid // contracts. Since we are migrating away from all contracts not in the set and // are not uploading to those contracts anyway, we only worry about contracts in // the set. -func (c *contractor) runRevisionBroadcast(ctx context.Context, w Worker, allContracts []api.Contract, isInSet map[types.FileContractID]struct{}) { +func (c *Contractor) runRevisionBroadcast(ctx context.Context, w Worker, allContracts []api.Contract, isInSet map[types.FileContractID]struct{}) { if c.revisionBroadcastInterval == 0 { return // not enabled } - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) if err != nil { c.logger.Warnf("revision broadcast failed to fetch blockHeight: %v", err) return @@ -1029,7 +982,7 @@ func (c *contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont } } -func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { +func (c *Contractor) runContractRenewals(ctx context.Context, state *State, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { c.logger.Debugw( "run contracts renewals", "torenew", len(toRenew), @@ -1047,9 +1000,11 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew var i int for i = 0; i < len(toRenew); i++ { - // check if the autopilot is stopped - if c.ap.isStopped() { + // check if interrupted + select { + case <-ctx.Done(): return + default: } // limit the number of contracts to renew @@ -1059,14 +1014,14 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew // renew and add if it succeeds or if its usable contract := toRenew[i].contract.ContractMetadata - renewed, proceed, err := 
c.renewContract(ctx, w, toRenew[i], budget) + renewed, proceed, err := c.renewContract(ctx, state, toRenew[i], budget) if err != nil { - c.ap.RegisterAlert(ctx, newContractRenewalFailedAlert(contract, !proceed, err)) + c.alerter.RegisterAlert(ctx, newContractRenewalFailedAlert(contract, !proceed, err)) if toRenew[i].usable { toKeep = append(toKeep, toRenew[i].contract.ContractMetadata) } } else { - c.ap.DismissAlert(ctx, alertIDForContract(alertRenewalFailedID, contract.ID)) + c.alerter.DismissAlerts(ctx, alerts.IDForContract(alertRenewalFailedID, contract.ID)) renewals = append(renewals, renewal{from: contract, to: renewed, ci: toRenew[i]}) } @@ -1087,7 +1042,7 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew return renewals, toKeep } -func (c *contractor) runContractRefreshes(ctx context.Context, w Worker, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) { +func (c *Contractor) runContractRefreshes(ctx context.Context, state *State, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) { c.logger.Debugw( "run contracts refreshes", "torefresh", len(toRefresh), @@ -1102,13 +1057,15 @@ func (c *contractor) runContractRefreshes(ctx context.Context, w Worker, toRefre }() for _, ci := range toRefresh { - // check if the autopilot is stopped - if c.ap.isStopped() { + // check if interrupted + select { + case <-ctx.Done(): return + default: } // refresh and add if it succeeds - renewed, proceed, err := c.refreshContract(ctx, w, ci, budget) + renewed, proceed, err := c.refreshContract(ctx, state, ci, budget) if err == nil { refreshed = append(refreshed, renewal{from: ci.contract.ContractMetadata, to: renewed, ci: ci}) } @@ -1122,7 +1079,7 @@ func (c *contractor) runContractRefreshes(ctx context.Context, w Worker, toRefre return refreshed, nil } -func (c *contractor) initialContractFunding(settings rhpv2.HostSettings, txnFee, min, max types.Currency) types.Currency { +func (c 
*Contractor) initialContractFunding(settings rhpv2.HostSettings, txnFee, min, max types.Currency) types.Currency { if !max.IsZero() && min.Cmp(max) > 0 { panic("given min is larger than max") // developer error } @@ -1137,7 +1094,7 @@ func (c *contractor) initialContractFunding(settings rhpv2.HostSettings, txnFee, return funding } -func (c *contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contractInfo, fee types.Currency) types.Currency { +func (c *Contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contractInfo, fee types.Currency) types.Currency { // refresh with 1.2x the funds refreshAmount := ci.contract.TotalCost.Mul64(6).Div64(5) @@ -1159,15 +1116,13 @@ func (c *contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contract return refreshAmountCapped } -func (c *contractor) renewFundingEstimate(ctx context.Context, ci contractInfo, fee types.Currency, renewing bool) (types.Currency, error) { - state := c.ap.State() - +func (c *Contractor) renewFundingEstimate(ctx context.Context, state *State, ci contractInfo, fee types.Currency, renewing bool) (types.Currency, error) { // estimate the cost of the current data stored dataStored := ci.contract.FileSize() - storageCost := sectorStorageCost(ci.priceTable, state.cfg.Contracts.Period).Mul64(bytesToSectors(dataStored)) + storageCost := sectorStorageCost(ci.priceTable, state.Period()).Mul64(bytesToSectors(dataStored)) // fetch the spending of the contract we want to renew. 
- prevSpending, err := c.contractSpending(ctx, ci.contract, state.period) + prevSpending, err := c.contractSpending(ctx, ci.contract, state.Period()) if err != nil { c.logger.Errorw( fmt.Sprintf("could not retrieve contract spending, err: %v", err), @@ -1182,7 +1137,7 @@ func (c *contractor) renewFundingEstimate(ctx context.Context, ci contractInfo, // TODO: estimate is not ideal because price can change, better would be to // look at the amount of data stored in the contract from the previous cycle prevUploadDataEstimate := types.NewCurrency64(dataStored) // default to assuming all data was uploaded - sectorUploadCost := sectorUploadCost(ci.priceTable, state.cfg.Contracts.Period) + sectorUploadCost := sectorUploadCost(ci.priceTable, state.Period()) if !sectorUploadCost.IsZero() { prevUploadDataEstimate = prevSpending.Uploads.Div(sectorUploadCost).Mul64(rhpv2.SectorSize) } @@ -1205,7 +1160,7 @@ func (c *contractor) renewFundingEstimate(ctx context.Context, ci contractInfo, // the file contract (and the transaction fee goes to the miners, not the // file contract). subTotal := storageCost.Add(newUploadsCost).Add(newDownloadsCost).Add(newFundAccountCost).Add(ci.settings.ContractPrice) - siaFundFeeEstimate, err := c.ap.bus.FileContractTax(ctx, subTotal) + siaFundFeeEstimate, err := c.bus.FileContractTax(ctx, subTotal) if err != nil { return types.ZeroCurrency, err } @@ -1220,7 +1175,7 @@ func (c *contractor) renewFundingEstimate(ctx context.Context, ci contractInfo, // check for a sane minimum that is equal to the initial contract funding // but without an upper cap. 
- minInitialContractFunds, _ := initialContractFundingMinMax(state.cfg) + minInitialContractFunds, _ := initialContractFundingMinMax(state.Config()) minimum := c.initialContractFunding(ci.settings, txnFeeEstimate, minInitialContractFunds, types.ZeroCurrency) cappedEstimatedCost := estimatedCost if cappedEstimatedCost.Cmp(minimum) < 0 { @@ -1246,7 +1201,7 @@ func (c *contractor) renewFundingEstimate(ctx context.Context, ci contractInfo, return cappedEstimatedCost, nil } -func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uint64) float64 { +func (c *Contractor) calculateMinScore(candidates []scoredHost, numContracts uint64) float64 { // return early if there's no hosts if len(candidates) == 0 { c.logger.Warn("min host score is set to the smallest non-zero float because there are no candidate hosts") @@ -1297,18 +1252,17 @@ func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uin return minScore } -func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { +func (c *Contractor) candidateHosts(ctx context.Context, state *State, hosts []hostdb.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { start := time.Now() // fetch consensus state - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) if err != nil { return nil, unusableHostResult{}, err } // create a gouging checker - state := c.ap.State() - gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) + gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) // select unused hosts that passed a scan var unused []hostdb.HostInfo @@ -1346,7 +1300,7 @@ func (c *contractor) 
candidateHosts(ctx context.Context, hosts []hostdb.HostInfo // NOTE: ignore the pricetable's HostBlockHeight by setting it to our // own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - usable, result := isUsableHost(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) + usable, result := isUsableHost(state.Config(), state.RS, gc, h, minScore, storedData[h.PublicKey]) if usable { candidates = append(candidates, scoredHost{h.Host, result.scoreBreakdown.Score()}) continue @@ -1368,28 +1322,27 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo return candidates, unusableHostResult, nil } -func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) renewContract(ctx context.Context, state *State, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't renew contract without a revision") } // convenience variables - state := c.ap.State() - cfg := state.cfg contract := ci.contract settings := ci.settings fcid := contract.ID rev := contract.Revision hk := contract.HostKey + w := state.Worker // fetch consensus state - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) if err != nil { return api.ContractMetadata{}, false, err } // calculate the renter funds - renterFunds, err := c.renewFundingEstimate(ctx, ci, state.fee, true) + renterFunds, err := c.renewFundingEstimate(ctx, state, ci, state.Fee, true) if err != nil { c.logger.Errorw(fmt.Sprintf("could not get renew funding estimate, err: %v", err), "hk", hk, "fcid", fcid) return api.ContractMetadata{}, true, err @@ -1402,9 +1355,9 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf } // sanity check the endheight is not the same on renewals - endHeight := 
endHeight(cfg, state.period) + endHeight := state.EndHeight() if endHeight <= rev.EndHeight() { - c.logger.Debugw("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", state.period, "bh", cs.BlockHeight) + c.logger.Debugw("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", state.Period, "bh", cs.BlockHeight) return api.ContractMetadata{}, false, fmt.Errorf("renewal endheight should surpass the current contract endheight, %v <= %v", endHeight, rev.EndHeight()) } @@ -1412,7 +1365,7 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf expectedNewStorage := renterFundsToExpectedStorage(renterFunds, endHeight-cs.BlockHeight, ci.priceTable) // renew the contract - resp, err := w.RHPRenew(ctx, fcid, endHeight, hk, contract.SiamuxAddr, settings.Address, state.address, renterFunds, types.ZeroCurrency, expectedNewStorage, settings.WindowSize) + resp, err := w.RHPRenew(ctx, fcid, endHeight, hk, contract.SiamuxAddr, settings.Address, state.Address, renterFunds, types.ZeroCurrency, expectedNewStorage, settings.WindowSize) if err != nil { c.logger.Errorw( "renewal failed", @@ -1433,7 +1386,7 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf *budget = budget.Sub(renterFunds) // persist the contract - renewedContract, err := c.ap.bus.AddRenewedContract(ctx, resp.Contract, resp.ContractPrice, renterFunds, cs.BlockHeight, fcid, api.ContractStatePending) + renewedContract, err := c.bus.AddRenewedContract(ctx, resp.Contract, resp.ContractPrice, renterFunds, cs.BlockHeight, fcid, api.ContractStatePending) if err != nil { c.logger.Errorw(fmt.Sprintf("renewal failed to persist, err: %v", err), "hk", hk, "fcid", fcid) return api.ContractMetadata{}, false, err @@ -1450,29 +1403,29 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf return renewedContract, true, nil } -func (c *contractor) 
refreshContract(ctx context.Context, w Worker, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) refreshContract(ctx context.Context, state *State, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't refresh contract without a revision") } // convenience variables - state := c.ap.State() contract := ci.contract settings := ci.settings fcid := contract.ID rev := contract.Revision hk := contract.HostKey + w := state.Worker // fetch consensus state - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) if err != nil { return api.ContractMetadata{}, false, err } // calculate the renter funds var renterFunds types.Currency - if isOutOfFunds(state.cfg, ci.priceTable, ci.contract) { - renterFunds = c.refreshFundingEstimate(state.cfg, ci, state.fee) + if isOutOfFunds(state.Config(), ci.priceTable, ci.contract) { + renterFunds = c.refreshFundingEstimate(state.Config(), ci, state.Fee) } else { renterFunds = rev.ValidRenterPayout() // don't increase funds } @@ -1487,10 +1440,10 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI unallocatedCollateral := contract.RemainingCollateral() // a refresh should always result in a contract that has enough collateral - minNewCollateral := minRemainingCollateral(state.cfg, state.rs, renterFunds, settings, ci.priceTable).Mul64(2) + minNewCollateral := minRemainingCollateral(state.Config(), state.RS, renterFunds, settings, ci.priceTable).Mul64(2) // renew the contract - resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, state.address, renterFunds, minNewCollateral, expectedStorage, settings.WindowSize) + resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, state.Address, 
renterFunds, minNewCollateral, expectedStorage, settings.WindowSize) if err != nil { if strings.Contains(err.Error(), "new collateral is too low") { c.logger.Debugw("refresh failed: contract wouldn't have enough collateral after refresh", @@ -1512,7 +1465,7 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI *budget = budget.Sub(renterFunds) // persist the contract - refreshedContract, err := c.ap.bus.AddRenewedContract(ctx, resp.Contract, resp.ContractPrice, renterFunds, cs.BlockHeight, contract.ID, api.ContractStatePending) + refreshedContract, err := c.bus.AddRenewedContract(ctx, resp.Contract, resp.ContractPrice, renterFunds, cs.BlockHeight, contract.ID, api.ContractStatePending) if err != nil { c.logger.Errorw("adding refreshed contract failed", zap.Error(err), "hk", hk, "fcid", fcid) return api.ContractMetadata{}, false, err @@ -1530,9 +1483,10 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI return refreshedContract, true, nil } -func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) formContract(ctx context.Context, state *State, host hostdb.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { + w := state.Worker + // convenience variables - state := c.ap.State() hk := host.PublicKey // fetch host settings @@ -1543,13 +1497,13 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos } // fetch consensus state - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) if err != nil { return api.ContractMetadata{}, false, err } // check our budget - txnFee := state.fee.Mul64(estimatedFileContractTransactionSetSize) + txnFee := 
state.Fee.Mul64(estimatedFileContractTransactionSetSize) renterFunds := initialContractFunding(scan.Settings, txnFee, minInitialContractFunds, maxInitialContractFunds) if budget.Cmp(renterFunds) < 0 { c.logger.Debugw("insufficient budget", "budget", budget, "needed", renterFunds) @@ -1557,12 +1511,12 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos } // calculate the host collateral - endHeight := endHeight(state.cfg, state.period) + endHeight := state.EndHeight() expectedStorage := renterFundsToExpectedStorage(renterFunds, endHeight-cs.BlockHeight, scan.PriceTable) - hostCollateral := rhpv2.ContractFormationCollateral(state.cfg.Contracts.Period, expectedStorage, scan.Settings) + hostCollateral := rhpv2.ContractFormationCollateral(state.Period(), expectedStorage, scan.Settings) // form contract - contract, _, err := w.RHPForm(ctx, endHeight, hk, host.NetAddress, state.address, renterFunds, hostCollateral) + contract, _, err := w.RHPForm(ctx, endHeight, hk, host.NetAddress, state.Address, renterFunds, hostCollateral) if err != nil { // TODO: keep track of consecutive failures and break at some point c.logger.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) @@ -1577,7 +1531,7 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos // persist contract in store contractPrice := contract.Revision.MissedHostPayout().Sub(hostCollateral) - formedContract, err := c.ap.bus.AddContract(ctx, contract, contractPrice, renterFunds, cs.BlockHeight, api.ContractStatePending) + formedContract, err := c.bus.AddContract(ctx, contract, contractPrice, renterFunds, cs.BlockHeight, api.ContractStatePending) if err != nil { c.logger.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) return api.ContractMetadata{}, true, err @@ -1592,38 +1546,6 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos return formedContract, true, nil } -func (c *contractor) 
tryPerformPruning(wp *workerPool) { - c.mu.Lock() - if c.pruning || c.ap.isStopped() { - c.mu.Unlock() - return - } - c.pruning = true - c.pruningLastStart = time.Now() - c.mu.Unlock() - - c.ap.wg.Add(1) - go func() { - defer c.ap.wg.Done() - c.performContractPruning(wp) - c.mu.Lock() - c.pruning = false - c.mu.Unlock() - }() -} - -func (c *contractor) hostForContract(ctx context.Context, fcid types.FileContractID) (host hostdb.HostInfo, metadata api.ContractMetadata, err error) { - // fetch the contract - metadata, err = c.ap.bus.Contract(ctx, fcid) - if err != nil { - return - } - - // fetch the host - host, err = c.ap.bus.Host(ctx, metadata.HostKey) - return -} - func addLeeway(n uint64, pct float64) uint64 { if pct < 0 { panic("given leeway percent has to be positive") @@ -1631,10 +1553,6 @@ func addLeeway(n uint64, pct float64) uint64 { return uint64(math.Ceil(float64(n) * pct)) } -func endHeight(cfg api.AutopilotConfig, currentPeriod uint64) uint64 { - return currentPeriod + cfg.Contracts.Period + cfg.Contracts.RenewWindow -} - func initialContractFunding(settings rhpv2.HostSettings, txnFee, min, max types.Currency) types.Currency { if !max.IsZero() && min.Cmp(max) > 0 { panic("given min is larger than max") // developer error @@ -1698,3 +1616,17 @@ func renterFundsToExpectedStorage(renterFunds types.Currency, duration uint64, p } return expectedStorage.Big().Uint64() } + +func (c *Contractor) HasAlert(ctx context.Context, id types.Hash256) bool { + ar, err := c.alerter.Alerts(ctx, alerts.AlertsOpts{Offset: 0, Limit: -1}) + if err != nil { + c.logger.Errorf("failed to fetch alerts: %v", err) + return false + } + for _, alert := range ar.Alerts { + if alert.ID == id { + return true + } + } + return false +} diff --git a/autopilot/contractor_test.go b/autopilot/contractor/contractor_test.go similarity index 96% rename from autopilot/contractor_test.go rename to autopilot/contractor/contractor_test.go index 575605612..1c0e0cb69 100644 --- 
a/autopilot/contractor_test.go +++ b/autopilot/contractor/contractor_test.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "math" @@ -8,7 +8,7 @@ import ( ) func TestCalculateMinScore(t *testing.T) { - c := &contractor{ + c := &Contractor{ logger: zap.NewNop().Sugar(), } diff --git a/autopilot/contractor/evaluate.go b/autopilot/contractor/evaluate.go new file mode 100644 index 000000000..b34cd92a1 --- /dev/null +++ b/autopilot/contractor/evaluate.go @@ -0,0 +1,176 @@ +package contractor + +import ( + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/worker" +) + +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (usables uint64) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + for _, host := range hosts { + usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) + if usable { + usables++ + } + } + return +} + +// EvaluateConfig evaluates the given configuration and if the gouging settings +// are too strict for the number of contracts required by 'cfg', it will provide +// a recommendation on how to loosen it. 
+func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (resp api.ConfigEvaluationResponse) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) + + resp.Hosts = uint64(len(hosts)) + for _, host := range hosts { + usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) + if usable { + resp.Usable++ + continue + } + if usableBreakdown.blocked > 0 { + resp.Unusable.Blocked++ + } + if usableBreakdown.notacceptingcontracts > 0 { + resp.Unusable.NotAcceptingContracts++ + } + if usableBreakdown.notcompletingscan > 0 { + resp.Unusable.NotScanned++ + } + if usableBreakdown.unknown > 0 { + resp.Unusable.Unknown++ + } + if usableBreakdown.gougingBreakdown.ContractErr != "" { + resp.Unusable.Gouging.Contract++ + } + if usableBreakdown.gougingBreakdown.DownloadErr != "" { + resp.Unusable.Gouging.Download++ + } + if usableBreakdown.gougingBreakdown.GougingErr != "" { + resp.Unusable.Gouging.Gouging++ + } + if usableBreakdown.gougingBreakdown.PruneErr != "" { + resp.Unusable.Gouging.Pruning++ + } + if usableBreakdown.gougingBreakdown.UploadErr != "" { + resp.Unusable.Gouging.Upload++ + } + } + + if resp.Usable >= cfg.Contracts.Amount { + return // no recommendation needed + } + + // optimise gouging settings + maxGS := func() api.GougingSettings { + return api.GougingSettings{ + // these are the fields we optimise one-by-one + MaxRPCPrice: types.MaxCurrency, + MaxContractPrice: types.MaxCurrency, + MaxDownloadPrice: types.MaxCurrency, + MaxUploadPrice: types.MaxCurrency, + MaxStoragePrice: types.MaxCurrency, + + // these are not optimised, so we keep the same values as the user + // provided + HostBlockHeightLeeway: gs.HostBlockHeightLeeway, + MinPriceTableValidity: gs.MinPriceTableValidity, + MinAccountExpiry: gs.MinAccountExpiry, + MinMaxEphemeralAccountBalance: gs.MinMaxEphemeralAccountBalance, + 
MigrationSurchargeMultiplier: gs.MigrationSurchargeMultiplier, + } + } + + // use the input gouging settings as the starting point and try to optimise + // each field independent of the other fields we want to optimise + optimisedGS := gs + success := false + + // MaxRPCPrice + tmpGS := maxGS() + tmpGS.MaxRPCPrice = gs.MaxRPCPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxRPCPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxRPCPrice = tmpGS.MaxRPCPrice + success = true + } + // MaxContractPrice + tmpGS = maxGS() + tmpGS.MaxContractPrice = gs.MaxContractPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxContractPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxContractPrice = tmpGS.MaxContractPrice + success = true + } + // MaxDownloadPrice + tmpGS = maxGS() + tmpGS.MaxDownloadPrice = gs.MaxDownloadPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxDownloadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxDownloadPrice = tmpGS.MaxDownloadPrice + success = true + } + // MaxUploadPrice + tmpGS = maxGS() + tmpGS.MaxUploadPrice = gs.MaxUploadPrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxUploadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxUploadPrice = tmpGS.MaxUploadPrice + success = true + } + // MaxStoragePrice + tmpGS = maxGS() + tmpGS.MaxStoragePrice = gs.MaxStoragePrice + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxStoragePrice, cfg, cs, fee, currentPeriod, rs, hosts) { + optimisedGS.MaxStoragePrice = tmpGS.MaxStoragePrice + success = true + } + // If one of the optimisations was successful, we return the optimised + // gouging settings + if success { + resp.Recommendation = &api.ConfigRecommendation{ + GougingSettings: optimisedGS, + } + } + return +} + +// optimiseGougingSetting tries to optimise one field of the gouging settings to +// try and hit the target number of contracts. 
+func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.HostInfo) bool { + if cfg.Contracts.Amount == 0 { + return true // nothing to do + } + stepSize := []uint64{200, 150, 125, 110, 105} + maxSteps := 12 + + stepIdx := 0 + nSteps := 0 + prevVal := *field // to keep accurate value + for { + nUsable := countUsableHosts(cfg, cs, fee, currentPeriod, rs, *gs, hosts) + targetHit := nUsable >= cfg.Contracts.Amount + + if targetHit && nSteps == 0 { + return true // target already hit without optimising + } else if targetHit && stepIdx == len(stepSize)-1 { + return true // target hit after optimising + } else if targetHit { + // move one step back and decrease step size + stepIdx++ + nSteps-- + *field = prevVal + } else if nSteps >= maxSteps { + return false // ran out of steps + } + + // apply next step + prevVal = *field + newValue, overflow := prevVal.Mul64WithOverflow(stepSize[stepIdx]) + if overflow { + return false + } + newValue = newValue.Div64(100) + *field = newValue + nSteps++ + } +} diff --git a/autopilot/autopilot_test.go b/autopilot/contractor/evaluate_test.go similarity index 99% rename from autopilot/autopilot_test.go rename to autopilot/contractor/evaluate_test.go index 9ebafe675..66b2023c7 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/contractor/evaluate_test.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "math" diff --git a/autopilot/hostfilter.go b/autopilot/contractor/hostfilter.go similarity index 97% rename from autopilot/hostfilter.go rename to autopilot/contractor/hostfilter.go index 8de37221a..998bad2a0 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/contractor/hostfilter.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "errors" @@ -229,7 +229,7 @@ func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker. 
// - recoverable -> can be usable in the contract set if it is refreshed/renewed // - refresh -> should be refreshed // - renew -> should be renewed -func (c *contractor) isUsableContract(cfg api.AutopilotConfig, state state, ci contractInfo, bh uint64, f *ipFilter) (usable, recoverable, refresh, renew bool, reasons []string) { +func (c *Contractor) isUsableContract(cfg api.AutopilotConfig, rs api.RedundancySettings, ci contractInfo, bh uint64, f *ipFilter) (usable, recoverable, refresh, renew bool, reasons []string) { contract, s, pt := ci.contract, ci.settings, ci.priceTable usable = true @@ -246,7 +246,7 @@ func (c *contractor) isUsableContract(cfg api.AutopilotConfig, state state, ci c refresh = false renew = false } else { - if isOutOfCollateral(cfg, state.rs, contract, s, pt) { + if isOutOfCollateral(cfg, rs, contract, s, pt) { reasons = append(reasons, errContractOutOfCollateral.Error()) usable = false recoverable = true diff --git a/autopilot/hostfilter_test.go b/autopilot/contractor/hostfilter_test.go similarity index 99% rename from autopilot/hostfilter_test.go rename to autopilot/contractor/hostfilter_test.go index ec9ea3943..0ad67571e 100644 --- a/autopilot/hostfilter_test.go +++ b/autopilot/contractor/hostfilter_test.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "math" diff --git a/autopilot/hostinfo.go b/autopilot/contractor/hostinfo.go similarity index 72% rename from autopilot/hostinfo.go rename to autopilot/contractor/hostinfo.go index e0cbecadc..2483a2ee5 100644 --- a/autopilot/hostinfo.go +++ b/autopilot/contractor/hostinfo.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "context" @@ -10,50 +10,36 @@ import ( "go.sia.tech/renterd/worker" ) -func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api.HostHandlerResponse, error) { - state := c.ap.State() - - if state.cfg.Contracts.Allowance.IsZero() { +func (c *Contractor) HostInfo(ctx context.Context, hostKey types.PublicKey, state 
*State) (api.HostHandlerResponse, error) { + if state.Config().Contracts.Allowance.IsZero() { return api.HostHandlerResponse{}, fmt.Errorf("can not score hosts because contracts allowance is zero") } - if state.cfg.Contracts.Amount == 0 { + if state.Config().Contracts.Amount == 0 { return api.HostHandlerResponse{}, fmt.Errorf("can not score hosts because contracts amount is zero") } - if state.cfg.Contracts.Period == 0 { + if state.Period() == 0 { return api.HostHandlerResponse{}, fmt.Errorf("can not score hosts because contract period is zero") } - host, err := c.ap.bus.Host(ctx, hostKey) + host, err := c.bus.Host(ctx, hostKey) if err != nil { return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch requested host from bus: %w", err) } - gs, err := c.ap.bus.GougingSettings(ctx) - if err != nil { - return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch gouging settings from bus: %w", err) - } - rs, err := c.ap.bus.RedundancySettings(ctx) - if err != nil { - return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch redundancy settings from bus: %w", err) - } - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) if err != nil { return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch consensus state from bus: %w", err) } - fee, err := c.ap.bus.RecommendedFee(ctx) - if err != nil { - return api.HostHandlerResponse{}, fmt.Errorf("failed to fetch recommended fee from bus: %w", err) - } c.mu.Lock() storedData := c.cachedDataStored[hostKey] minScore := c.cachedMinScore c.mu.Unlock() - gc := worker.NewGougingChecker(gs, cs, fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) + gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) // ignore the pricetable's HostBlockHeight by setting it to our own blockheight host.Host.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host, minScore, storedData) + isUsable, 
unusableResult := isUsableHost(state.Config(), state.RS, gc, host, minScore, storedData) return api.HostHandlerResponse{ Host: host.Host, Checks: &api.HostHandlerResponseChecks{ @@ -67,7 +53,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api }, nil } -func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.HostInfo) (hi hostInfo, found bool) { +func (c *Contractor) hostInfoFromCache(ctx context.Context, state *State, host hostdb.HostInfo) (hi hostInfo, found bool) { // grab host details from cache c.mu.Lock() hi, found = c.cachedHostInfo[host.PublicKey] @@ -84,13 +70,12 @@ func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.HostInfo // inconsistency would resolve itself but trying to update it here improves // first time user experience if host.Scanned && hi.UnusableResult.notcompletingscan > 0 { - cs, err := c.ap.bus.ConsensusState(ctx) + cs, err := c.bus.ConsensusState(ctx) if err != nil { c.logger.Error("failed to fetch consensus state from bus: %v", err) } else { - state := c.ap.State() - gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, storedData) + gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) + isUsable, unusableResult := isUsableHost(state.Config(), state.RS, gc, host, minScore, storedData) hi = hostInfo{ Usable: isUsable, UnusableResult: unusableResult, @@ -106,7 +91,7 @@ func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.HostInfo return } -func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostHandlerResponse, error) { +func (c *Contractor) HostInfos(ctx context.Context, state *State, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, 
limit int) ([]api.HostHandlerResponse, error) { // declare helper to decide whether to keep a host. if !isValidUsabilityFilterMode(usabilityMode) { return nil, fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable' or an empty string for no filter", usabilityMode) @@ -131,7 +116,7 @@ func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, a wanted := limit for { // fetch up to 'limit' hosts. - hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{ + hosts, err := c.bus.SearchHosts(ctx, api.SearchHostOptions{ Offset: offset, Limit: limit, FilterMode: filterMode, @@ -151,7 +136,7 @@ func (c *contractor) HostInfos(ctx context.Context, filterMode, usabilityMode, a // decide how many of the returned hosts to keep. var keptHosts int for _, host := range hosts { - hi, cached := c.hostInfoFromCache(ctx, host) + hi, cached := c.hostInfoFromCache(ctx, state, host) if !cached { // when the filterMode is "all" we include uncached hosts and // set IsChecked = false. 
diff --git a/autopilot/hosts.go b/autopilot/contractor/hosts.go similarity index 98% rename from autopilot/hosts.go rename to autopilot/contractor/hosts.go index aba45ee87..6fbd9f9e7 100644 --- a/autopilot/hosts.go +++ b/autopilot/contractor/hosts.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import "lukechampine.com/frand" diff --git a/autopilot/hosts_test.go b/autopilot/contractor/hosts_test.go similarity index 98% rename from autopilot/hosts_test.go rename to autopilot/contractor/hosts_test.go index 332bf1ea3..aa46647c1 100644 --- a/autopilot/hosts_test.go +++ b/autopilot/contractor/hosts_test.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "math" diff --git a/autopilot/hostscore.go b/autopilot/contractor/hostscore.go similarity index 99% rename from autopilot/hostscore.go rename to autopilot/contractor/hostscore.go index 3c26dce42..427ea6926 100644 --- a/autopilot/hostscore.go +++ b/autopilot/contractor/hostscore.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "math" diff --git a/autopilot/hostscore_test.go b/autopilot/contractor/hostscore_test.go similarity index 92% rename from autopilot/hostscore_test.go rename to autopilot/contractor/hostscore_test.go index e48417235..76b6c3bae 100644 --- a/autopilot/hostscore_test.go +++ b/autopilot/contractor/hostscore_test.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "math" @@ -10,6 +10,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/test" ) var cfg = api.AutopilotConfig{ @@ -35,10 +36,10 @@ func TestHostScore(t *testing.T) { day := 24 * time.Hour newHost := func(s rhpv2.HostSettings) hostdb.Host { - return newTestHost(randomHostKey(), newTestHostPriceTable(), s) + return test.NewHost(test.RandomHostKey(), test.NewHostPriceTable(), s) } - h1 := newHost(newTestHostSettings()) - h2 := newHost(newTestHostSettings()) + h1 := newHost(test.NewHostSettings()) + h2 := 
newHost(test.NewHostSettings()) // assert both hosts score equal redundancy := 3.0 @@ -53,7 +54,7 @@ func TestHostScore(t *testing.T) { } // assert collateral affects the score - settings := newTestHostSettings() + settings := test.NewHostSettings() settings.Collateral = settings.Collateral.Div64(2) settings.MaxCollateral = settings.MaxCollateral.Div64(2) h1 = newHost(settings) // reset @@ -62,21 +63,21 @@ func TestHostScore(t *testing.T) { } // assert interactions affect the score - h1 = newHost(newTestHostSettings()) // reset + h1 = newHost(test.NewHostSettings()) // reset h1.Interactions.SuccessfulInteractions++ if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { t.Fatal("unexpected") } // assert uptime affects the score - h2 = newHost(newTestHostSettings()) // reset + h2 = newHost(test.NewHostSettings()) // reset h2.Interactions.SecondToLastScanSuccess = false if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() || ageScore(h1) != ageScore(h2) { t.Fatal("unexpected") } // assert version affects the score - h2Settings := newTestHostSettings() + h2Settings := test.NewHostSettings() h2Settings.Version = "1.5.6" // lower h2 = newHost(h2Settings) // reset if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { @@ -84,21 +85,21 @@ func TestHostScore(t *testing.T) { } // asseret remaining storage affects the score. - h1 = newHost(newTestHostSettings()) // reset + h1 = newHost(test.NewHostSettings()) // reset h2.Settings.RemainingStorage = 100 if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { t.Fatal("unexpected") } // assert MaxCollateral affects the score. 
- h2 = newHost(newTestHostSettings()) // reset + h2 = newHost(test.NewHostSettings()) // reset h2.PriceTable.MaxCollateral = types.ZeroCurrency if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { t.Fatal("unexpected") } // assert price affects the score. - h2 = newHost(newTestHostSettings()) // reset + h2 = newHost(test.NewHostSettings()) // reset h2.PriceTable.WriteBaseCost = types.Siacoins(1) if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { t.Fatal("unexpected") diff --git a/autopilot/ipfilter.go b/autopilot/contractor/ipfilter.go similarity index 94% rename from autopilot/ipfilter.go rename to autopilot/contractor/ipfilter.go index 0932d7676..0573d8a04 100644 --- a/autopilot/ipfilter.go +++ b/autopilot/contractor/ipfilter.go @@ -1,11 +1,10 @@ -package autopilot +package contractor import ( "context" "errors" "fmt" "net" - "strings" "time" "go.sia.tech/core/types" @@ -28,7 +27,7 @@ const ( ) var ( - errIOTimeout = errors.New("i/o timeout") + ErrIOTimeout = errors.New("i/o timeout") errServerMisbehaving = errors.New("server misbehaving") errTooManyAddresses = errors.New("host has more than two addresses, or two of the same type") errUnparsableAddress = errors.New("host address could not be parsed to a subnet") @@ -43,7 +42,7 @@ type ( } ) -func (c *contractor) newIPFilter() *ipFilter { +func (c *Contractor) newIPFilter() *ipFilter { c.resolver.pruneCache() return &ipFilter{ subnetToHostKey: make(map[string]string), @@ -57,7 +56,7 @@ func (f *ipFilter) IsRedundantIP(hostIP string, hostKey types.PublicKey) bool { // perform lookup subnets, err := f.resolver.lookup(hostIP) if err != nil { - if !strings.Contains(err.Error(), errNoSuchHost.Error()) { + if !utils.IsErr(err, utils.ErrNoSuchHost) { f.logger.Errorf("failed to check for redundant IP, treating host %v with IP %v as redundant, err: %v", hostKey, hostIP, err) } return true @@ -138,7 +137,7 @@ func (r *ipResolver) lookup(hostIP 
string) ([]string, error) { addrs, err := r.resolver.LookupIPAddr(ctx, host) if err != nil { // check the cache if it's an i/o timeout or server misbehaving error - if utils.IsErr(err, errIOTimeout) || utils.IsErr(err, errServerMisbehaving) { + if utils.IsErr(err, ErrIOTimeout) || utils.IsErr(err, errServerMisbehaving) { if entry, found := r.cache[hostIP]; found && time.Since(entry.created) < ipCacheEntryValidity { r.logger.Debugf("using cached IP addresses for %v, err: %v", hostIP, err) return entry.subnets, nil diff --git a/autopilot/ipfilter_test.go b/autopilot/contractor/ipfilter_test.go similarity index 97% rename from autopilot/ipfilter_test.go rename to autopilot/contractor/ipfilter_test.go index 29fc3c8cf..63be78753 100644 --- a/autopilot/ipfilter_test.go +++ b/autopilot/contractor/ipfilter_test.go @@ -1,4 +1,4 @@ -package autopilot +package contractor import ( "context" @@ -67,21 +67,21 @@ func TestIPResolver(t *testing.T) { } // test IO timeout - no cache entry - r.setNextErr(errIOTimeout) - if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errIOTimeout) { + r.setNextErr(ErrIOTimeout) + if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, ErrIOTimeout) { t.Fatal("unexpected error", err) } // test IO timeout - expired cache entry ipr.cache["example.com:1234"] = ipCacheEntry{subnets: []string{"a"}} - r.setNextErr(errIOTimeout) - if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, errIOTimeout) { + r.setNextErr(ErrIOTimeout) + if _, err := ipr.lookup("example.com:1234"); !utils.IsErr(err, ErrIOTimeout) { t.Fatal("unexpected error", err) } // test IO timeout - live cache entry ipr.cache["example.com:1234"] = ipCacheEntry{created: time.Now(), subnets: []string{"a"}} - r.setNextErr(errIOTimeout) + r.setNextErr(ErrIOTimeout) if subnets, err := ipr.lookup("example.com:1234"); err != nil { t.Fatal("unexpected error", err) } else if len(subnets) != 1 || subnets[0] != "a" { diff --git a/autopilot/contractor/state.go 
b/autopilot/contractor/state.go new file mode 100644 index 000000000..769bdd8f4 --- /dev/null +++ b/autopilot/contractor/state.go @@ -0,0 +1,53 @@ +package contractor + +import ( + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" +) + +type ( + // State serves as input for the contractor's maintenance. It contains all + // state that should remain constant across a single round of contract + // performance. + State struct { + GS api.GougingSettings + RS api.RedundancySettings + AP api.Autopilot + + Address types.Address + Fee types.Currency + Worker Worker + } +) + +func (state *State) AllowRedundantIPs() bool { + return state.AP.Config.Hosts.AllowRedundantIPs +} + +func (state *State) Allowance() types.Currency { + return state.AP.Config.Contracts.Allowance +} + +func (state *State) Config() api.AutopilotConfig { + return state.AP.Config +} + +func (state *State) ContractSet() string { + return state.AP.Config.Contracts.Set +} + +func (s *State) EndHeight() uint64 { + return s.AP.EndHeight() +} + +func (state *State) WantedContracts() uint64 { + return state.AP.Config.Contracts.Amount +} + +func (state *State) Period() uint64 { + return state.AP.Config.Contracts.Period +} + +func (state *State) RenewWindow() uint64 { + return state.AP.Config.Contracts.RenewWindow +} diff --git a/autopilot/host_test.go b/autopilot/host_test.go index fa1a0ab44..0a3de2a71 100644 --- a/autopilot/host_test.go +++ b/autopilot/host_test.go @@ -1,20 +1,14 @@ package autopilot import ( - "net" "testing" - "time" - rhpv2 "go.sia.tech/core/rhp/v2" - rhpv3 "go.sia.tech/core/rhp/v3" - "go.sia.tech/core/types" - "go.sia.tech/renterd/hostdb" - "lukechampine.com/frand" + "go.sia.tech/renterd/internal/test" ) func TestHost(t *testing.T) { - hk := randomHostKey() - h := newTestHost(hk, newTestHostPriceTable(), newTestHostSettings()) + hk := test.RandomHostKey() + h := test.NewHost(hk, test.NewHostPriceTable(), test.NewHostSettings()) // assert host is online if !h.IsOnline() { @@ -39,94 +33,3 
@@ func TestHost(t *testing.T) { t.Fatal("unexpected") } } - -func newTestHosts(n int) []hostdb.Host { - hosts := make([]hostdb.Host, n) - for i := 0; i < n; i++ { - hosts[i] = newTestHost(randomHostKey(), newTestHostPriceTable(), newTestHostSettings()) - } - return hosts -} - -func newTestHost(hk types.PublicKey, pt rhpv3.HostPriceTable, settings rhpv2.HostSettings) hostdb.Host { - return hostdb.Host{ - NetAddress: randomIP().String(), - KnownSince: time.Now(), - LastAnnouncement: time.Now(), - Interactions: hostdb.Interactions{ - TotalScans: 2, - LastScan: time.Now().Add(-time.Minute), - LastScanSuccess: true, - SecondToLastScanSuccess: true, - Uptime: 10 * time.Minute, - Downtime: 10 * time.Minute, - - SuccessfulInteractions: 2, - FailedInteractions: 0, - }, - PublicKey: hk, - PriceTable: hostdb.HostPriceTable{HostPriceTable: pt, Expiry: time.Now().Add(time.Minute)}, - Settings: settings, - Scanned: true, - } -} - -func newTestHostSettings() rhpv2.HostSettings { - return rhpv2.HostSettings{ - AcceptingContracts: true, - Collateral: types.Siacoins(1).Div64(1 << 40), - MaxCollateral: types.Siacoins(10000), - MaxDuration: 144 * 7 * 12, // 12w - Version: "1.5.10", - RemainingStorage: 1 << 42, // 4 TiB - } -} - -func newTestHostPriceTable() rhpv3.HostPriceTable { - oneSC := types.Siacoins(1) - - dlbwPrice := oneSC.Mul64(25).Div64(1 << 40) // 25 SC / TiB - ulbwPrice := oneSC.Div64(1 << 40) // 1 SC / TiB - - return rhpv3.HostPriceTable{ - Validity: time.Minute, - - // fields that are currently always set to 1H. 
- ReadLengthCost: types.NewCurrency64(1), - WriteLengthCost: types.NewCurrency64(1), - AccountBalanceCost: types.NewCurrency64(1), - FundAccountCost: types.NewCurrency64(1), - UpdatePriceTableCost: types.NewCurrency64(1), - HasSectorBaseCost: types.NewCurrency64(1), - MemoryTimeCost: types.NewCurrency64(1), - DropSectorsBaseCost: types.NewCurrency64(1), - DropSectorsUnitCost: types.NewCurrency64(1), - SwapSectorBaseCost: types.NewCurrency64(1), - - SubscriptionMemoryCost: types.NewCurrency64(1), - SubscriptionNotificationCost: types.NewCurrency64(1), - - InitBaseCost: types.NewCurrency64(1), - DownloadBandwidthCost: dlbwPrice, - UploadBandwidthCost: ulbwPrice, - - CollateralCost: types.Siacoins(1).Div64(1 << 40), - MaxCollateral: types.Siacoins(10000), - - ReadBaseCost: types.NewCurrency64(1), - WriteBaseCost: oneSC.Div64(1 << 40), - WriteStoreCost: oneSC.Div64(4032).Div64(1 << 40), // 1 SC / TiB / month - } -} - -func randomIP() net.IP { - rawIP := make([]byte, 16) - frand.Read(rawIP) - return net.IP(rawIP) -} - -func randomHostKey() types.PublicKey { - var hk types.PublicKey - frand.Read(hk[:]) - return hk -} diff --git a/autopilot/migrator.go b/autopilot/migrator.go index d389a051b..6db5d1ea6 100644 --- a/autopilot/migrator.go +++ b/autopilot/migrator.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" @@ -175,7 +176,7 @@ func (m *migrator) performMigrations(p *workerPool) { } } else { m.logger.Infof("%v: migration %d/%d succeeded, key: %v, health: %v, overpaid: %v, shards migrated: %v", id, j.slabIdx+1, j.batchSize, j.Key, j.Health, res.SurchargeApplied, res.NumShardsMigrated) - m.ap.DismissAlert(ctx, alertIDForSlab(alertMigrationID, j.Key)) + m.ap.DismissAlert(ctx, alerts.IDForSlab(alertMigrationID, j.Key)) if res.SurchargeApplied { // this alert confirms the user his gouging // settings are working, it will be dismissed @@ -200,7 +201,12 @@ func (m 
*migrator) performMigrations(p *workerPool) { OUTER: for { // fetch currently configured set - set := m.ap.State().cfg.Contracts.Set + autopilot, err := m.ap.Config(m.ap.shutdownCtx) + if err != nil { + m.logger.Errorf("failed to fetch autopilot config: %v", err) + return + } + set := autopilot.Config.Contracts.Set if set == "" { m.logger.Error("could not perform migrations, no contract set configured") return diff --git a/autopilot/scanner.go b/autopilot/scanner.go index 76643e5b5..339d3645d 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -11,6 +11,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/autopilot/contractor" "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" @@ -210,7 +211,12 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo // fetch the config right before removing offline hosts to get the most // recent settings in case they were updated while scanning.
- hostCfg := s.ap.State().cfg.Hosts + autopilot, err := s.ap.Config(ctx) + if err != nil { + s.logger.Errorf("tryPerformHostScan: failed to fetch autopilot config: %v", err) + return + } + hostCfg := autopilot.Config.Hosts maxDowntime := time.Duration(hostCfg.MaxDowntimeHours) * time.Hour minRecentScanFailures := hostCfg.MinRecentScanFailures @@ -315,7 +321,7 @@ func (s *scanner) launchScanWorkers(ctx context.Context, w scanWorker, reqs chan scan, err := w.RHPScan(ctx, req.hostKey, req.hostIP, s.currentTimeout()) if err != nil { break // abort - } else if !utils.IsErr(errors.New(scan.ScanError), errIOTimeout) && scan.Ping > 0 { + } else if !utils.IsErr(errors.New(scan.ScanError), contractor.ErrIOTimeout) && scan.Ping > 0 { s.tracker.addDataPoint(time.Duration(scan.Ping)) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 1cdd096d2..66fb16730 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -10,6 +10,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/internal/test" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -86,7 +87,7 @@ func (w *mockWorker) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, func TestScanner(t *testing.T) { // prepare 100 hosts - hosts := newTestHosts(100) + hosts := test.NewHosts(100) // init new scanner b := &mockBus{hosts: hosts} diff --git a/internal/test/host.go b/internal/test/host.go new file mode 100644 index 000000000..1041fb795 --- /dev/null +++ b/internal/test/host.go @@ -0,0 +1,103 @@ +package test + +import ( + "net" + "time" + + rhpv2 "go.sia.tech/core/rhp/v2" + rhpv3 "go.sia.tech/core/rhp/v3" + "go.sia.tech/core/types" + "go.sia.tech/renterd/hostdb" + "lukechampine.com/frand" +) + +func NewHosts(n int) []hostdb.Host { + hosts := make([]hostdb.Host, n) + for i := 0; i < n; i++ { + hosts[i] = NewHost(RandomHostKey(), NewHostPriceTable(), NewHostSettings()) + } + return hosts +} + +func NewHost(hk 
types.PublicKey, pt rhpv3.HostPriceTable, settings rhpv2.HostSettings) hostdb.Host { + return hostdb.Host{ + NetAddress: randomIP().String(), + KnownSince: time.Now(), + LastAnnouncement: time.Now(), + Interactions: hostdb.Interactions{ + TotalScans: 2, + LastScan: time.Now().Add(-time.Minute), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + Uptime: 10 * time.Minute, + Downtime: 10 * time.Minute, + + SuccessfulInteractions: 2, + FailedInteractions: 0, + }, + PublicKey: hk, + PriceTable: hostdb.HostPriceTable{HostPriceTable: pt, Expiry: time.Now().Add(time.Minute)}, + Settings: settings, + Scanned: true, + } +} + +func NewHostSettings() rhpv2.HostSettings { + return rhpv2.HostSettings{ + AcceptingContracts: true, + Collateral: types.Siacoins(1).Div64(1 << 40), + MaxCollateral: types.Siacoins(10000), + MaxDuration: 144 * 7 * 12, // 12w + Version: "1.5.10", + RemainingStorage: 1 << 42, // 4 TiB + } +} + +func NewHostPriceTable() rhpv3.HostPriceTable { + oneSC := types.Siacoins(1) + + dlbwPrice := oneSC.Mul64(25).Div64(1 << 40) // 25 SC / TiB + ulbwPrice := oneSC.Div64(1 << 40) // 1 SC / TiB + + return rhpv3.HostPriceTable{ + Validity: time.Minute, + + // fields that are currently always set to 1H. 
+ ReadLengthCost: types.NewCurrency64(1), + WriteLengthCost: types.NewCurrency64(1), + AccountBalanceCost: types.NewCurrency64(1), + FundAccountCost: types.NewCurrency64(1), + UpdatePriceTableCost: types.NewCurrency64(1), + HasSectorBaseCost: types.NewCurrency64(1), + MemoryTimeCost: types.NewCurrency64(1), + DropSectorsBaseCost: types.NewCurrency64(1), + DropSectorsUnitCost: types.NewCurrency64(1), + SwapSectorBaseCost: types.NewCurrency64(1), + + SubscriptionMemoryCost: types.NewCurrency64(1), + SubscriptionNotificationCost: types.NewCurrency64(1), + + InitBaseCost: types.NewCurrency64(1), + DownloadBandwidthCost: dlbwPrice, + UploadBandwidthCost: ulbwPrice, + + CollateralCost: types.Siacoins(1).Div64(1 << 40), + MaxCollateral: types.Siacoins(10000), + + ReadBaseCost: types.NewCurrency64(1), + WriteBaseCost: oneSC.Div64(1 << 40), + WriteStoreCost: oneSC.Div64(4032).Div64(1 << 40), // 1 SC / TiB / month + } +} + +func RandomHostKey() types.PublicKey { + var hk types.PublicKey + frand.Read(hk[:]) + return hk +} + +func randomIP() net.IP { + rawIP := make([]byte, 16) + frand.Read(rawIP) + return net.IP(rawIP) +} diff --git a/internal/utils/errors.go b/internal/utils/errors.go index b884cde70..6c248b61d 100644 --- a/internal/utils/errors.go +++ b/internal/utils/errors.go @@ -5,6 +5,15 @@ import ( "strings" ) +// Common i/o related errors +var ( + ErrNoRouteToHost = errors.New("no route to host") + ErrNoSuchHost = errors.New("no such host") + ErrConnectionRefused = errors.New("connection refused") + ErrConnectionTimedOut = errors.New("connection timed out") + ErrConnectionResetByPeer = errors.New("connection reset by peer") +) + // IsErr can be used to compare an error to a target and also works when used on // errors that haven't been wrapped since it will fall back to a string // comparison. Useful to check errors returned over the network. 
From 28544d517480d8dea75351d8cb9d6478d256d9e4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 12:57:06 +0100 Subject: [PATCH 085/201] contractor: remove worker from state --- autopilot/autopilot.go | 2 +- autopilot/contractor/contractor.go | 41 +++++++++++------------------- autopilot/contractor/state.go | 1 - 3 files changed, 16 insertions(+), 28 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 6d8d4c3d3..fc75a950b 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -306,7 +306,7 @@ func (ap *Autopilot) Run() error { } // perform maintenance - setChanged, err := ap.c.PerformContractMaintenance(ap.shutdownCtx, state) + setChanged, err := ap.c.PerformContractMaintenance(ap.shutdownCtx, w, state) if err != nil && utils.IsErr(err, context.Canceled) { return } else if err != nil { diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 0eee7e7ef..45cbfc2a9 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -225,10 +225,7 @@ func canSkipContractMaintenance(ctx context.Context, cfg api.ContractsConfig) (s return "", false } -func (c *Contractor) PerformContractMaintenance(ctx context.Context, state *State) (bool, error) { - // convenience variables - w := state.Worker - +func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, state *State) (bool, error) { // check if we can skip maintenance if reason, skip := canSkipContractMaintenance(ctx, state.Config().Contracts); skip { if reason != "" { @@ -344,7 +341,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, state *Stat c.mu.Unlock() // run checks - updatedSet, toArchive, toStopUsing, toRefresh, toRenew, err := c.runContractChecks(ctx, state, contracts, isInCurrentSet, minScore) + updatedSet, toArchive, toStopUsing, toRefresh, toRenew, err := c.runContractChecks(ctx, w, state, contracts, isInCurrentSet, minScore) if err != nil { return 
false, fmt.Errorf("failed to run contract checks, err: %v", err) } @@ -387,7 +384,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, state *Stat var renewed []renewal if limit > 0 { var toKeep []api.ContractMetadata - renewed, toKeep = c.runContractRenewals(ctx, state, toRenew, &remaining, limit) + renewed, toKeep = c.runContractRenewals(ctx, w, state, toRenew, &remaining, limit) for _, ri := range renewed { if ri.ci.usable || ri.ci.recoverable { updatedSet = append(updatedSet, ri.to) @@ -398,7 +395,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, state *Stat } // run contract refreshes - refreshed, err := c.runContractRefreshes(ctx, state, toRefresh, &remaining) + refreshed, err := c.runContractRefreshes(ctx, w, state, toRefresh, &remaining) if err != nil { c.logger.Errorf("failed to refresh contracts, err: %v", err) // continue } else { @@ -429,7 +426,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, state *Stat } else if wallet.Confirmed.IsZero() && wallet.Unconfirmed.IsZero() { c.logger.Warn("contract formations skipped, wallet is empty") } else { - formed, err = c.runContractFormations(ctx, state, candidates, usedHosts, unusableHosts, state.WantedContracts()-uint64(len(updatedSet)), &remaining) + formed, err = c.runContractFormations(ctx, w, state, candidates, usedHosts, unusableHosts, state.WantedContracts()-uint64(len(updatedSet)), &remaining) if err != nil { c.logger.Errorf("failed to form contracts, err: %v", err) // continue } else { @@ -602,7 +599,7 @@ func (c *Contractor) computeContractSetChanged(ctx context.Context, state *State return hasChanged } -func (c *Contractor) runContractChecks(ctx context.Context, state *State, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { +func (c *Contractor) 
runContractChecks(ctx context.Context, w Worker, state *State, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { select { case <-ctx.Done(): return @@ -610,9 +607,6 @@ func (c *Contractor) runContractChecks(ctx context.Context, state *State, contra } c.logger.Debug("running contract checks") - // convenience variables - w := state.Worker - // fetch consensus state cs, err := c.bus.ConsensusState(ctx) if err != nil { @@ -792,7 +786,7 @@ LOOP: return toKeep, toArchive, toStopUsing, toRefresh, toRenew, nil } -func (c *Contractor) runContractFormations(ctx context.Context, state *State, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { +func (c *Contractor) runContractFormations(ctx context.Context, w Worker, state *State, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { select { case <-ctx.Done(): return nil, nil @@ -800,7 +794,6 @@ func (c *Contractor) runContractFormations(ctx context.Context, state *State, ca } // convenience variables - w := state.Worker shouldFilter := !state.AllowRedundantIPs() c.logger.Debugw( @@ -899,7 +892,7 @@ LOOP: continue } - formedContract, proceed, err := c.formContract(ctx, state, host, minInitialContractFunds, maxInitialContractFunds, budget) + formedContract, proceed, err := c.formContract(ctx, w, state, host, minInitialContractFunds, maxInitialContractFunds, budget) if err == nil { // add contract to contract set formed = append(formed, formedContract) @@ -982,7 +975,7 @@ func (c *Contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont } } -func (c *Contractor) runContractRenewals(ctx 
context.Context, state *State, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { +func (c *Contractor) runContractRenewals(ctx context.Context, w Worker, state *State, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { c.logger.Debugw( "run contracts renewals", "torenew", len(toRenew), @@ -1014,7 +1007,7 @@ func (c *Contractor) runContractRenewals(ctx context.Context, state *State, toRe // renew and add if it succeeds or if its usable contract := toRenew[i].contract.ContractMetadata - renewed, proceed, err := c.renewContract(ctx, state, toRenew[i], budget) + renewed, proceed, err := c.renewContract(ctx, w, state, toRenew[i], budget) if err != nil { c.alerter.RegisterAlert(ctx, newContractRenewalFailedAlert(contract, !proceed, err)) if toRenew[i].usable { @@ -1042,7 +1035,7 @@ func (c *Contractor) runContractRenewals(ctx context.Context, state *State, toRe return renewals, toKeep } -func (c *Contractor) runContractRefreshes(ctx context.Context, state *State, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) { +func (c *Contractor) runContractRefreshes(ctx context.Context, w Worker, state *State, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) { c.logger.Debugw( "run contracts refreshes", "torefresh", len(toRefresh), @@ -1065,7 +1058,7 @@ func (c *Contractor) runContractRefreshes(ctx context.Context, state *State, toR } // refresh and add if it succeeds - renewed, proceed, err := c.refreshContract(ctx, state, ci, budget) + renewed, proceed, err := c.refreshContract(ctx, w, state, ci, budget) if err == nil { refreshed = append(refreshed, renewal{from: ci.contract.ContractMetadata, to: renewed, ci: ci}) } @@ -1322,7 +1315,7 @@ func (c *Contractor) candidateHosts(ctx context.Context, state *State, hosts []h return candidates, unusableHostResult, nil } -func (c *Contractor) 
renewContract(ctx context.Context, state *State, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) renewContract(ctx context.Context, w Worker, state *State, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't renew contract without a revision") } @@ -1333,7 +1326,6 @@ func (c *Contractor) renewContract(ctx context.Context, state *State, ci contrac fcid := contract.ID rev := contract.Revision hk := contract.HostKey - w := state.Worker // fetch consensus state cs, err := c.bus.ConsensusState(ctx) @@ -1403,7 +1395,7 @@ func (c *Contractor) renewContract(ctx context.Context, state *State, ci contrac return renewedContract, true, nil } -func (c *Contractor) refreshContract(ctx context.Context, state *State, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) refreshContract(ctx context.Context, w Worker, state *State, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't refresh contract without a revision") } @@ -1414,7 +1406,6 @@ func (c *Contractor) refreshContract(ctx context.Context, state *State, ci contr fcid := contract.ID rev := contract.Revision hk := contract.HostKey - w := state.Worker // fetch consensus state cs, err := c.bus.ConsensusState(ctx) @@ -1483,9 +1474,7 @@ func (c *Contractor) refreshContract(ctx context.Context, state *State, ci contr return refreshedContract, true, nil } -func (c *Contractor) formContract(ctx context.Context, state *State, host hostdb.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { - w := state.Worker - +func (c *Contractor) formContract(ctx 
context.Context, w Worker, state *State, host hostdb.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { // convenience variables hk := host.PublicKey diff --git a/autopilot/contractor/state.go b/autopilot/contractor/state.go index 769bdd8f4..1d9781ca2 100644 --- a/autopilot/contractor/state.go +++ b/autopilot/contractor/state.go @@ -16,7 +16,6 @@ type ( Address types.Address Fee types.Currency - Worker Worker } ) From b00038e59e2ed4c3d9e60ca5ab6f05dd1d8f640b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 13:13:13 +0100 Subject: [PATCH 086/201] contractor: pass ContractSettings where possible --- autopilot/autopilot.go | 2 +- autopilot/contractor/contractor.go | 20 ++++++++-------- autopilot/contractor/evaluate.go | 16 ++++++------- autopilot/contractor/evaluate_test.go | 9 +++----- autopilot/contractor/hostfilter.go | 2 +- autopilot/contractor/hostinfo.go | 8 +++---- autopilot/contractor/hostscore.go | 32 +++++++++++++------------- autopilot/contractor/hostscore_test.go | 15 +++++------- autopilot/contractor/state.go | 6 ++++- 9 files changed, 54 insertions(+), 56 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index fc75a950b..df756ab98 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -207,7 +207,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { } // evaluate the config - jc.Encode(contractor.EvaluateConfig(reqCfg, cs, fee, cfg.CurrentPeriod, rs, gs, hosts)) + jc.Encode(contractor.EvaluateConfig(reqCfg.Contracts, cs, fee, cfg.CurrentPeriod, rs, gs, hosts)) } func (ap *Autopilot) Run() error { diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 45cbfc2a9..dd5774a7e 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -227,7 +227,7 @@ func canSkipContractMaintenance(ctx context.Context, cfg api.ContractsConfig) (s 
func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, state *State) (bool, error) { // check if we can skip maintenance - if reason, skip := canSkipContractMaintenance(ctx, state.Config().Contracts); skip { + if reason, skip := canSkipContractMaintenance(ctx, state.ContractsConfig()); skip { if reason != "" { c.logger.Warn(reason) } @@ -326,7 +326,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s for _, h := range hosts { // ignore the pricetable's HostBlockHeight by setting it to our own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.Config(), state.RS, gc, h, minScore, hostData[h.PublicKey]) + isUsable, unusableResult := isUsableHost(state.ContractsConfig(), state.RS, gc, h, minScore, hostData[h.PublicKey]) hostInfos[h.PublicKey] = hostInfo{ Usable: isUsable, UnusableResult: unusableResult, @@ -719,7 +719,7 @@ LOOP: host.PriceTable.HostBlockHeight = cs.BlockHeight // decide whether the host is still good - usable, unusableResult := isUsableHost(state.Config(), state.RS, gc, host, minScore, contract.FileSize()) + usable, unusableResult := isUsableHost(state.ContractsConfig(), state.RS, gc, host, minScore, contract.FileSize()) if !usable { reasons := unusableResult.reasons() toStopUsing[fcid] = strings.Join(reasons, ",") @@ -756,7 +756,7 @@ LOOP: // decide whether the contract is still good ci := contractInfo{contract: contract, priceTable: host.PriceTable.HostPriceTable, settings: host.Settings} - usable, recoverable, refresh, renew, reasons := c.isUsableContract(state.Config(), state.RS, ci, cs.BlockHeight, ipFilter) + usable, recoverable, refresh, renew, reasons := c.isUsableContract(state.AutopilotConfig(), state.RS, ci, cs.BlockHeight, ipFilter) ci.usable = usable ci.recoverable = recoverable if !usable { @@ -850,7 +850,7 @@ func (c *Contractor) runContractFormations(ctx context.Context, w Worker, state } // calculate min/max contract funds 
- minInitialContractFunds, maxInitialContractFunds := initialContractFundingMinMax(state.Config()) + minInitialContractFunds, maxInitialContractFunds := initialContractFundingMinMax(state.AutopilotConfig()) LOOP: for h := 0; missing > 0 && h < len(selected); h++ { @@ -1168,7 +1168,7 @@ func (c *Contractor) renewFundingEstimate(ctx context.Context, state *State, ci // check for a sane minimum that is equal to the initial contract funding // but without an upper cap. - minInitialContractFunds, _ := initialContractFundingMinMax(state.Config()) + minInitialContractFunds, _ := initialContractFundingMinMax(state.AutopilotConfig()) minimum := c.initialContractFunding(ci.settings, txnFeeEstimate, minInitialContractFunds, types.ZeroCurrency) cappedEstimatedCost := estimatedCost if cappedEstimatedCost.Cmp(minimum) < 0 { @@ -1293,7 +1293,7 @@ func (c *Contractor) candidateHosts(ctx context.Context, state *State, hosts []h // NOTE: ignore the pricetable's HostBlockHeight by setting it to our // own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - usable, result := isUsableHost(state.Config(), state.RS, gc, h, minScore, storedData[h.PublicKey]) + usable, result := isUsableHost(state.ContractsConfig(), state.RS, gc, h, minScore, storedData[h.PublicKey]) if usable { candidates = append(candidates, scoredHost{h.Host, result.scoreBreakdown.Score()}) continue @@ -1415,8 +1415,8 @@ func (c *Contractor) refreshContract(ctx context.Context, w Worker, state *State // calculate the renter funds var renterFunds types.Currency - if isOutOfFunds(state.Config(), ci.priceTable, ci.contract) { - renterFunds = c.refreshFundingEstimate(state.Config(), ci, state.Fee) + if isOutOfFunds(state.AutopilotConfig(), ci.priceTable, ci.contract) { + renterFunds = c.refreshFundingEstimate(state.AutopilotConfig(), ci, state.Fee) } else { renterFunds = rev.ValidRenterPayout() // don't increase funds } @@ -1431,7 +1431,7 @@ func (c *Contractor) refreshContract(ctx context.Context, w Worker, state 
*State unallocatedCollateral := contract.RemainingCollateral() // a refresh should always result in a contract that has enough collateral - minNewCollateral := minRemainingCollateral(state.Config(), state.RS, renterFunds, settings, ci.priceTable).Mul64(2) + minNewCollateral := minRemainingCollateral(state.AutopilotConfig(), state.RS, renterFunds, settings, ci.priceTable).Mul64(2) // renew the contract resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, state.Address, renterFunds, minNewCollateral, expectedStorage, settings.WindowSize) diff --git a/autopilot/contractor/evaluate.go b/autopilot/contractor/evaluate.go index b34cd92a1..eb7c9a94b 100644 --- a/autopilot/contractor/evaluate.go +++ b/autopilot/contractor/evaluate.go @@ -7,8 +7,8 @@ import ( "go.sia.tech/renterd/worker" ) -func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (usables uint64) { - gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) +func countUsableHosts(cfg api.ContractsConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (usables uint64) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.RenewWindow) for _, host := range hosts { usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) if usable { @@ -21,8 +21,8 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. // EvaluateConfig evaluates the given configuration and if the gouging settings // are too strict for the number of contracts required by 'cfg', it will provide // a recommendation on how to loosen it. 
-func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (resp api.ConfigEvaluationResponse) { - gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) +func EvaluateConfig(cfg api.ContractsConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (resp api.ConfigEvaluationResponse) { + gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.RenewWindow) resp.Hosts = uint64(len(hosts)) for _, host := range hosts { @@ -60,7 +60,7 @@ func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu } } - if resp.Usable >= cfg.Contracts.Amount { + if resp.Usable >= cfg.Amount { return // no recommendation needed } @@ -136,8 +136,8 @@ func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu // optimiseGougingSetting tries to optimise one field of the gouging settings to // try and hit the target number of contracts. 
-func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.HostInfo) bool { - if cfg.Contracts.Amount == 0 { +func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.ContractsConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.HostInfo) bool { + if cfg.Amount == 0 { return true // nothing to do } stepSize := []uint64{200, 150, 125, 110, 105} @@ -148,7 +148,7 @@ func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg prevVal := *field // to keep accurate value for { nUsable := countUsableHosts(cfg, cs, fee, currentPeriod, rs, *gs, hosts) - targetHit := nUsable >= cfg.Contracts.Amount + targetHit := nUsable >= cfg.Amount if targetHit && nSteps == 0 { return true // target already hit without optimising diff --git a/autopilot/contractor/evaluate_test.go b/autopilot/contractor/evaluate_test.go index 66b2023c7..228148da8 100644 --- a/autopilot/contractor/evaluate_test.go +++ b/autopilot/contractor/evaluate_test.go @@ -46,12 +46,9 @@ func TestOptimiseGougingSetting(t *testing.T) { } // prepare settings that result in all hosts being usable - cfg := api.AutopilotConfig{ - Contracts: api.ContractsConfig{ - Allowance: types.Siacoins(100000), - Amount: 10, - }, - Hosts: api.HostsConfig{}, + cfg := api.ContractsConfig{ + Allowance: types.Siacoins(100000), + Amount: 10, } cs := api.ConsensusState{ BlockHeight: 100, diff --git a/autopilot/contractor/hostfilter.go b/autopilot/contractor/hostfilter.go index 998bad2a0..1b828627a 100644 --- a/autopilot/contractor/hostfilter.go +++ b/autopilot/contractor/hostfilter.go @@ -176,7 +176,7 @@ func (u *unusableHostResult) keysAndValues() []interface{} { // isUsableHost returns whether the given host is usable along with a list of // reasons why it was deemed unusable. 
-func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.HostInfo, minScore float64, storedData uint64) (bool, unusableHostResult) { +func isUsableHost(cfg api.ContractsConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.HostInfo, minScore float64, storedData uint64) (bool, unusableHostResult) { if rs.Validate() != nil { panic("invalid redundancy settings were supplied - developer error") } diff --git a/autopilot/contractor/hostinfo.go b/autopilot/contractor/hostinfo.go index 2483a2ee5..3a07f664f 100644 --- a/autopilot/contractor/hostinfo.go +++ b/autopilot/contractor/hostinfo.go @@ -11,10 +11,10 @@ import ( ) func (c *Contractor) HostInfo(ctx context.Context, hostKey types.PublicKey, state *State) (api.HostHandlerResponse, error) { - if state.Config().Contracts.Allowance.IsZero() { + if state.ContractsConfig().Allowance.IsZero() { return api.HostHandlerResponse{}, fmt.Errorf("can not score hosts because contracts allowance is zero") } - if state.Config().Contracts.Amount == 0 { + if state.ContractsConfig().Amount == 0 { return api.HostHandlerResponse{}, fmt.Errorf("can not score hosts because contracts amount is zero") } if state.Period() == 0 { @@ -39,7 +39,7 @@ func (c *Contractor) HostInfo(ctx context.Context, hostKey types.PublicKey, stat // ignore the pricetable's HostBlockHeight by setting it to our own blockheight host.Host.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.Config(), state.RS, gc, host, minScore, storedData) + isUsable, unusableResult := isUsableHost(state.ContractsConfig(), state.RS, gc, host, minScore, storedData) return api.HostHandlerResponse{ Host: host.Host, Checks: &api.HostHandlerResponseChecks{ @@ -75,7 +75,7 @@ func (c *Contractor) hostInfoFromCache(ctx context.Context, state *State, host h c.logger.Error("failed to fetch consensus state from bus: %v", err) } else { gc := worker.NewGougingChecker(state.GS, cs, state.Fee, 
state.Period(), state.RenewWindow()) - isUsable, unusableResult := isUsableHost(state.Config(), state.RS, gc, host, minScore, storedData) + isUsable, unusableResult := isUsableHost(state.ContractsConfig(), state.RS, gc, host, minScore, storedData) hi = hostInfo{ Usable: isUsable, UnusableResult: unusableResult, diff --git a/autopilot/contractor/hostscore.go b/autopilot/contractor/hostscore.go index 427ea6926..8caa6602e 100644 --- a/autopilot/contractor/hostscore.go +++ b/autopilot/contractor/hostscore.go @@ -15,11 +15,11 @@ import ( const smallestValidScore = math.SmallestNonzeroFloat64 -func hostScore(cfg api.AutopilotConfig, h hostdb.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown { +func hostScore(cfg api.ContractsConfig, h hostdb.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown { // idealDataPerHost is the amount of data that we would have to put on each // host assuming that our storage requirements were spread evenly across // every single host. - idealDataPerHost := float64(cfg.Contracts.Storage) * expectedRedundancy / float64(cfg.Contracts.Amount) + idealDataPerHost := float64(cfg.Storage) * expectedRedundancy / float64(cfg.Amount) // allocationPerHost is the amount of data that we would like to be able to // put on each host, because data is not always spread evenly across the // hosts during upload. Slower hosts may get very little data, more @@ -51,8 +51,8 @@ func hostScore(cfg api.AutopilotConfig, h hostdb.Host, storedData uint64, expect // - If the host is more expensive than expected, an exponential malus is applied. // A 2x ratio will already cause the score to drop to 0.16 and a 3x ratio causes // it to drop to 0.05. 
-func priceAdjustmentScore(hostCostPerPeriod types.Currency, cfg api.AutopilotConfig) float64 { - hostPeriodBudget := cfg.Contracts.Allowance.Div64(cfg.Contracts.Amount) +func priceAdjustmentScore(hostCostPerPeriod types.Currency, cfg api.ContractsConfig) float64 { + hostPeriodBudget := cfg.Allowance.Div64(cfg.Amount) ratio := new(big.Rat).SetFrac(hostCostPerPeriod.Big(), hostPeriodBudget.Big()) fRatio, _ := ratio.Float64() @@ -125,7 +125,7 @@ func ageScore(h hostdb.Host) float64 { return weight } -func collateralScore(cfg api.AutopilotConfig, pt rhpv3.HostPriceTable, allocationPerHost uint64) float64 { +func collateralScore(cfg api.ContractsConfig, pt rhpv3.HostPriceTable, allocationPerHost uint64) float64 { // Ignore hosts which have set their max collateral to 0. if pt.MaxCollateral.IsZero() || pt.CollateralCost.IsZero() { return 0 @@ -137,10 +137,10 @@ func collateralScore(cfg api.AutopilotConfig, pt rhpv3.HostPriceTable, allocatio // compute the cost of storing numSectors := bytesToSectors(allocationPerHost) - storageCost := pt.AppendSectorCost(cfg.Contracts.Period).Storage.Mul64(numSectors) + storageCost := pt.AppendSectorCost(cfg.Period).Storage.Mul64(numSectors) // calculate the expected collateral for the host allocation. 
- expectedCollateral := pt.CollateralCost.Mul64(allocationPerHost).Mul64(cfg.Contracts.Period) + expectedCollateral := pt.CollateralCost.Mul64(allocationPerHost).Mul64(cfg.Period) if expectedCollateral.Cmp(pt.MaxCollateral) > 0 { expectedCollateral = pt.MaxCollateral } @@ -285,8 +285,8 @@ func sectorUploadCost(pt rhpv3.HostPriceTable, duration uint64) types.Currency { return uploadSectorCostRHPv3 } -func uploadCostForScore(cfg api.AutopilotConfig, h hostdb.Host, bytes uint64) types.Currency { - uploadSectorCostRHPv3 := sectorUploadCost(h.PriceTable.HostPriceTable, cfg.Contracts.Period) +func uploadCostForScore(cfg api.ContractsConfig, h hostdb.Host, bytes uint64) types.Currency { + uploadSectorCostRHPv3 := sectorUploadCost(h.PriceTable.HostPriceTable, cfg.Period) numSectors := bytesToSectors(bytes) return uploadSectorCostRHPv3.Mul64(numSectors) } @@ -298,20 +298,20 @@ func downloadCostForScore(h hostdb.Host, bytes uint64) types.Currency { return downloadSectorCostRHPv3.Mul64(numSectors) } -func storageCostForScore(cfg api.AutopilotConfig, h hostdb.Host, bytes uint64) types.Currency { - storeSectorCostRHPv3 := sectorStorageCost(h.PriceTable.HostPriceTable, cfg.Contracts.Period) +func storageCostForScore(cfg api.ContractsConfig, h hostdb.Host, bytes uint64) types.Currency { + storeSectorCostRHPv3 := sectorStorageCost(h.PriceTable.HostPriceTable, cfg.Period) numSectors := bytesToSectors(bytes) return storeSectorCostRHPv3.Mul64(numSectors) } -func hostPeriodCostForScore(h hostdb.Host, cfg api.AutopilotConfig, expectedRedundancy float64) types.Currency { +func hostPeriodCostForScore(h hostdb.Host, cfg api.ContractsConfig, expectedRedundancy float64) types.Currency { // compute how much data we upload, download and store. 
- uploadPerHost := uint64(float64(cfg.Contracts.Upload) * expectedRedundancy / float64(cfg.Contracts.Amount)) - downloadPerHost := uint64(float64(cfg.Contracts.Download) * expectedRedundancy / float64(cfg.Contracts.Amount)) - storagePerHost := uint64(float64(cfg.Contracts.Storage) * expectedRedundancy / float64(cfg.Contracts.Amount)) + uploadPerHost := uint64(float64(cfg.Upload) * expectedRedundancy / float64(cfg.Amount)) + downloadPerHost := uint64(float64(cfg.Download) * expectedRedundancy / float64(cfg.Amount)) + storagePerHost := uint64(float64(cfg.Storage) * expectedRedundancy / float64(cfg.Amount)) // compute the individual costs. - hostCollateral := rhpv2.ContractFormationCollateral(cfg.Contracts.Period, storagePerHost, h.Settings) + hostCollateral := rhpv2.ContractFormationCollateral(cfg.Period, storagePerHost, h.Settings) hostContractPrice := contractPriceForScore(h) hostUploadCost := uploadCostForScore(cfg, h, uploadPerHost) hostDownloadCost := downloadCostForScore(h, downloadPerHost) diff --git a/autopilot/contractor/hostscore_test.go b/autopilot/contractor/hostscore_test.go index 76b6c3bae..5e849ae35 100644 --- a/autopilot/contractor/hostscore_test.go +++ b/autopilot/contractor/hostscore_test.go @@ -40,6 +40,7 @@ func TestHostScore(t *testing.T) { } h1 := newHost(test.NewHostSettings()) h2 := newHost(test.NewHostSettings()) + cfg := cfg.Contracts // assert both hosts score equal redundancy := 3.0 @@ -109,11 +110,9 @@ func TestHostScore(t *testing.T) { func TestPriceAdjustmentScore(t *testing.T) { score := func(cpp uint32) float64 { t.Helper() - cfg := api.AutopilotConfig{ - Contracts: api.ContractsConfig{ - Allowance: types.Siacoins(5000), - Amount: 50, - }, + cfg := api.ContractsConfig{ + Allowance: types.Siacoins(5000), + Amount: 50, } return priceAdjustmentScore(types.Siacoins(cpp), cfg) } @@ -174,10 +173,8 @@ func TestCollateralScore(t *testing.T) { storageCost := uint64(100) score := func(collateral, maxCollateral uint64) float64 { t.Helper() - cfg 
:= api.AutopilotConfig{ - Contracts: api.ContractsConfig{ - Period: period, - }, + cfg := api.ContractsConfig{ + Period: period, } pt := rhpv3.HostPriceTable{ CollateralCost: types.NewCurrency64(collateral), diff --git a/autopilot/contractor/state.go b/autopilot/contractor/state.go index 1d9781ca2..663d40cfb 100644 --- a/autopilot/contractor/state.go +++ b/autopilot/contractor/state.go @@ -27,10 +27,14 @@ func (state *State) Allowance() types.Currency { return state.AP.Config.Contracts.Allowance } -func (state *State) Config() api.AutopilotConfig { +func (state *State) AutopilotConfig() api.AutopilotConfig { return state.AP.Config } +func (state *State) ContractsConfig() api.ContractsConfig { + return state.AP.Config.Contracts +} + func (state *State) ContractSet() string { return state.AP.Config.Contracts.Set } From 97c985e104e4d983fc60b43ed618923f1e906fcf Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 13:38:05 +0100 Subject: [PATCH 087/201] contractor: State -> MaintenanceState; contractor: remove 2 interface functions from Bus interface --- autopilot/autopilot.go | 15 ++++++-- autopilot/contractor/contract_spending.go | 2 +- autopilot/contractor/contractor.go | 45 +++++++++-------------- autopilot/contractor/hostinfo.go | 6 +-- autopilot/contractor/state.go | 29 ++++++++------- 5 files changed, 47 insertions(+), 50 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index df756ab98..512fb5d6b 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -771,7 +771,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { jc.Encode(hosts) } -func (ap *Autopilot) buildState(ctx context.Context) (*contractor.State, error) { +func (ap *Autopilot) buildState(ctx context.Context) (*contractor.MaintenanceState, error) { // fetch the autopilot from the bus autopilot, err := ap.Config(ctx) if err != nil { @@ -809,6 +809,12 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.State, error) } address 
:= wi.Address + // no need to try and form contracts if wallet is completely empty + skipContractFormations := wi.Confirmed.IsZero() && wi.Unconfirmed.IsZero() + if skipContractFormations { + ap.logger.Warn("contract formations skipped, wallet is empty") + } + // update current period if necessary if cs.Synced { if autopilot.CurrentPeriod == 0 { @@ -829,12 +835,13 @@ func (ap *Autopilot) buildState(ctx context.Context) (*contractor.State, error) } } - return &contractor.State{ + return &contractor.MaintenanceState{ GS: gs, RS: rs, AP: autopilot, - Address: address, - Fee: fee, + Address: address, + Fee: fee, + SkipContractFormations: skipContractFormations, }, nil } diff --git a/autopilot/contractor/contract_spending.go b/autopilot/contractor/contract_spending.go index 0938e2755..f72a045d3 100644 --- a/autopilot/contractor/contract_spending.go +++ b/autopilot/contractor/contract_spending.go @@ -44,7 +44,7 @@ func (c *Contractor) currentPeriodSpending(contracts []api.Contract, currentPeri return totalAllocated } -func (c *Contractor) remainingFunds(contracts []api.Contract, state *State) types.Currency { +func (c *Contractor) remainingFunds(contracts []api.Contract, state *MaintenanceState) types.Currency { // find out how much we spent in the current period spent := c.currentPeriodSpending(contracts, state.Period()) diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index dd5774a7e..e3957af3a 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -86,14 +86,12 @@ type Bus interface { AncestorContracts(ctx context.Context, id types.FileContractID, minStartHeight uint64) ([]api.ArchivedContract, error) ArchiveContracts(ctx context.Context, toArchive map[types.FileContractID]string) error ConsensusState(ctx context.Context) (api.ConsensusState, error) - Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error) Contracts(ctx context.Context, opts api.ContractsOpts) 
(contracts []api.ContractMetadata, err error) FileContractTax(ctx context.Context, payout types.Currency) (types.Currency, error) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) SetContractSet(ctx context.Context, set string, contracts []types.FileContractID) error - Wallet(ctx context.Context) (api.WalletResponse, error) } type Worker interface { @@ -225,7 +223,7 @@ func canSkipContractMaintenance(ctx context.Context, cfg api.ContractsConfig) (s return "", false } -func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, state *State) (bool, error) { +func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, state *MaintenanceState) (bool, error) { // check if we can skip maintenance if reason, skip := canSkipContractMaintenance(ctx, state.ContractsConfig()); skip { if reason != "" { @@ -417,23 +415,14 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s // check if we need to form contracts and add them to the contract set var formed []api.ContractMetadata - if uint64(len(updatedSet)) < threshold { - // no need to try and form contracts if wallet is completely empty - wallet, err := c.bus.Wallet(ctx) + if uint64(len(updatedSet)) < threshold && !state.SkipContractFormations { + formed, err = c.runContractFormations(ctx, w, state, candidates, usedHosts, unusableHosts, state.WantedContracts()-uint64(len(updatedSet)), &remaining) if err != nil { - c.logger.Errorf("failed to fetch wallet, err: %v", err) - return false, err - } else if wallet.Confirmed.IsZero() && wallet.Unconfirmed.IsZero() { - c.logger.Warn("contract formations skipped, wallet is empty") + c.logger.Errorf("failed to form contracts, err: %v", err) // continue } else { - formed, err = c.runContractFormations(ctx, w, state, 
candidates, usedHosts, unusableHosts, state.WantedContracts()-uint64(len(updatedSet)), &remaining) - if err != nil { - c.logger.Errorf("failed to form contracts, err: %v", err) // continue - } else { - for _, fc := range formed { - updatedSet = append(updatedSet, fc) - contractData[fc.ID] = 0 - } + for _, fc := range formed { + updatedSet = append(updatedSet, fc) + contractData[fc.ID] = 0 } } } @@ -471,7 +460,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s return c.computeContractSetChanged(ctx, state, currentSet, updatedSet, formed, refreshed, renewed, toStopUsing, contractData), nil } -func (c *Contractor) computeContractSetChanged(ctx context.Context, state *State, oldSet, newSet []api.ContractMetadata, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { +func (c *Contractor) computeContractSetChanged(ctx context.Context, state *MaintenanceState, oldSet, newSet []api.ContractMetadata, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { name := state.ContractSet() // build set lookups @@ -599,7 +588,7 @@ func (c *Contractor) computeContractSetChanged(ctx context.Context, state *State return hasChanged } -func (c *Contractor) runContractChecks(ctx context.Context, w Worker, state *State, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { +func (c *Contractor) runContractChecks(ctx context.Context, w Worker, state *MaintenanceState, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ 
error) { select { case <-ctx.Done(): return @@ -786,7 +775,7 @@ LOOP: return toKeep, toArchive, toStopUsing, toRefresh, toRenew, nil } -func (c *Contractor) runContractFormations(ctx context.Context, w Worker, state *State, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { +func (c *Contractor) runContractFormations(ctx context.Context, w Worker, state *MaintenanceState, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { select { case <-ctx.Done(): return nil, nil @@ -975,7 +964,7 @@ func (c *Contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont } } -func (c *Contractor) runContractRenewals(ctx context.Context, w Worker, state *State, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { +func (c *Contractor) runContractRenewals(ctx context.Context, w Worker, state *MaintenanceState, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { c.logger.Debugw( "run contracts renewals", "torenew", len(toRenew), @@ -1035,7 +1024,7 @@ func (c *Contractor) runContractRenewals(ctx context.Context, w Worker, state *S return renewals, toKeep } -func (c *Contractor) runContractRefreshes(ctx context.Context, w Worker, state *State, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) { +func (c *Contractor) runContractRefreshes(ctx context.Context, w Worker, state *MaintenanceState, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) { c.logger.Debugw( "run contracts refreshes", "torefresh", len(toRefresh), @@ -1109,7 +1098,7 @@ func (c *Contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contract return refreshAmountCapped 
} -func (c *Contractor) renewFundingEstimate(ctx context.Context, state *State, ci contractInfo, fee types.Currency, renewing bool) (types.Currency, error) { +func (c *Contractor) renewFundingEstimate(ctx context.Context, state *MaintenanceState, ci contractInfo, fee types.Currency, renewing bool) (types.Currency, error) { // estimate the cost of the current data stored dataStored := ci.contract.FileSize() storageCost := sectorStorageCost(ci.priceTable, state.Period()).Mul64(bytesToSectors(dataStored)) @@ -1245,7 +1234,7 @@ func (c *Contractor) calculateMinScore(candidates []scoredHost, numContracts uin return minScore } -func (c *Contractor) candidateHosts(ctx context.Context, state *State, hosts []hostdb.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { +func (c *Contractor) candidateHosts(ctx context.Context, state *MaintenanceState, hosts []hostdb.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { start := time.Now() // fetch consensus state @@ -1315,7 +1304,7 @@ func (c *Contractor) candidateHosts(ctx context.Context, state *State, hosts []h return candidates, unusableHostResult, nil } -func (c *Contractor) renewContract(ctx context.Context, w Worker, state *State, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) renewContract(ctx context.Context, w Worker, state *MaintenanceState, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't renew contract without a revision") } @@ -1395,7 +1384,7 @@ func (c *Contractor) renewContract(ctx context.Context, w Worker, state *State, return renewedContract, true, nil } -func (c *Contractor) refreshContract(ctx context.Context, w 
Worker, state *State, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) refreshContract(ctx context.Context, w Worker, state *MaintenanceState, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't refresh contract without a revision") } @@ -1474,7 +1463,7 @@ func (c *Contractor) refreshContract(ctx context.Context, w Worker, state *State return refreshedContract, true, nil } -func (c *Contractor) formContract(ctx context.Context, w Worker, state *State, host hostdb.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) formContract(ctx context.Context, w Worker, state *MaintenanceState, host hostdb.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { // convenience variables hk := host.PublicKey diff --git a/autopilot/contractor/hostinfo.go b/autopilot/contractor/hostinfo.go index 3a07f664f..87f530943 100644 --- a/autopilot/contractor/hostinfo.go +++ b/autopilot/contractor/hostinfo.go @@ -10,7 +10,7 @@ import ( "go.sia.tech/renterd/worker" ) -func (c *Contractor) HostInfo(ctx context.Context, hostKey types.PublicKey, state *State) (api.HostHandlerResponse, error) { +func (c *Contractor) HostInfo(ctx context.Context, hostKey types.PublicKey, state *MaintenanceState) (api.HostHandlerResponse, error) { if state.ContractsConfig().Allowance.IsZero() { return api.HostHandlerResponse{}, fmt.Errorf("can not score hosts because contracts allowance is zero") } @@ -53,7 +53,7 @@ func (c *Contractor) HostInfo(ctx context.Context, hostKey types.PublicKey, stat }, nil } -func (c *Contractor) hostInfoFromCache(ctx context.Context, state *State, host hostdb.HostInfo) (hi hostInfo, 
found bool) { +func (c *Contractor) hostInfoFromCache(ctx context.Context, state *MaintenanceState, host hostdb.HostInfo) (hi hostInfo, found bool) { // grab host details from cache c.mu.Lock() hi, found = c.cachedHostInfo[host.PublicKey] @@ -91,7 +91,7 @@ func (c *Contractor) hostInfoFromCache(ctx context.Context, state *State, host h return } -func (c *Contractor) HostInfos(ctx context.Context, state *State, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostHandlerResponse, error) { +func (c *Contractor) HostInfos(ctx context.Context, state *MaintenanceState, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostHandlerResponse, error) { // declare helper to decide whether to keep a host. if !isValidUsabilityFilterMode(usabilityMode) { return nil, fmt.Errorf("invalid usability mode: '%v', options are 'usable', 'unusable' or an empty string for no filter", usabilityMode) diff --git a/autopilot/contractor/state.go b/autopilot/contractor/state.go index 663d40cfb..2ae853dac 100644 --- a/autopilot/contractor/state.go +++ b/autopilot/contractor/state.go @@ -6,51 +6,52 @@ import ( ) type ( - // State serves as input for the contractor's maintenance. It contains all + // MaintenanceState serves as input for the contractor's maintenance. It contains all // state that should remain constant across a single round of contract // performance. 
- State struct { + MaintenanceState struct { GS api.GougingSettings RS api.RedundancySettings AP api.Autopilot - Address types.Address - Fee types.Currency + Address types.Address + Fee types.Currency + SkipContractFormations bool } ) -func (state *State) AllowRedundantIPs() bool { +func (state *MaintenanceState) AllowRedundantIPs() bool { return state.AP.Config.Hosts.AllowRedundantIPs } -func (state *State) Allowance() types.Currency { +func (state *MaintenanceState) Allowance() types.Currency { return state.AP.Config.Contracts.Allowance } -func (state *State) AutopilotConfig() api.AutopilotConfig { +func (state *MaintenanceState) AutopilotConfig() api.AutopilotConfig { return state.AP.Config } -func (state *State) ContractsConfig() api.ContractsConfig { +func (state *MaintenanceState) ContractsConfig() api.ContractsConfig { return state.AP.Config.Contracts } -func (state *State) ContractSet() string { +func (state *MaintenanceState) ContractSet() string { return state.AP.Config.Contracts.Set } -func (s *State) EndHeight() uint64 { - return s.AP.EndHeight() +func (state *MaintenanceState) EndHeight() uint64 { + return state.AP.EndHeight() } -func (state *State) WantedContracts() uint64 { +func (state *MaintenanceState) WantedContracts() uint64 { return state.AP.Config.Contracts.Amount } -func (state *State) Period() uint64 { +func (state *MaintenanceState) Period() uint64 { return state.AP.Config.Contracts.Period } -func (state *State) RenewWindow() uint64 { +func (state *MaintenanceState) RenewWindow() uint64 { return state.AP.Config.Contracts.RenewWindow } From 6095bc8d36637466dec3c9dd43a9bfed1a6f9264 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 16:11:40 +0100 Subject: [PATCH 088/201] contractor: add GougingChecker helper to State --- autopilot/contractor/contractor.go | 10 +++++----- autopilot/contractor/hostinfo.go | 5 ++--- autopilot/contractor/state.go | 5 +++++ 3 files changed, 12 insertions(+), 8 deletions(-) diff --git 
a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index e3957af3a..f27902e22 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -317,7 +317,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s } // create gouging checker - gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) + gc := state.GougingChecker(cs) // prepare hosts for cache hostInfos := make(map[types.PublicKey]hostInfo) @@ -699,7 +699,7 @@ LOOP: } // use a new gouging checker for every contract - gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) + gc := state.GougingChecker(cs) // set the host's block height to ours to disable the height check in // the gouging checks, in certain edge cases the renter might unsync and @@ -826,7 +826,7 @@ func (c *Contractor) runContractFormations(ctx context.Context, w Worker, state lastStateUpdate := time.Now() // prepare a gouging checker - gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) + gc := state.GougingChecker(cs) // prepare an IP filter that contains all used hosts ipFilter := c.newIPFilter() @@ -866,7 +866,7 @@ LOOP: c.logger.Errorf("could not fetch consensus state, err: %v", err) } else { cs = css - gc = worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) + gc = state.GougingChecker(cs) } } @@ -1244,7 +1244,7 @@ func (c *Contractor) candidateHosts(ctx context.Context, state *MaintenanceState } // create a gouging checker - gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) + gc := state.GougingChecker(cs) // select unused hosts that passed a scan var unused []hostdb.HostInfo diff --git a/autopilot/contractor/hostinfo.go b/autopilot/contractor/hostinfo.go index 87f530943..fbcb9434a 100644 --- a/autopilot/contractor/hostinfo.go +++ 
b/autopilot/contractor/hostinfo.go @@ -7,7 +7,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" - "go.sia.tech/renterd/worker" ) func (c *Contractor) HostInfo(ctx context.Context, hostKey types.PublicKey, state *MaintenanceState) (api.HostHandlerResponse, error) { @@ -34,7 +33,7 @@ func (c *Contractor) HostInfo(ctx context.Context, hostKey types.PublicKey, stat minScore := c.cachedMinScore c.mu.Unlock() - gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) + gc := state.GougingChecker(cs) // ignore the pricetable's HostBlockHeight by setting it to our own blockheight host.Host.PriceTable.HostBlockHeight = cs.BlockHeight @@ -74,7 +73,7 @@ func (c *Contractor) hostInfoFromCache(ctx context.Context, state *MaintenanceSt if err != nil { c.logger.Error("failed to fetch consensus state from bus: %v", err) } else { - gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) + gc := state.GougingChecker(cs) isUsable, unusableResult := isUsableHost(state.ContractsConfig(), state.RS, gc, host, minScore, storedData) hi = hostInfo{ Usable: isUsable, diff --git a/autopilot/contractor/state.go b/autopilot/contractor/state.go index 2ae853dac..2d505d389 100644 --- a/autopilot/contractor/state.go +++ b/autopilot/contractor/state.go @@ -3,6 +3,7 @@ package contractor import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/worker" ) type ( @@ -44,6 +45,10 @@ func (state *MaintenanceState) EndHeight() uint64 { return state.AP.EndHeight() } +func (state *MaintenanceState) GougingChecker(cs api.ConsensusState) worker.GougingChecker { + return worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) +} + func (state *MaintenanceState) WantedContracts() uint64 { return state.AP.Config.Contracts.Amount } From 8a69642232ee0f130b82580abe5335e5c4542f2d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 
2024 16:21:46 +0100 Subject: [PATCH 089/201] autopilot: fix panic in TestScanner --- autopilot/scanner.go | 3 ++- autopilot/scanner_test.go | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/autopilot/scanner.go b/autopilot/scanner.go index 339d3645d..359517beb 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -33,6 +33,7 @@ type ( // scanner tests with every interface change bus interface { SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) + Autopilot(ctx context.Context, id string) (autopilot api.Autopilot, err error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } @@ -211,7 +212,7 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo // fetch the config right before removing offline hosts to get the most // recent settings in case they were updated while scanning. 
- autopilot, err := s.ap.Config(ctx) + autopilot, err := s.bus.Autopilot(ctx, s.ap.id) if err != nil { s.logger.Errorf("tryPerformHostScan: failed to fetch autopilot config: %v", err) return diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 66fb16730..3c3850b7c 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -20,6 +20,10 @@ type mockBus struct { reqs []string } +func (b *mockBus) Autopilot(ctx context.Context, id string) (autopilot api.Autopilot, err error) { + return api.Autopilot{}, nil +} + func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) From 15f70176168657a8ada395b532b7c9a5300f6a5a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 16:30:01 +0100 Subject: [PATCH 090/201] e2e: fix TestBlocklist --- autopilot/autopilot.go | 2 +- autopilot/contractor/contractor.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 512fb5d6b..d19d662d5 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -150,7 +150,7 @@ func New(id string, bus Bus, workers []Worker, logger *zap.Logger, heartbeat tim } ap.s = scanner - ap.c = contractor.New(bus, ap.logger, revisionSubmissionBuffer, revisionBroadcastInterval) + ap.c = contractor.New(bus, bus, ap.logger, revisionSubmissionBuffer, revisionBroadcastInterval) ap.m = newMigrator(ap, migrationHealthCutoff, migratorParallelSlabsPerWorker) ap.a = newAccounts(ap, ap.bus, ap.bus, ap.workers, ap.logger, accountsRefillInterval) diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index f27902e22..108d6d658 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -172,10 +172,11 @@ type ( } ) -func New(alerter alerts.Alerter, logger *zap.SugaredLogger, revisionSubmissionBuffer uint64, 
revisionBroadcastInterval time.Duration) *Contractor { +func New(bus Bus, alerter alerts.Alerter, logger *zap.SugaredLogger, revisionSubmissionBuffer uint64, revisionBroadcastInterval time.Duration) *Contractor { logger = logger.Named("contractor") ctx, cancel := context.WithCancel(context.Background()) return &Contractor{ + bus: bus, alerter: alerter, churn: newAccumulatedChurn(), logger: logger, From 4b31b83f682f8be20ab13330ecc8ab6815ca225a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 19 Mar 2024 16:58:56 +0100 Subject: [PATCH 091/201] e2e: fix TestWalletFormUnconfirmed --- autopilot/autopilot.go | 3 +++ autopilot/scanner.go | 40 ++++++++++++++------------------------- autopilot/scanner_test.go | 4 ---- 3 files changed, 17 insertions(+), 30 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index d19d662d5..f7f080297 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -284,6 +284,9 @@ func (ap *Autopilot) Run() error { return } + // prune hosts that have been offline for too long + ap.s.PruneHosts(ap.shutdownCtx, autopilot.Config.Hosts) + // Log worker id chosen for this maintenance iteration. 
workerID, err := w.ID(ap.shutdownCtx) if err != nil { diff --git a/autopilot/scanner.go b/autopilot/scanner.go index 359517beb..91fc1238d 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -33,7 +33,6 @@ type ( // scanner tests with every interface change bus interface { SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) - Autopilot(ctx context.Context, id string) (autopilot api.Autopilot, err error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } @@ -199,44 +198,33 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo go func(st string) { defer s.wg.Done() - var interrupted bool for resp := range s.launchScanWorkers(ctx, w, s.launchHostScans()) { if s.isInterrupted() || s.ap.isStopped() { - interrupted = true break } if resp.err != nil && !strings.Contains(resp.err.Error(), "connection refused") { s.logger.Error(resp.err) } } - - // fetch the config right before removing offline hosts to get the most - // recent settings in case they were updated while scanning. 
- autopilot, err := s.bus.Autopilot(ctx, s.ap.id) - if err != nil { - s.logger.Errorf("tryPerformHostScan: failed to fetch autopilot config: %v", err) - return - } - hostCfg := autopilot.Config.Hosts - maxDowntime := time.Duration(hostCfg.MaxDowntimeHours) * time.Hour - minRecentScanFailures := hostCfg.MinRecentScanFailures - - if !interrupted && maxDowntime > 0 { - s.logger.Debugf("removing hosts that have been offline for more than %v and have failed at least %d scans", maxDowntime, minRecentScanFailures) - removed, err := s.bus.RemoveOfflineHosts(ctx, minRecentScanFailures, maxDowntime) - if err != nil { - s.logger.Errorf("error occurred while removing offline hosts, err: %v", err) - } else if removed > 0 { - s.logger.Infof("removed %v offline hosts", removed) - } - } - s.mu.Lock() s.scanning = false s.logger.Debugf("%s finished after %v", st, time.Since(s.scanningLastStart)) s.mu.Unlock() }(scanType) - return +} + +func (s *scanner) PruneHosts(ctx context.Context, cfg api.HostsConfig) { + maxDowntime := time.Duration(cfg.MaxDowntimeHours) * time.Hour + minRecentScanFailures := cfg.MinRecentScanFailures + if maxDowntime > 0 { + s.logger.Debugf("removing hosts that have been offline for more than %v and have failed at least %d scans", maxDowntime, minRecentScanFailures) + removed, err := s.bus.RemoveOfflineHosts(ctx, minRecentScanFailures, maxDowntime) + if err != nil { + s.logger.Errorf("error occurred while removing offline hosts, err: %v", err) + } else if removed > 0 { + s.logger.Infof("removed %v offline hosts", removed) + } + } } func (s *scanner) tryUpdateTimeout() { diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 3c3850b7c..66fb16730 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -20,10 +20,6 @@ type mockBus struct { reqs []string } -func (b *mockBus) Autopilot(ctx context.Context, id string) (autopilot api.Autopilot, err error) { - return api.Autopilot{}, nil -} - func (b *mockBus) SearchHosts(ctx 
context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) From dc56acaab730daa70c10547d5a31f750b4187d32 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 14:10:25 +0100 Subject: [PATCH 092/201] stores: add dbHostInfo --- api/autopilot.go | 59 ---- api/host.go | 82 +++++ stores/hostdb.go | 331 ++++++++++++++++-- stores/hostdb_test.go | 217 ++++++++++++ stores/migrations.go | 6 + .../mysql/main/migration_00007_host_info.sql | 52 +++ stores/migrations/mysql/main/schema.sql | 53 +++ .../sqlite/main/migration_00007_host_info.sql | 52 +++ stores/migrations/sqlite/main/schema.sql | 19 + 9 files changed, 787 insertions(+), 84 deletions(-) create mode 100644 stores/migrations/mysql/main/migration_00007_host_info.sql create mode 100644 stores/migrations/sqlite/main/migration_00007_host_info.sql diff --git a/api/autopilot.go b/api/autopilot.go index fdd6c4942..22598b28c 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -2,8 +2,6 @@ package api import ( "errors" - "fmt" - "strings" "go.sia.tech/core/types" "go.sia.tech/renterd/hostdb" @@ -136,65 +134,8 @@ type ( Usable bool `json:"usable"` UnusableReasons []string `json:"unusableReasons"` } - - HostGougingBreakdown struct { - ContractErr string `json:"contractErr"` - DownloadErr string `json:"downloadErr"` - GougingErr string `json:"gougingErr"` - PruneErr string `json:"pruneErr"` - UploadErr string `json:"uploadErr"` - } - - HostScoreBreakdown struct { - Age float64 `json:"age"` - Collateral float64 `json:"collateral"` - Interactions float64 `json:"interactions"` - StorageRemaining float64 `json:"storageRemaining"` - Uptime float64 `json:"uptime"` - Version float64 `json:"version"` - Prices float64 `json:"prices"` - } ) -func (sb HostScoreBreakdown) String() string { - return fmt.Sprintf("Age: %v, Col: %v, Int: %v, SR: %v, UT: %v, V: %v, Pr: %v", sb.Age, sb.Collateral, sb.Interactions, sb.StorageRemaining, 
sb.Uptime, sb.Version, sb.Prices) -} - -func (hgb HostGougingBreakdown) Gouging() bool { - for _, err := range []string{ - hgb.ContractErr, - hgb.DownloadErr, - hgb.GougingErr, - hgb.PruneErr, - hgb.UploadErr, - } { - if err != "" { - return true - } - } - return false -} - -func (hgb HostGougingBreakdown) String() string { - var reasons []string - for _, errStr := range []string{ - hgb.ContractErr, - hgb.DownloadErr, - hgb.GougingErr, - hgb.PruneErr, - hgb.UploadErr, - } { - if errStr != "" { - reasons = append(reasons, errStr) - } - } - return strings.Join(reasons, ";") -} - -func (sb HostScoreBreakdown) Score() float64 { - return sb.Age * sb.Collateral * sb.Interactions * sb.StorageRemaining * sb.Uptime * sb.Version * sb.Prices -} - func (c AutopilotConfig) Validate() error { if c.Hosts.MaxDowntimeHours > 99*365*24 { return ErrMaxDowntimeHoursTooHigh diff --git a/api/host.go b/api/host.go index 293403c0a..2c1e82f30 100644 --- a/api/host.go +++ b/api/host.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "net/url" + "strings" "go.sia.tech/core/types" "go.sia.tech/renterd/hostdb" @@ -23,6 +24,10 @@ var ( // ErrHostNotFound is returned when a host can't be retrieved from the // database. ErrHostNotFound = errors.New("host doesn't exist in hostdb") + + // ErrHostInfoNotFound is returned when host info can't be retrieved from + // the database. 
+ ErrHostInfoNotFound = errors.New("host info doesn't exist in hostdb") ) type ( @@ -109,3 +114,80 @@ func (opts HostsForScanningOptions) Apply(values url.Values) { values.Set("lastScan", TimeRFC3339(opts.MaxLastScan).String()) } } + +type ( + HostInfo struct { + Host hostdb.Host `json:"host"` + Gouging HostGougingBreakdown `json:"gouging"` + Score HostScoreBreakdown `json:"score"` + Usability HostUsabilityBreakdown `json:"usability"` + } + + HostGougingBreakdown struct { + ContractErr string `json:"contractErr"` + DownloadErr string `json:"downloadErr"` + GougingErr string `json:"gougingErr"` + PruneErr string `json:"pruneErr"` + UploadErr string `json:"uploadErr"` + } + + HostScoreBreakdown struct { + Age float64 `json:"age"` + Collateral float64 `json:"collateral"` + Interactions float64 `json:"interactions"` + StorageRemaining float64 `json:"storageRemaining"` + Uptime float64 `json:"uptime"` + Version float64 `json:"version"` + Prices float64 `json:"prices"` + } + + HostUsabilityBreakdown struct { + Blocked bool `json:"blocked"` + Offline bool `json:"offline"` + LowScore bool `json:"lowScore"` + RedundantIP bool `json:"redundantIP"` + Gouging bool `json:"gouging"` + NotAcceptingContracts bool `json:"notAcceptingContracts"` + NotAnnounced bool `json:"notAnnounced"` + NotCompletingScan bool `json:"notCompletingScan"` + } +) + +func (sb HostScoreBreakdown) String() string { + return fmt.Sprintf("Age: %v, Col: %v, Int: %v, SR: %v, UT: %v, V: %v, Pr: %v", sb.Age, sb.Collateral, sb.Interactions, sb.StorageRemaining, sb.Uptime, sb.Version, sb.Prices) +} + +func (hgb HostGougingBreakdown) Gouging() bool { + for _, err := range []string{ + hgb.ContractErr, + hgb.DownloadErr, + hgb.GougingErr, + hgb.PruneErr, + hgb.UploadErr, + } { + if err != "" { + return true + } + } + return false +} + +func (hgb HostGougingBreakdown) String() string { + var reasons []string + for _, errStr := range []string{ + hgb.ContractErr, + hgb.DownloadErr, + hgb.GougingErr, + hgb.PruneErr, + 
hgb.UploadErr, + } { + if errStr != "" { + reasons = append(reasons, errStr) + } + } + return strings.Join(reasons, ";") +} + +func (sb HostScoreBreakdown) Score() float64 { + return sb.Age * sb.Collateral * sb.Interactions * sb.StorageRemaining * sb.Uptime * sb.Version * sb.Prices +} diff --git a/stores/hostdb.go b/stores/hostdb.go index 95e37a26c..c344fd83f 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -80,6 +80,44 @@ type ( Blocklist []dbBlocklistEntry `gorm:"many2many:host_blocklist_entry_hosts;constraint:OnDelete:CASCADE"` } + // dbHostInfo contains information about a host that is collected and used + // by the autopilot. + dbHostInfo struct { + Model + + DBAutopilotID uint `gorm:"index:idx_host_infos_id,unique"` + DBAutopilot dbAutopilot + + DBHostID uint `gorm:"index:idx_host_infos_id,unique"` + DBHost dbHost + + // usability + UsabilityBlocked bool `gorm:"index:idx_host_infos_usability_blocked"` + UsabilityOffline bool `gorm:"index:idx_host_infos_usability_offline"` + UsabilityLowScore bool `gorm:"index:idx_host_infos_usability_low_score"` + UsabilityRedundantIP bool `gorm:"index:idx_host_infos_usability_redundant_ip"` + UsabilityGouging bool `gorm:"index:idx_host_infos_usability_gouging"` + UsabilityNotAcceptingContracts bool `gorm:"index:idx_host_infos_usability_not_accepting_contracts"` + UsabilityNotAnnounced bool `gorm:"index:idx_host_infos_usability_not_announced"` + UsabilityNotCompletingScan bool `gorm:"index:idx_host_infos_usability_not_completing_scan"` + + // score + ScoreAge float64 `gorm:"index:idx_host_infos_score_age"` + ScoreCollateral float64 `gorm:"index:idx_host_infos_score_collateral"` + ScoreInteractions float64 `gorm:"index:idx_host_infos_score_interactions"` + ScoreStorageRemaining float64 `gorm:"index:idx_host_infos_score_storage_remaining"` + ScoreUptime float64 `gorm:"index:idx_host_infos_score_uptime"` + ScoreVersion float64 `gorm:"index:idx_host_infos_score_version"` + ScorePrices float64 
`gorm:"index:idx_host_infos_score_prices"` + + // gouging + GougingContractErr string + GougingDownloadErr string + GougingGougingErr string + GougingPruneErr string + GougingUploadErr string + } + // dbAllowlistEntry defines a table that stores the host blocklist. dbAllowlistEntry struct { Model @@ -263,6 +301,9 @@ func (dbConsensusInfo) TableName() string { return "consensus_infos" } // TableName implements the gorm.Tabler interface. func (dbHost) TableName() string { return "hosts" } +// TableName implements the gorm.Tabler interface. +func (dbHostInfo) TableName() string { return "host_infos" } + // TableName implements the gorm.Tabler interface. func (dbAllowlistEntry) TableName() string { return "host_allowlist_entries" } @@ -300,6 +341,68 @@ func (h dbHost) convert() hostdb.Host { } } +func (hi dbHostInfo) convert() api.HostInfo { + return api.HostInfo{ + Host: hi.DBHost.convert(), + Gouging: api.HostGougingBreakdown{ + ContractErr: hi.GougingContractErr, + DownloadErr: hi.GougingDownloadErr, + GougingErr: hi.GougingGougingErr, + PruneErr: hi.GougingPruneErr, + UploadErr: hi.GougingUploadErr, + }, + Score: api.HostScoreBreakdown{ + Age: hi.ScoreAge, + Collateral: hi.ScoreCollateral, + Interactions: hi.ScoreInteractions, + StorageRemaining: hi.ScoreStorageRemaining, + Uptime: hi.ScoreUptime, + Version: hi.ScoreVersion, + Prices: hi.ScorePrices, + }, + Usability: api.HostUsabilityBreakdown{ + Blocked: hi.UsabilityBlocked, + Offline: hi.UsabilityOffline, + LowScore: hi.UsabilityLowScore, + RedundantIP: hi.UsabilityRedundantIP, + Gouging: hi.UsabilityGouging, + NotAcceptingContracts: hi.UsabilityNotAcceptingContracts, + NotAnnounced: hi.UsabilityNotAnnounced, + NotCompletingScan: hi.UsabilityNotCompletingScan, + }, + } +} + +func convertHostInfo(apID, hID uint, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) *dbHostInfo { + return &dbHostInfo{ + DBAutopilotID: apID, + DBHostID: hID, + + UsabilityBlocked: 
usability.Blocked, + UsabilityOffline: usability.Offline, + UsabilityLowScore: usability.LowScore, + UsabilityRedundantIP: usability.RedundantIP, + UsabilityGouging: usability.Gouging, + UsabilityNotAcceptingContracts: usability.NotAcceptingContracts, + UsabilityNotAnnounced: usability.NotAnnounced, + UsabilityNotCompletingScan: usability.NotCompletingScan, + + ScoreAge: score.Age, + ScoreCollateral: score.Collateral, + ScoreInteractions: score.Interactions, + ScoreStorageRemaining: score.StorageRemaining, + ScoreUptime: score.Uptime, + ScoreVersion: score.Version, + ScorePrices: score.Prices, + + GougingContractErr: gouging.ContractErr, + GougingDownloadErr: gouging.DownloadErr, + GougingGougingErr: gouging.GougingErr, + GougingPruneErr: gouging.PruneErr, + GougingUploadErr: gouging.UploadErr, + } +} + func (h *dbHost) BeforeCreate(tx *gorm.DB) (err error) { tx.Statement.AddClause(clause.OnConflict{ Columns: []clause.Column{{Name: "public_key"}}, @@ -426,6 +529,180 @@ func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.H }, nil } +func (ss *SQLStore) HostInfo(ctx context.Context, autopilotID string, hk types.PublicKey) (hi api.HostInfo, err error) { + err = ss.db.Transaction(func(tx *gorm.DB) error { + // fetch ap id + var apID uint + if err := tx. + Model(&dbAutopilot{}). + Where("identifier = ?", autopilotID). + Select("id"). + Take(&apID). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrAutopilotNotFound + } else if err != nil { + return err + } + + // fetch host id + var hID uint + if err := tx. + Model(&dbHost{}). + Where("public_key = ?", publicKey(hk)). + Select("id"). + Take(&hID). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrHostNotFound + } else if err != nil { + return err + } + + // fetch host info + var entity dbHostInfo + if err := tx. + Model(&dbHostInfo{}). + Where("db_autopilot_id = ? AND db_host_id = ?", apID, hID). + Preload("DBHost"). + First(&entity). 
+ Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrHostInfoNotFound + } else if err != nil { + return err + } + + hi = entity.convert() + return nil + }) + return +} + +func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (his []api.HostInfo, err error) { + if offset < 0 { + return nil, ErrNegativeOffset + } + + err = ss.db.Transaction(func(tx *gorm.DB) error { + // fetch ap id + var apID uint + if err := tx. + Model(&dbAutopilot{}). + Where("identifier = ?", autopilotID). + Select("id"). + Take(&apID). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrAutopilotNotFound + } else if err != nil { + return err + } + + // prepare query + query := tx. + Model(&dbHostInfo{}). + Where("db_autopilot_id = ?", apID). + Joins("DBHost") + + // apply mode filter + switch filterMode { + case api.HostFilterModeAllowed: + query = query.Scopes(ss.excludeBlocked("DBHost")) + case api.HostFilterModeBlocked: + query = query.Scopes(ss.excludeAllowed("DBHost")) + case api.HostFilterModeAll: + // nothing to do + default: + return fmt.Errorf("invalid filter mode: %v", filterMode) + } + + // apply usability filter + switch usabilityMode { + case api.UsabilityFilterModeUsable: + query = query.Where("usability_blocked = ? AND usability_offline = ? AND usability_low_score = ? AND usability_redundant_ip = ? AND usability_gouging = ? AND usability_not_accepting_contracts = ? AND usability_not_announced = ? AND usability_not_completing_scan = ?", + false, false, false, false, false, false, false, false) + case api.UsabilityFilterModeUnusable: + query = query.Where("usability_blocked = ? OR usability_offline = ? OR usability_low_score = ? OR usability_redundant_ip = ? OR usability_gouging = ? OR usability_not_accepting_contracts = ? OR usability_not_announced = ? 
OR usability_not_completing_scan = ?", + true, true, true, true, true, true, true, true) + case api.UsabilityFilterModeAll: + // nothing to do + default: + return fmt.Errorf("invalid usability mode: %v", usabilityMode) + } + + // apply address filter + if addressContains != "" { + query = query.Scopes(func(d *gorm.DB) *gorm.DB { + return d.Where("net_address LIKE ?", "%"+addressContains+"%") + }) + } + + // apply key filter + if len(keyIn) > 0 { + pubKeys := make([]publicKey, len(keyIn)) + for i, pk := range keyIn { + pubKeys[i] = publicKey(pk) + } + query = query.Scopes(func(d *gorm.DB) *gorm.DB { + return d.Where("public_key IN ?", pubKeys) + }) + } + + // fetch host info + var infos []dbHostInfo + if err := query. + Debug(). + Offset(offset). + Limit(limit). + Find(&infos). + Error; err != nil { + return err + } + for _, hi := range infos { + his = append(his, hi.convert()) + } + return nil + }) + return +} + +func (ss *SQLStore) UpdateHostInfo(ctx context.Context, autopilotID string, hk types.PublicKey, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) (err error) { + err = ss.db.Transaction(func(tx *gorm.DB) error { + // fetch ap id + var apID uint + if err := tx. + Model(&dbAutopilot{}). + Where("identifier = ?", autopilotID). + Select("id"). + Take(&apID). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrAutopilotNotFound + } else if err != nil { + return err + } + + // fetch host id + var hID uint + if err := tx. + Model(&dbHost{}). + Where("public_key = ?", publicKey(hk)). + Select("id"). + Take(&hID). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrHostNotFound + } else if err != nil { + return err + } + + // update host info + return tx. + Clauses(clause.OnConflict{ + Columns: []clause.Column{{Name: "db_autopilot_id"}, {Name: "db_host_id"}}, + UpdateAll: true, + }). + Create(convertHostInfo(apID, hID, gouging, score, usability)). 
+ Error + }) + return +} + // HostsForScanning returns the address of hosts for scanning. func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) { if offset < 0 { @@ -471,9 +748,9 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains query := ss.db switch filterMode { case api.HostFilterModeAllowed: - query = query.Scopes(ss.excludeBlocked) + query = query.Scopes(ss.excludeBlocked("hosts")) case api.HostFilterModeBlocked: - query = query.Scopes(ss.excludeAllowed) + query = query.Scopes(ss.excludeAllowed("hosts")) blocked = true case api.HostFilterModeAll: // preload allowlist and blocklist @@ -932,37 +1209,41 @@ func (ss *SQLStore) processConsensusChangeHostDB(cc modules.ConsensusChange) { // excludeBlocked can be used as a scope for a db transaction to exclude blocked // hosts. -func (ss *SQLStore) excludeBlocked(db *gorm.DB) *gorm.DB { - ss.mu.Lock() - defer ss.mu.Unlock() +func (ss *SQLStore) excludeBlocked(alias string) func(db *gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + ss.mu.Lock() + defer ss.mu.Unlock() - if ss.hasAllowlist { - db = db.Where("EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - if ss.hasBlocklist { - db = db.Where("NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") + if ss.hasAllowlist { + db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + } + if ss.hasBlocklist { + db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + } + return db } - return db } // excludeAllowed can be used as a scope for a db transaction to exclude allowed // hosts. 
-func (ss *SQLStore) excludeAllowed(db *gorm.DB) *gorm.DB { - ss.mu.Lock() - defer ss.mu.Unlock() +func (ss *SQLStore) excludeAllowed(alias string) func(db *gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + ss.mu.Lock() + defer ss.mu.Unlock() - if ss.hasAllowlist { - db = db.Where("NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - if ss.hasBlocklist { - db = db.Where("EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") - } - if !ss.hasAllowlist && !ss.hasBlocklist { - // if neither an allowlist nor a blocklist exist, all hosts are allowed - // which means we return none - db = db.Where("1 = 0") + if ss.hasAllowlist { + db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + } + if ss.hasBlocklist { + db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + } + if !ss.hasAllowlist && !ss.hasBlocklist { + // if neither an allowlist nor a blocklist exist, all hosts are allowed + // which means we return none + db = db.Where("1 = 0") + } + return db } - return db } func (ss *SQLStore) isBlocked(h dbHost) (blocked bool) { diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 35872ea2d..8a75caf6d 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -1064,6 +1064,191 @@ func TestAnnouncementMaxAge(t *testing.T) { } } +func TestHostInfo(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // fetch info for a non-existing autopilot + _, err := ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if !errors.Is(err, api.ErrAutopilotNotFound) { + t.Fatal(err) + } + + // add autopilot + err = ss.UpdateAutopilot(context.Background(), api.Autopilot{ID: "foo"}) + if err != nil { + t.Fatal(err) + } + + // fetch info for a non-existing host + _, err = 
ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if !errors.Is(err, api.ErrHostNotFound) { + t.Fatal(err) + } + + // add host + err = ss.addTestHost(types.PublicKey{1}) + if err != nil { + t.Fatal(err) + } + h, err := ss.Host(context.Background(), types.PublicKey{1}) + if err != nil { + t.Fatal(err) + } + + // fetch non-existing info + _, err = ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if !errors.Is(err, api.ErrHostInfoNotFound) { + t.Fatal(err) + } + + // add host info + want := newTestHostInfo(h.Host) + err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) + if err != nil { + t.Fatal(err) + } + + // fetch info + got, err := ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(got, want) { + t.Fatal("mismatch", cmp.Diff(got, want)) + } + + // update info + want.Score.Age = 0 + err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) + if err != nil { + t.Fatal(err) + } + + // fetch info + got, err = ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) + if err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(got, want) { + t.Fatal("mismatch") + } + + // add another host info + err = ss.addCustomTestHost(types.PublicKey{2}, "bar.com:1000") + if err != nil { + t.Fatal(err) + } + err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{2}, want.Gouging, want.Score, want.Usability) + if err != nil { + t.Fatal(err) + } + + // fetch all infos for autopilot + his, err := ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 2 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{1}) || his[1].Host.PublicKey != (types.PublicKey{2}) { + t.Fatal("unexpected", his) + } + + // fetch infos 
using offset & limit + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, 1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 1, 1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 2, 1) + if err != nil { + t.Fatal(err) + } else if len(his) != 0 { + t.Fatal("unexpected") + } + + // fetch infos using net addresses + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "bar", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{2}) { + t.Fatal("unexpected", his) + } + + // fetch infos using keyIn + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{{2}}, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{2}) { + t.Fatal("unexpected", his) + } + + // fetch infos using mode filters + err = ss.UpdateHostBlocklistEntries(context.Background(), []string{"bar.com:1000"}, nil, false) + if err != nil { + t.Fatal(err) + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{1}) { + t.Fatal("unexpected", his) + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) 
!= 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{2}) { + t.Fatal("unexpected", his) + } + err = ss.UpdateHostBlocklistEntries(context.Background(), nil, nil, true) + if err != nil { + t.Fatal(err) + } + + // fetch infos using usability filters + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 0 { + t.Fatal("unexpected") + } + + // update info + want.Usability.Blocked = false + want.Usability.Offline = false + want.Usability.LowScore = false + want.Usability.RedundantIP = false + want.Usability.Gouging = false + want.Usability.NotAcceptingContracts = false + want.Usability.NotAnnounced = false + want.Usability.NotCompletingScan = false + err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) + if err != nil { + t.Fatal(err) + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{1}) { + t.Fatal("unexpected", his) + } +} + // addTestHosts adds 'n' hosts to the db and returns their keys. 
func (s *SQLStore) addTestHosts(n int) (keys []types.PublicKey, err error) { cnt, err := s.contractsCount() @@ -1156,3 +1341,35 @@ func newTestTransaction(ha modules.HostAnnouncement, sk types.PrivateKey) stypes buf.Write(encoding.Marshal(sk.SignHash(types.Hash256(crypto.HashObject(ha))))) return stypes.Transaction{ArbitraryData: [][]byte{buf.Bytes()}} } + +func newTestHostInfo(h hostdb.Host) api.HostInfo { + return api.HostInfo{ + Host: h, + Gouging: api.HostGougingBreakdown{ + ContractErr: "foo", + DownloadErr: "bar", + GougingErr: "baz", + PruneErr: "qux", + UploadErr: "quuz", + }, + Score: api.HostScoreBreakdown{ + Age: .1, + Collateral: .2, + Interactions: .3, + StorageRemaining: .4, + Uptime: .5, + Version: .6, + Prices: .7, + }, + Usability: api.HostUsabilityBreakdown{ + Blocked: true, + Offline: true, + LowScore: true, + RedundantIP: true, + Gouging: true, + NotAcceptingContracts: true, + NotAnnounced: true, + NotCompletingScan: true, + }, + } +} diff --git a/stores/migrations.go b/stores/migrations.go index 6ccc75964..9f874935a 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -62,6 +62,12 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { return performMigration(tx, dbIdentifier, "00006_idx_objects_created_at", logger) }, }, + { + ID: "00007_host_info", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00007_host_info", logger) + }, + }, } // Create migrator. 
diff --git a/stores/migrations/mysql/main/migration_00007_host_info.sql b/stores/migrations/mysql/main/migration_00007_host_info.sql new file mode 100644 index 000000000..c13f5c396 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00007_host_info.sql @@ -0,0 +1,52 @@ +-- dbHostInfo +CREATE TABLE `host_infos` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + + `db_autopilot_id` bigint unsigned NOT NULL, + `db_host_id` bigint unsigned NOT NULL, + + `usability_blocked` boolean NOT NULL DEFAULT false, + `usability_offline` boolean NOT NULL DEFAULT false, + `usability_low_score` boolean NOT NULL DEFAULT false, + `usability_redundant_ip` boolean NOT NULL DEFAULT false, + `usability_gouging` boolean NOT NULL DEFAULT false, + `usability_not_accepting_contracts` boolean NOT NULL DEFAULT false, + `usability_not_announced` boolean NOT NULL DEFAULT false, + `usability_not_completing_scan` boolean NOT NULL DEFAULT false, + + `score_age` double NOT NULL, + `score_collateral` double NOT NULL, + `score_interactions` double NOT NULL, + `score_storage_remaining` double NOT NULL, + `score_uptime` double NOT NULL, + `score_version` double NOT NULL, + `score_prices` double NOT NULL, + + `gouging_contract_err` text, + `gouging_download_err` text, + `gouging_gouging_err` text, + `gouging_prune_err` text, + `gouging_upload_err` text, + + PRIMARY KEY (`id`), + UNIQUE KEY `idx_host_infos_id` (`db_autopilot_id`, `db_host_id`), + INDEX `idx_host_infos_usability_blocked` (`usability_blocked`), + INDEX `idx_host_infos_usability_offline` (`usability_offline`), + INDEX `idx_host_infos_usability_low_score` (`usability_low_score`), + INDEX `idx_host_infos_usability_redundant_ip` (`usability_redundant_ip`), + INDEX `idx_host_infos_usability_gouging` (`usability_gouging`), + INDEX `idx_host_infos_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), + INDEX `idx_host_infos_usability_not_announced` (`usability_not_announced`), + 
INDEX `idx_host_infos_usability_not_completing_scan` (`usability_not_completing_scan`), + INDEX `idx_host_infos_score_age` (`score_age`), + INDEX `idx_host_infos_score_collateral` (`score_collateral`), + INDEX `idx_host_infos_score_interactions` (`score_interactions`), + INDEX `idx_host_infos_score_storage_remaining` (`score_storage_remaining`), + INDEX `idx_host_infos_score_uptime` (`score_uptime`), + INDEX `idx_host_infos_score_version` (`score_version`), + INDEX `idx_host_infos_score_prices` (`score_prices`), + + CONSTRAINT `fk_host_infos_autopilot` FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_host_infos_host` FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 68b42ae47..e39b7f963 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -422,5 +422,58 @@ CREATE TABLE `object_user_metadata` ( CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; +-- dbHostInfo +CREATE TABLE `host_infos` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + + `db_autopilot_id` bigint unsigned NOT NULL, + `db_host_id` bigint unsigned NOT NULL, + + `usability_blocked` boolean NOT NULL DEFAULT false, + `usability_offline` boolean NOT NULL DEFAULT false, + `usability_low_score` boolean NOT NULL DEFAULT false, + `usability_redundant_ip` boolean NOT NULL DEFAULT false, + `usability_gouging` boolean NOT NULL DEFAULT false, + `usability_not_accepting_contracts` boolean NOT NULL DEFAULT false, + `usability_not_announced` boolean NOT NULL DEFAULT false, + `usability_not_completing_scan` boolean NOT NULL DEFAULT 
false, + + `score_age` double NOT NULL, + `score_collateral` double NOT NULL, + `score_interactions` double NOT NULL, + `score_storage_remaining` double NOT NULL, + `score_uptime` double NOT NULL, + `score_version` double NOT NULL, + `score_prices` double NOT NULL, + + `gouging_contract_err` text, + `gouging_download_err` text, + `gouging_gouging_err` text, + `gouging_prune_err` text, + `gouging_upload_err` text, + + PRIMARY KEY (`id`), + UNIQUE KEY `idx_host_infos_id` (`db_autopilot_id`, `db_host_id`), + INDEX `idx_host_infos_usability_blocked` (`usability_blocked`), + INDEX `idx_host_infos_usability_offline` (`usability_offline`), + INDEX `idx_host_infos_usability_low_score` (`usability_low_score`), + INDEX `idx_host_infos_usability_redundant_ip` (`usability_redundant_ip`), + INDEX `idx_host_infos_usability_gouging` (`usability_gouging`), + INDEX `idx_host_infos_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), + INDEX `idx_host_infos_usability_not_announced` (`usability_not_announced`), + INDEX `idx_host_infos_usability_not_completing_scan` (`usability_not_completing_scan`), + INDEX `idx_host_infos_score_age` (`score_age`), + INDEX `idx_host_infos_score_collateral` (`score_collateral`), + INDEX `idx_host_infos_score_interactions` (`score_interactions`), + INDEX `idx_host_infos_score_storage_remaining` (`score_storage_remaining`), + INDEX `idx_host_infos_score_uptime` (`score_uptime`), + INDEX `idx_host_infos_score_version` (`score_version`), + INDEX `idx_host_infos_score_prices` (`score_prices`), + + CONSTRAINT `fk_host_infos_autopilot` FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_host_infos_host` FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + -- create default bucket INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); \ No newline at end of file diff --git 
a/stores/migrations/sqlite/main/migration_00007_host_info.sql b/stores/migrations/sqlite/main/migration_00007_host_info.sql new file mode 100644 index 000000000..910dd637c --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00007_host_info.sql @@ -0,0 +1,52 @@ +-- dbHostInfo +CREATE TABLE `host_infos` ( + `id` INTEGER PRIMARY KEY AUTOINCREMENT, + `created_at` datetime, + + `db_autopilot_id` INTEGER NOT NULL, + `db_host_id` INTEGER NOT NULL, + + `usability_blocked` INTEGER NOT NULL DEFAULT 0, + `usability_offline` INTEGER NOT NULL DEFAULT 0, + `usability_low_score` INTEGER NOT NULL DEFAULT 0, + `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, + `usability_gouging` INTEGER NOT NULL DEFAULT 0, + `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, + `usability_not_announced` INTEGER NOT NULL DEFAULT 0, + `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, + + `score_age` REAL NOT NULL, + `score_collateral` REAL NOT NULL, + `score_interactions` REAL NOT NULL, + `score_storage_remaining` REAL NOT NULL, + `score_uptime` REAL NOT NULL, + `score_version` REAL NOT NULL, + `score_prices` REAL NOT NULL, + + `gouging_contract_err` TEXT, + `gouging_download_err` TEXT, + `gouging_gouging_err` TEXT, + `gouging_prune_err` TEXT, + `gouging_upload_err` TEXT, + + FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, + FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE +); + +-- Indexes creation +CREATE UNIQUE INDEX `idx_host_infos_id` ON `host_infos` (`db_autopilot_id`, `db_host_id`); +CREATE INDEX `idx_host_infos_usability_blocked` ON `host_infos` (`usability_blocked`); +CREATE INDEX `idx_host_infos_usability_offline` ON `host_infos` (`usability_offline`); +CREATE INDEX `idx_host_infos_usability_low_score` ON `host_infos` (`usability_low_score`); +CREATE INDEX `idx_host_infos_usability_redundant_ip` ON `host_infos` (`usability_redundant_ip`); +CREATE INDEX `idx_host_infos_usability_gouging` ON 
`host_infos` (`usability_gouging`); +CREATE INDEX `idx_host_infos_usability_not_accepting_contracts` ON `host_infos` (`usability_not_accepting_contracts`); +CREATE INDEX `idx_host_infos_usability_not_announced` ON `host_infos` (`usability_not_announced`); +CREATE INDEX `idx_host_infos_usability_not_completing_scan` ON `host_infos` (`usability_not_completing_scan`); +CREATE INDEX `idx_host_infos_score_age` ON `host_infos` (`score_age`); +CREATE INDEX `idx_host_infos_score_collateral` ON `host_infos` (`score_collateral`); +CREATE INDEX `idx_host_infos_score_interactions` ON `host_infos` (`score_interactions`); +CREATE INDEX `idx_host_infos_score_storage_remaining` ON `host_infos` (`score_storage_remaining`); +CREATE INDEX `idx_host_infos_score_uptime` ON `host_infos` (`score_uptime`); +CREATE INDEX `idx_host_infos_score_version` ON `host_infos` (`score_version`); +CREATE INDEX `idx_host_infos_score_prices` ON `host_infos` (`score_prices`); diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 9875e81e3..791fce1ca 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -149,5 +149,24 @@ CREATE UNIQUE INDEX `idx_module_event_url` ON `webhooks`(`module`,`event`,`url`) CREATE TABLE `object_user_metadata` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer DEFAULT NULL,`db_multipart_upload_id` integer DEFAULT NULL,`key` text NOT NULL,`value` text, CONSTRAINT `fk_object_user_metadata` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL); CREATE UNIQUE INDEX `idx_object_user_metadata_key` ON `object_user_metadata`(`db_object_id`,`db_multipart_upload_id`,`key`); +-- dbHostInfo +CREATE TABLE `host_infos` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `created_at` datetime, `db_autopilot_id` 
INTEGER NOT NULL, `db_host_id` INTEGER NOT NULL, `usability_blocked` INTEGER NOT NULL DEFAULT 0, `usability_offline` INTEGER NOT NULL DEFAULT 0, `usability_low_score` INTEGER NOT NULL DEFAULT 0, `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, `usability_gouging` INTEGER NOT NULL DEFAULT 0, `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, `usability_not_announced` INTEGER NOT NULL DEFAULT 0, `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, `score_age` REAL NOT NULL, `score_collateral` REAL NOT NULL, `score_interactions` REAL NOT NULL, `score_storage_remaining` REAL NOT NULL, `score_uptime` REAL NOT NULL, `score_version` REAL NOT NULL, `score_prices` REAL NOT NULL, `gouging_contract_err` TEXT, `gouging_download_err` TEXT, `gouging_gouging_err` TEXT, `gouging_prune_err` TEXT, `gouging_upload_err` TEXT, FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE); +CREATE UNIQUE INDEX `idx_host_infos_id` ON `host_infos` (`db_autopilot_id`, `db_host_id`); +CREATE INDEX `idx_host_infos_usability_blocked` ON `host_infos` (`usability_blocked`); +CREATE INDEX `idx_host_infos_usability_offline` ON `host_infos` (`usability_offline`); +CREATE INDEX `idx_host_infos_usability_low_score` ON `host_infos` (`usability_low_score`); +CREATE INDEX `idx_host_infos_usability_redundant_ip` ON `host_infos` (`usability_redundant_ip`); +CREATE INDEX `idx_host_infos_usability_gouging` ON `host_infos` (`usability_gouging`); +CREATE INDEX `idx_host_infos_usability_not_accepting_contracts` ON `host_infos` (`usability_not_accepting_contracts`); +CREATE INDEX `idx_host_infos_usability_not_announced` ON `host_infos` (`usability_not_announced`); +CREATE INDEX `idx_host_infos_usability_not_completing_scan` ON `host_infos` (`usability_not_completing_scan`); +CREATE INDEX `idx_host_infos_score_age` ON `host_infos` (`score_age`); +CREATE INDEX `idx_host_infos_score_collateral` ON 
`host_infos` (`score_collateral`); +CREATE INDEX `idx_host_infos_score_interactions` ON `host_infos` (`score_interactions`); +CREATE INDEX `idx_host_infos_score_storage_remaining` ON `host_infos` (`score_storage_remaining`); +CREATE INDEX `idx_host_infos_score_uptime` ON `host_infos` (`score_uptime`); +CREATE INDEX `idx_host_infos_score_version` ON `host_infos` (`score_version`); +CREATE INDEX `idx_host_infos_score_prices` ON `host_infos` (`score_prices`); + -- create default bucket INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); From 3bf76384ec8cb76442c0a9f1d632d2b08d846f31 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 14:19:24 +0100 Subject: [PATCH 093/201] stores: extend TestHostInfo with assertions for CASCADE DELETE --- stores/hostdb.go | 1 - stores/hostdb_test.go | 31 +++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/stores/hostdb.go b/stores/hostdb.go index c344fd83f..5f7732fcd 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -648,7 +648,6 @@ func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string, filterMod // fetch host info var infos []dbHostInfo if err := query. - Debug(). Offset(offset). Limit(limit). Find(&infos). 
diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 8a75caf6d..c9ab2ba7e 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -1247,6 +1247,37 @@ func TestHostInfo(t *testing.T) { } else if his[0].Host.PublicKey != (types.PublicKey{1}) { t.Fatal("unexpected", his) } + + // assert cascade delete on host + err = ss.db.Exec("DELETE FROM hosts WHERE public_key = ?", publicKey(types.PublicKey{1})).Error + if err != nil { + t.Fatal(err) + } + his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 0 { + t.Fatal("unexpected") + } + + // assert cascade delete on autopilot + var cnt uint64 + err = ss.db.Raw("SELECT COUNT(*) FROM host_infos").Scan(&cnt).Error + if err != nil { + t.Fatal(err) + } else if cnt == 0 { + t.Fatal("unexpected", cnt) + } + err = ss.db.Exec("DELETE FROM autopilots WHERE identifier = ?", "foo").Error + if err != nil { + t.Fatal(err) + } + err = ss.db.Raw("SELECT COUNT(*) FROM host_infos").Scan(&cnt).Error + if err != nil { + t.Fatal(err) + } else if cnt != 0 { + t.Fatal("unexpected", cnt) + } } // addTestHosts adds 'n' hosts to the db and returns their keys. 
From 864c08b7b12f5426b1775a3e7d0520deb3cec6dc Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 20 Mar 2024 15:02:36 +0100 Subject: [PATCH 094/201] contractor: fetch all hosts --- autopilot/contractor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 9c778004e..27cbd3834 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -240,7 +240,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch all hosts - hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) + hosts, err := c.ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAll}) if err != nil { return false, err } From d74817ed2e8b248ba9598a304d2820b07ed218fd Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 21 Mar 2024 10:42:08 +0100 Subject: [PATCH 095/201] stores: add host scopes --- stores/hostdb.go | 312 +++++++++++++++++++++-------------------------- stores/sql.go | 24 +++- 2 files changed, 157 insertions(+), 179 deletions(-) diff --git a/stores/hostdb.go b/stores/hostdb.go index 5f7732fcd..5de891649 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -85,30 +85,30 @@ type ( dbHostInfo struct { Model - DBAutopilotID uint `gorm:"index:idx_host_infos_id,unique"` + DBAutopilotID uint DBAutopilot dbAutopilot - DBHostID uint `gorm:"index:idx_host_infos_id,unique"` + DBHostID uint DBHost dbHost // usability - UsabilityBlocked bool `gorm:"index:idx_host_infos_usability_blocked"` - UsabilityOffline bool `gorm:"index:idx_host_infos_usability_offline"` - UsabilityLowScore bool `gorm:"index:idx_host_infos_usability_low_score"` - UsabilityRedundantIP bool `gorm:"index:idx_host_infos_usability_redundant_ip"` - UsabilityGouging bool `gorm:"index:idx_host_infos_usability_gouging"` - UsabilityNotAcceptingContracts bool `gorm:"index:idx_host_infos_usability_not_accepting_contracts"` - UsabilityNotAnnounced 
bool `gorm:"index:idx_host_infos_usability_not_announced"` - UsabilityNotCompletingScan bool `gorm:"index:idx_host_infos_usability_not_completing_scan"` + UsabilityBlocked bool + UsabilityOffline bool + UsabilityLowScore bool + UsabilityRedundantIP bool + UsabilityGouging bool + UsabilityNotAcceptingContracts bool + UsabilityNotAnnounced bool + UsabilityNotCompletingScan bool // score - ScoreAge float64 `gorm:"index:idx_host_infos_score_age"` - ScoreCollateral float64 `gorm:"index:idx_host_infos_score_collateral"` - ScoreInteractions float64 `gorm:"index:idx_host_infos_score_interactions"` - ScoreStorageRemaining float64 `gorm:"index:idx_host_infos_score_storage_remaining"` - ScoreUptime float64 `gorm:"index:idx_host_infos_score_uptime"` - ScoreVersion float64 `gorm:"index:idx_host_infos_score_version"` - ScorePrices float64 `gorm:"index:idx_host_infos_score_prices"` + ScoreAge float64 + ScoreCollateral float64 + ScoreInteractions float64 + ScoreStorageRemaining float64 + ScoreUptime float64 + ScoreVersion float64 + ScorePrices float64 // gouging GougingContractErr string @@ -373,36 +373,6 @@ func (hi dbHostInfo) convert() api.HostInfo { } } -func convertHostInfo(apID, hID uint, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) *dbHostInfo { - return &dbHostInfo{ - DBAutopilotID: apID, - DBHostID: hID, - - UsabilityBlocked: usability.Blocked, - UsabilityOffline: usability.Offline, - UsabilityLowScore: usability.LowScore, - UsabilityRedundantIP: usability.RedundantIP, - UsabilityGouging: usability.Gouging, - UsabilityNotAcceptingContracts: usability.NotAcceptingContracts, - UsabilityNotAnnounced: usability.NotAnnounced, - UsabilityNotCompletingScan: usability.NotCompletingScan, - - ScoreAge: score.Age, - ScoreCollateral: score.Collateral, - ScoreInteractions: score.Interactions, - ScoreStorageRemaining: score.StorageRemaining, - ScoreUptime: score.Uptime, - ScoreVersion: score.Version, - ScorePrices: score.Prices, 
- - GougingContractErr: gouging.ContractErr, - GougingDownloadErr: gouging.DownloadErr, - GougingGougingErr: gouging.GougingErr, - GougingPruneErr: gouging.PruneErr, - GougingUploadErr: gouging.UploadErr, - } -} - func (h *dbHost) BeforeCreate(tx *gorm.DB) (err error) { tx.Statement.AddClause(clause.OnConflict{ Columns: []clause.Column{{Name: "public_key"}}, @@ -531,45 +501,31 @@ func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.H func (ss *SQLStore) HostInfo(ctx context.Context, autopilotID string, hk types.PublicKey) (hi api.HostInfo, err error) { err = ss.db.Transaction(func(tx *gorm.DB) error { - // fetch ap id - var apID uint - if err := tx. - Model(&dbAutopilot{}). - Where("identifier = ?", autopilotID). - Select("id"). - Take(&apID). - Error; errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrAutopilotNotFound - } else if err != nil { - return err - } - - // fetch host id - var hID uint - if err := tx. - Model(&dbHost{}). - Where("public_key = ?", publicKey(hk)). - Select("id"). - Take(&hID). - Error; errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrHostNotFound - } else if err != nil { - return err - } - - // fetch host info var entity dbHostInfo if err := tx. Model(&dbHostInfo{}). - Where("db_autopilot_id = ? AND db_host_id = ?", apID, hID). + Where("db_autopilot_id = (?)", gorm.Expr("SELECT id FROM autopilots WHERE identifier = ?", autopilotID)). + Where("db_host_id = (?)", gorm.Expr("SELECT id FROM hosts WHERE public_key = ?", publicKey(hk))). Preload("DBHost"). First(&entity). Error; errors.Is(err, gorm.ErrRecordNotFound) { + if err := tx. + Model(&dbAutopilot{}). + Where("identifier = ?", autopilotID). + First(nil). + Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrAutopilotNotFound + } else if err := tx. + Model(&dbHost{}). + Where("public_key = ?", publicKey(hk)). + First(nil). 
+ Error; errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrHostNotFound + } return api.ErrHostInfoNotFound } else if err != nil { return err } - hi = entity.convert() return nil }) @@ -601,49 +557,13 @@ func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string, filterMod Where("db_autopilot_id = ?", apID). Joins("DBHost") - // apply mode filter - switch filterMode { - case api.HostFilterModeAllowed: - query = query.Scopes(ss.excludeBlocked("DBHost")) - case api.HostFilterModeBlocked: - query = query.Scopes(ss.excludeAllowed("DBHost")) - case api.HostFilterModeAll: - // nothing to do - default: - return fmt.Errorf("invalid filter mode: %v", filterMode) - } - - // apply usability filter - switch usabilityMode { - case api.UsabilityFilterModeUsable: - query = query.Where("usability_blocked = ? AND usability_offline = ? AND usability_low_score = ? AND usability_redundant_ip = ? AND usability_gouging = ? AND usability_not_accepting_contracts = ? AND usability_not_announced = ? AND usability_not_completing_scan = ?", - false, false, false, false, false, false, false, false) - case api.UsabilityFilterModeUnusable: - query = query.Where("usability_blocked = ? OR usability_offline = ? OR usability_low_score = ? OR usability_redundant_ip = ? OR usability_gouging = ? OR usability_not_accepting_contracts = ? OR usability_not_announced = ? 
OR usability_not_completing_scan = ?", - true, true, true, true, true, true, true, true) - case api.UsabilityFilterModeAll: - // nothing to do - default: - return fmt.Errorf("invalid usability mode: %v", usabilityMode) - } - - // apply address filter - if addressContains != "" { - query = query.Scopes(func(d *gorm.DB) *gorm.DB { - return d.Where("net_address LIKE ?", "%"+addressContains+"%") - }) - } - - // apply key filter - if len(keyIn) > 0 { - pubKeys := make([]publicKey, len(keyIn)) - for i, pk := range keyIn { - pubKeys[i] = publicKey(pk) - } - query = query.Scopes(func(d *gorm.DB) *gorm.DB { - return d.Where("public_key IN ?", pubKeys) - }) - } + // apply filters + query = query.Scopes( + hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist(), "DBHost"), + hostUsabilityFilter(usabilityMode), + hostNetAddress(addressContains), + hostPublicKey(keyIn), + ) // fetch host info var infos []dbHostInfo @@ -696,7 +616,33 @@ func (ss *SQLStore) UpdateHostInfo(ctx context.Context, autopilotID string, hk t Columns: []clause.Column{{Name: "db_autopilot_id"}, {Name: "db_host_id"}}, UpdateAll: true, }). - Create(convertHostInfo(apID, hID, gouging, score, usability)). 
+ Create(&dbHostInfo{ + DBAutopilotID: apID, + DBHostID: hID, + + UsabilityBlocked: usability.Blocked, + UsabilityOffline: usability.Offline, + UsabilityLowScore: usability.LowScore, + UsabilityRedundantIP: usability.RedundantIP, + UsabilityGouging: usability.Gouging, + UsabilityNotAcceptingContracts: usability.NotAcceptingContracts, + UsabilityNotAnnounced: usability.NotAnnounced, + UsabilityNotCompletingScan: usability.NotCompletingScan, + + ScoreAge: score.Age, + ScoreCollateral: score.Collateral, + ScoreInteractions: score.Interactions, + ScoreStorageRemaining: score.StorageRemaining, + ScoreUptime: score.Uptime, + ScoreVersion: score.Version, + ScorePrices: score.Prices, + + GougingContractErr: gouging.ContractErr, + GougingDownloadErr: gouging.DownloadErr, + GougingGougingErr: gouging.GougingErr, + GougingPruneErr: gouging.PruneErr, + GougingUploadErr: gouging.UploadErr, + }). Error }) return @@ -742,40 +688,27 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains return nil, ErrNegativeOffset } - // Apply filter mode. - var blocked bool - query := ss.db + // validate filterMode switch filterMode { case api.HostFilterModeAllowed: - query = query.Scopes(ss.excludeBlocked("hosts")) case api.HostFilterModeBlocked: - query = query.Scopes(ss.excludeAllowed("hosts")) - blocked = true case api.HostFilterModeAll: - // preload allowlist and blocklist - query = query. - Preload("Allowlist"). - Preload("Blocklist") default: return nil, fmt.Errorf("invalid filter mode: %v", filterMode) } - // Add address filter. - if addressContains != "" { - query = query.Scopes(func(d *gorm.DB) *gorm.DB { - return d.Where("net_address LIKE ?", "%"+addressContains+"%") - }) - } + // prepare query + query := ss.db.Scopes( + hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist(), "hosts"), + hostNetAddress(addressContains), + hostPublicKey(keyIn), + ) - // Only search for specific hosts. 
- if len(keyIn) > 0 { - pubKeys := make([]publicKey, len(keyIn)) - for i, pk := range keyIn { - pubKeys[i] = publicKey(pk) - } - query = query.Scopes(func(d *gorm.DB) *gorm.DB { - return d.Where("public_key IN ?", pubKeys) - }) + // preload allowlist and blocklist + if filterMode == api.HostFilterModeAll { + query = query. + Preload("Allowlist"). + Preload("Blocklist") } var hosts []hostdb.HostInfo @@ -793,7 +726,7 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains } else { hosts = append(hosts, hostdb.HostInfo{ Host: fh.convert(), - Blocked: blocked, + Blocked: filterMode == api.HostFilterModeBlocked, }) } } @@ -1206,40 +1139,73 @@ func (ss *SQLStore) processConsensusChangeHostDB(cc modules.ConsensusChange) { ss.unappliedAnnouncements = append(ss.unappliedAnnouncements, newAnnouncements...) } -// excludeBlocked can be used as a scope for a db transaction to exclude blocked -// hosts. -func (ss *SQLStore) excludeBlocked(alias string) func(db *gorm.DB) *gorm.DB { +// hostNetAddress can be used as a scope to filter hosts by their net address. +func hostNetAddress(addressContains string) func(*gorm.DB) *gorm.DB { return func(db *gorm.DB) *gorm.DB { - ss.mu.Lock() - defer ss.mu.Unlock() - - if ss.hasAllowlist { - db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) - } - if ss.hasBlocklist { - db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + if addressContains != "" { + return db.Where("net_address LIKE ?", "%"+addressContains+"%") } return db } } -// excludeAllowed can be used as a scope for a db transaction to exclude allowed -// hosts. 
-func (ss *SQLStore) excludeAllowed(alias string) func(db *gorm.DB) *gorm.DB { +func hostPublicKey(keyIn []types.PublicKey) func(*gorm.DB) *gorm.DB { return func(db *gorm.DB) *gorm.DB { - ss.mu.Lock() - defer ss.mu.Unlock() - - if ss.hasAllowlist { - db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + if len(keyIn) > 0 { + pubKeys := make([]publicKey, len(keyIn)) + for i, pk := range keyIn { + pubKeys[i] = publicKey(pk) + } + return db.Where("public_key IN ?", pubKeys) } - if ss.hasBlocklist { - db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", alias)) + return db + } +} + +// hostFilter can be used as a scope to filter hosts based on their filter mode, +// returning either all, allowed or blocked hosts. +func hostFilter(filterMode string, hasAllowlist, hasBlocklist bool, hostTableAlias string) func(*gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + switch filterMode { + case api.HostFilterModeAllowed: + if hasAllowlist { + db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", hostTableAlias)) + } + if hasBlocklist { + db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", hostTableAlias)) + } + case api.HostFilterModeBlocked: + if hasAllowlist { + db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", hostTableAlias)) + } + if hasBlocklist { + db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", hostTableAlias)) + } + if !hasAllowlist && !hasBlocklist { + // if neither an allowlist nor a blocklist exist, all hosts are allowed + // which means we return none + db = db.Where("1 = 0") + } + case api.HostFilterModeAll: + // do nothing } - if !ss.hasAllowlist && !ss.hasBlocklist 
{ - // if neither an allowlist nor a blocklist exist, all hosts are allowed - // which means we return none - db = db.Where("1 = 0") + return db + } +} + +// hostUsabilityFilter can be used as a scope to filter hosts based on their +// usability mode, returning either all, usable or unusable hosts. +func hostUsabilityFilter(usabilityMode string) func(*gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + switch usabilityMode { + case api.UsabilityFilterModeUsable: + db = db.Where("usability_blocked = ? AND usability_offline = ? AND usability_low_score = ? AND usability_redundant_ip = ? AND usability_gouging = ? AND usability_not_accepting_contracts = ? AND usability_not_announced = ? AND usability_not_completing_scan = ?", + false, false, false, false, false, false, false, false) + case api.UsabilityFilterModeUnusable: + db = db.Where("usability_blocked = ? OR usability_offline = ? OR usability_low_score = ? OR usability_redundant_ip = ? OR usability_gouging = ? OR usability_not_accepting_contracts = ? OR usability_not_announced = ? 
OR usability_not_completing_scan = ?", + true, true, true, true, true, true, true, true) + case api.UsabilityFilterModeAll: + // nothing to do } return db } @@ -1249,10 +1215,10 @@ func (ss *SQLStore) isBlocked(h dbHost) (blocked bool) { ss.mu.Lock() defer ss.mu.Unlock() - if ss.hasAllowlist && len(h.Allowlist) == 0 { + if ss.allowListCnt > 0 && len(h.Allowlist) == 0 { blocked = true } - if ss.hasBlocklist && len(h.Blocklist) > 0 { + if ss.blockListCnt > 0 && len(h.Blocklist) > 0 { blocked = true } return diff --git a/stores/sql.go b/stores/sql.go index f62dba97f..34a6d78ab 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -104,8 +104,8 @@ type ( wg sync.WaitGroup mu sync.Mutex - hasAllowlist bool - hasBlocklist bool + allowListCnt uint64 + blockListCnt uint64 closed bool knownContracts map[types.FileContractID]struct{} @@ -258,8 +258,8 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { knownContracts: isOurContract, lastSave: time.Now(), persistInterval: cfg.PersistInterval, - hasAllowlist: allowlistCnt > 0, - hasBlocklist: blocklistCnt > 0, + allowListCnt: uint64(allowlistCnt), + blockListCnt: uint64(blocklistCnt), settings: make(map[string]string), slabPruneSigChan: make(chan struct{}, 1), unappliedContractState: make(map[types.FileContractID]contractState), @@ -299,6 +299,12 @@ func isSQLite(db *gorm.DB) bool { } } +func (ss *SQLStore) hasAllowlist() bool { + ss.mu.Lock() + defer ss.mu.Unlock() + return ss.allowListCnt > 0 +} + func (ss *SQLStore) updateHasAllowlist(err *error) { if *err != nil { return @@ -311,10 +317,16 @@ func (ss *SQLStore) updateHasAllowlist(err *error) { } ss.mu.Lock() - ss.hasAllowlist = cnt > 0 + ss.allowListCnt = uint64(cnt) ss.mu.Unlock() } +func (ss *SQLStore) hasBlocklist() bool { + ss.mu.Lock() + defer ss.mu.Unlock() + return ss.blockListCnt > 0 +} + func (ss *SQLStore) updateHasBlocklist(err *error) { if *err != nil { return @@ -327,7 +339,7 @@ func (ss *SQLStore) updateHasBlocklist(err *error) { } 
ss.mu.Lock() - ss.hasBlocklist = cnt > 0 + ss.blockListCnt = uint64(cnt) ss.mu.Unlock() } From 5bb4a648caaa4ce65566da6bfd4528a5c633d0f5 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 21 Mar 2024 10:54:47 +0100 Subject: [PATCH 096/201] cmd: fix comment on logger flag --- cmd/renterd/main.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 98b257ddf..1b0cce80f 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -263,7 +263,7 @@ func main() { tryLoadConfig() // deprecated - these go first so that they can be overwritten by the non-deprecated flags - flag.StringVar(&cfg.Log.Database.Level, "db.logger.logLevel", cfg.Log.Level, "(deprecated) Logger level (overrides with RENTERD_DB_LOGGER_LOG_LEVEL)") + flag.StringVar(&cfg.Log.Database.Level, "db.logger.logLevel", cfg.Log.Database.Level, "(deprecated) Logger level (overrides with RENTERD_DB_LOGGER_LOG_LEVEL)") flag.BoolVar(&cfg.Database.Log.IgnoreRecordNotFoundError, "db.logger.ignoreNotFoundError", cfg.Database.Log.IgnoreRecordNotFoundError, "(deprecated) Ignores 'not found' errors in logger (overrides with RENTERD_DB_LOGGER_IGNORE_NOT_FOUND_ERROR)") flag.DurationVar(&cfg.Database.Log.SlowThreshold, "db.logger.slowThreshold", cfg.Database.Log.SlowThreshold, "(deprecated) Threshold for slow queries in logger (overrides with RENTERD_DB_LOGGER_SLOW_THRESHOLD)") flag.StringVar(&cfg.Log.Path, "log-path", cfg.Log.Path, "(deprecated) Path to directory for logs (overrides with RENTERD_LOG_PATH)") @@ -273,7 +273,7 @@ func main() { flag.StringVar(&cfg.Directory, "dir", cfg.Directory, "Directory for storing node state") // logger - flag.StringVar(&cfg.Log.Level, "log.level", cfg.Log.Level, "Global logger level (info|warn|error). Defaults to 'info' (overrides with RENTERD_LOG_LEVEL)") + flag.StringVar(&cfg.Log.Level, "log.level", cfg.Log.Level, "Global logger level (debug|info|warn|error). 
Defaults to 'info' (overrides with RENTERD_LOG_LEVEL)") flag.BoolVar(&cfg.Log.File.Enabled, "log.file.enabled", cfg.Log.File.Enabled, "Enables logging to disk. Defaults to 'true'. (overrides with RENTERD_LOG_FILE_ENABLED)") flag.StringVar(&cfg.Log.File.Format, "log.file.format", cfg.Log.File.Format, "Format of log file (json|human). Defaults to 'json' (overrides with RENTERD_LOG_FILE_FORMAT)") flag.StringVar(&cfg.Log.File.Path, "log.file.path", cfg.Log.File.Path, "Path of log file. Defaults to 'renterd.log' within the renterd directory. (overrides with RENTERD_LOG_FILE_PATH)") @@ -463,6 +463,8 @@ func main() { level = logger.Warn case "info": level = logger.Info + case "debug": + level = logger.Info default: log.Fatalf("invalid log level %q, options are: silent, error, warn, info", cfg.Log.Level) } From c14833682b33f95d632570ae99a591ff2376c9ef Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 21 Mar 2024 11:07:31 +0100 Subject: [PATCH 097/201] cmd: fix log compat --- cmd/renterd/main.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 1b0cce80f..03c27629f 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -84,7 +84,7 @@ var ( }, Log: config.Log{ Path: "", // deprecated. included for compatibility. - Level: "info", + Level: "", File: config.LogFile{ Enabled: true, Format: "json", @@ -281,7 +281,7 @@ func main() { flag.StringVar(&cfg.Log.StdOut.Format, "log.stdout.format", cfg.Log.StdOut.Format, "Format of log output (json|human). Defaults to 'human' (overrides with RENTERD_LOG_STDOUT_FORMAT)") flag.BoolVar(&cfg.Log.StdOut.EnableANSI, "log.stdout.enableANSI", cfg.Log.StdOut.EnableANSI, "Enables ANSI color codes in log output. Defaults to 'true' on non-Windows systems. (overrides with RENTERD_LOG_STDOUT_ENABLE_ANSI)") flag.BoolVar(&cfg.Log.Database.Enabled, "log.database.enabled", cfg.Log.Database.Enabled, "Enable logging database queries. 
Defaults to 'true' (overrides with RENTERD_LOG_DATABASE_ENABLED)") - flag.StringVar(&cfg.Log.Database.Level, "log.database.level", cfg.Log.Database.Level, "Logger level for database queries (info|warn|error). Defaults to 'info' (overrides with RENTERD_LOG_DATABASE_LEVEL)") + flag.StringVar(&cfg.Log.Database.Level, "log.database.level", cfg.Log.Database.Level, "Logger level for database queries (info|warn|error). Defaults to 'warn' (overrides with RENTERD_LOG_LEVEL and RENTERD_LOG_DATABASE_LEVEL)") flag.BoolVar(&cfg.Log.Database.IgnoreRecordNotFoundError, "log.database.ignoreRecordNotFoundError", cfg.Log.Database.IgnoreRecordNotFoundError, "Enable ignoring 'not found' errors resulting from database queries. Defaults to 'true' (overrides with RENTERD_LOG_DATABASE_IGNORE_RECORD_NOT_FOUND_ERROR)") flag.DurationVar(&cfg.Log.Database.SlowThreshold, "log.database.slowThreshold", cfg.Log.Database.SlowThreshold, "Threshold for slow queries in logger. Defaults to 100ms (overrides with RENTERD_LOG_DATABASE_SLOW_THRESHOLD)") @@ -457,6 +457,8 @@ func main() { } var level logger.LogLevel switch strings.ToLower(lvlStr) { + case "": + level = logger.Warn // default to 'warn' if not set case "error": level = logger.Error case "warn": @@ -473,6 +475,9 @@ func main() { } // Create logger. 
+ if cfg.Log.Level == "" { + cfg.Log.Level = "info" // default to 'info' if not set + } logger, closeFn, err := NewLogger(cfg.Directory, cfg.Log) if err != nil { log.Fatalln("failed to create logger:", err) From d483b0037f7fc5c7febe8430768302dcdb0c961e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 21 Mar 2024 11:23:27 +0100 Subject: [PATCH 098/201] stores: add logging; worker: add debug logging for pruning; cmd: add name to sql logger; contractor; break revision errors up into multiple log lines --- api/worker.go | 5 ++++- autopilot/contractor.go | 6 ++++-- cmd/renterd/main.go | 2 +- stores/metadata.go | 6 ++++++ worker/rhpv2.go | 34 +++++++++++++++++++++++++++++----- worker/worker.go | 4 ++++ 6 files changed, 48 insertions(+), 9 deletions(-) diff --git a/api/worker.go b/api/worker.go index 39f075718..87f736da3 100644 --- a/api/worker.go +++ b/api/worker.go @@ -50,7 +50,10 @@ type ( // ContractsResponse is the response type for the /rhp/contracts endpoint. ContractsResponse struct { Contracts []Contract `json:"contracts"` - Error string `json:"error,omitempty"` + Errors map[types.PublicKey]string + + // deprecated + Error string `json:"error,omitempty"` } MemoryResponse struct { diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 9bfc8055e..4f4df19bd 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -220,8 +220,10 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( if err != nil { return false, err } - if resp.Error != "" { - c.logger.Error(resp.Error) + if resp.Errors != nil { + for pk, err := range resp.Errors { + c.logger.With("hostKey", pk).With("error", err).Warn("failed to fetch revision") + } } contracts := resp.Contracts c.logger.Infof("fetched %d contracts from the worker, took %v", len(resp.Contracts), time.Since(start)) diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 03c27629f..bdb1dc423 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -492,7 +492,7 
@@ func main() { dbLogCfg = cfg.Database.Log } busCfg.DBLogger = zapgorm2.Logger{ - ZapLogger: logger, + ZapLogger: logger.Named("SQL"), LogLevel: level, SlowThreshold: dbLogCfg.SlowThreshold, SkipCallerLookup: false, diff --git a/stores/metadata.go b/stores/metadata.go index e44cb3a63..3332a588e 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2226,6 +2226,12 @@ func (s *SQLStore) createSlices(tx *gorm.DB, objID, multiPartID *uint, contractS DBSectorID: sectorID, DBContractID: contracts[fcid].ID, }) + } else { + s.logger.Warn("missing contract for shard", + "contract", fcid, + "root", shard.Root, + "latest_host", shard.LatestHost, + ) } } } diff --git a/worker/rhpv2.go b/worker/rhpv2.go index 49af73eb7..e73e9fdfa 100644 --- a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -284,6 +284,14 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types err = w.withContractLock(ctx, fcid, lockingPriorityPruning, func() error { return w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) error { return w.withRevisionV2(defaultLockTimeout, t, hostKey, fcid, lastKnownRevisionNumber, func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) (err error) { + logger := w.logger. + With("hostKey", hostKey). + With("hostVersion", settings.Version). + With("fcid", fcid). + With("revisionNumber", rev.Revision.RevisionNumber). + With("lastKnownRevisionNumber", lastKnownRevisionNumber). 
+ Named("pruneContract") + // perform gouging checks gc, err := GougingCheckerFromContext(ctx, false) if err != nil { @@ -316,6 +324,7 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types delete(keep, root) // prevent duplicates continue } + logger.With("index", i).With("root", root).Debug("collected root for pruning") indices = append(indices, uint64(i)) } if len(indices) == 0 { @@ -339,7 +348,14 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types } func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings, indices []uint64) (deleted uint64, err error) { - w.logger.Infow(fmt.Sprintf("deleting %d contract roots (%v)", len(indices), humanReadableSize(len(indices)*rhpv2.SectorSize)), "hk", rev.HostKey(), "fcid", rev.ID()) + logger := w.logger. + With("hostKey", t.HostKey()). + With("hostVersion", settings.Version). + With("fcid", rev.Revision.ParentID). + With("revisionNumber", rev.Revision.RevisionNumber). 
+ Named("deleteContractRoots") + + logger.Infow(fmt.Sprintf("deleting %d contract roots (%v)", len(indices), humanReadableSize(len(indices)*rhpv2.SectorSize)), "hk", rev.HostKey(), "fcid", rev.ID()) // return early if len(indices) == 0 { @@ -380,9 +396,9 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi if err = func() error { var cost types.Currency start := time.Now() - w.logger.Infow(fmt.Sprintf("starting batch %d/%d of size %d", i+1, len(batches), len(batch))) + logger.Infow(fmt.Sprintf("starting batch %d/%d of size %d", i+1, len(batches), len(batch))) defer func() { - w.logger.Infow(fmt.Sprintf("processing batch %d/%d of size %d took %v", i+1, len(batches), len(batch), time.Since(start)), "cost", cost) + logger.Infow(fmt.Sprintf("processing batch %d/%d of size %d took %v", i+1, len(batches), len(batch), time.Since(start)), "cost", cost) }() numSectors := rev.NumSectors() @@ -462,7 +478,7 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi return err } else if err := t.ReadResponse(&merkleResp, minMessageSize+responseSize); err != nil { err := fmt.Errorf("couldn't read Merkle proof response, err: %v", err) - w.logger.Infow(fmt.Sprintf("processing batch %d/%d failed, err %v", i+1, len(batches), err)) + logger.Infow(fmt.Sprintf("processing batch %d/%d failed, err %v", i+1, len(batches), err)) return err } @@ -472,7 +488,7 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi oldRoot, newRoot := types.Hash256(rev.Revision.FileMerkleRoot), merkleResp.NewMerkleRoot if rev.Revision.Filesize > 0 && !rhpv2.VerifyDiffProof(actions, numSectors, proofHashes, leafHashes, oldRoot, newRoot, nil) { err := fmt.Errorf("couldn't verify delete proof, host %v, version %v; %w", rev.HostKey(), settings.Version, ErrInvalidMerkleProof) - w.logger.Infow(fmt.Sprintf("processing batch %d/%d failed, err %v", i+1, len(batches), err)) + logger.Infow(fmt.Sprintf("processing batch %d/%d failed, err 
%v", i+1, len(batches), err)) t.WriteResponseErr(err) return err } @@ -506,6 +522,14 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi // record spending w.contractSpendingRecorder.Record(rev.Revision, api.ContractSpending{Deletions: cost}) + + for _, action := range actions { + if action.Type == rhpv2.RPCWriteActionSwap { + logger.With("index", action.B).Debug("successfully swapped sector") + } else if action.Type == rhpv2.RPCWriteActionTrim { + logger.With("n", action.A).Debug("successfully trimmed sectors") + } + } return nil }(); err != nil { return diff --git a/worker/worker.go b/worker/worker.go index 89fe37a14..8514c086d 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1298,6 +1298,10 @@ func (w *worker) rhpContractsHandlerGET(jc jape.Context) { resp := api.ContractsResponse{Contracts: contracts} if errs != nil { resp.Error = errs.Error() + resp.Errors = make(map[types.PublicKey]string) + for pk, err := range errs { + resp.Errors[pk] = err.Error() + } } jc.Encode(resp) } From 2f243637bb2d5dff53439994b8eec0eb40157ff8 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 21 Mar 2024 15:13:09 +0100 Subject: [PATCH 099/201] worker: add id to logger --- autopilot/accounts.go | 4 +++- worker/rhpv2.go | 20 +++++++++++--------- worker/upload.go | 3 +++ worker/uploader.go | 5 +++++ 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/autopilot/accounts.go b/autopilot/accounts.go index 690c2b35d..ec1e80558 100644 --- a/autopilot/accounts.go +++ b/autopilot/accounts.go @@ -147,7 +147,9 @@ func (a *accounts) refillWorkerAccounts(ctx context.Context, w Worker) { // register the alert if error is errMaxDriftExceeded a.ap.RegisterAlert(ctx, newAccountRefillAlert(accountID, contract, *rerr)) } - a.l.Errorw(rerr.err.Error(), rerr.keysAndValues...) + if _, inSet := inContractSet[contract.ID]; inSet { + a.l.Errorw(rerr.err.Error(), rerr.keysAndValues...) 
+ } } else { // dismiss alerts on success a.ap.DismissAlert(ctx, alertIDForAccount(alertAccountRefillID, accountID)) diff --git a/worker/rhpv2.go b/worker/rhpv2.go index e73e9fdfa..1e3df8d0b 100644 --- a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -15,6 +15,8 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/siad/build" "go.sia.tech/siad/crypto" + "go.uber.org/zap" + "lukechampine.com/frand" ) const ( @@ -285,6 +287,7 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types return w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) error { return w.withRevisionV2(defaultLockTimeout, t, hostKey, fcid, lastKnownRevisionNumber, func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) (err error) { logger := w.logger. + With("id", frand.Entropy128()). With("hostKey", hostKey). With("hostVersion", settings.Version). With("fcid", fcid). @@ -312,6 +315,12 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types if err != nil { return err } + for _, root := range pending { + logger.With("root", root).Debug("pending root") + } + for _, root := range want { + logger.With("root", root).Debug("wanted root") + } keep := make(map[types.Hash256]struct{}) for _, root := range append(want, pending...) { keep[root] = struct{}{} @@ -332,7 +341,7 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types } // delete the roots from the contract - deleted, err = w.deleteContractRoots(t, &rev, settings, indices) + deleted, err = w.deleteContractRoots(t, &rev, settings, logger, indices) if deleted < uint64(len(indices)) { remaining = uint64(len(indices)) - deleted } @@ -347,14 +356,7 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types return } -func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings, indices []uint64) (deleted uint64, err error) { - logger := w.logger. 
- With("hostKey", t.HostKey()). - With("hostVersion", settings.Version). - With("fcid", rev.Revision.ParentID). - With("revisionNumber", rev.Revision.RevisionNumber). - Named("deleteContractRoots") - +func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings, logger *zap.SugaredLogger, indices []uint64) (deleted uint64, err error) { logger.Infow(fmt.Sprintf("deleting %d contract roots (%v)", len(indices), humanReadableSize(len(indices)*rhpv2.SectorSize)), "hk", rev.HostKey(), "fcid", rev.ID()) // return early diff --git a/worker/upload.go b/worker/upload.go index d146b920e..8f13d2132 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -616,9 +616,11 @@ func (mgr *uploadManager) UploadShards(ctx context.Context, s *object.Slab, shar } // track the upload in the bus + logger := mgr.logger.With("uploadID", hex.EncodeToString(upload.id[:])) if err := mgr.os.TrackUpload(ctx, upload.id); err != nil { return fmt.Errorf("failed to track upload '%v', err: %w", upload.id, err) } + logger.Debug("tracking upload") // defer a function that finishes the upload defer func() { @@ -626,6 +628,7 @@ func (mgr *uploadManager) UploadShards(ctx context.Context, s *object.Slab, shar if err := mgr.os.FinishUpload(ctx, upload.id); err != nil { mgr.logger.Errorf("failed to mark upload %v as finished: %v", upload.id, err) } + logger.Debug("finished upload") cancel() }() diff --git a/worker/uploader.go b/worker/uploader.go index 403accbc8..6a4e0232e 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -2,6 +2,7 @@ package worker import ( "context" + "encoding/hex" "errors" "fmt" "math" @@ -230,9 +231,13 @@ func (u *uploader) execute(req *sectorUploadReq) (time.Duration, error) { } // update the bus + logger := u.logger.With("uploadID", hex.EncodeToString(req.uploadID[:])). + With("root", req.sector.root). 
+ With("fcid", fcid) if err := u.os.AddUploadingSector(ctx, req.uploadID, fcid, req.sector.root); err != nil { return 0, fmt.Errorf("failed to add uploading sector to contract %v, err: %v", fcid, err) } + logger.Debug("added uploading sector") // upload the sector start := time.Now() From df6641510e7deef0045b6f512d0058080da2a8be Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 21 Mar 2024 20:33:22 +0100 Subject: [PATCH 100/201] stores: unify HostInfos and SearchHosts --- api/host.go | 13 +- autopilot/autopilot.go | 16 +- autopilot/autopilot_test.go | 50 +-- autopilot/client.go | 18 +- autopilot/contractor.go | 12 +- autopilot/hostinfo.go | 7 +- autopilot/scanner.go | 2 +- autopilot/scanner_test.go | 10 +- bus/bus.go | 10 +- bus/client/hosts.go | 4 +- stores/hostdb.go | 231 ++++------- stores/hostdb_test.go | 375 +++++++----------- stores/metadata_test.go | 4 +- stores/migrations.go | 4 +- .../main/migration_00007_host_checks.sql | 52 +++ .../mysql/main/migration_00007_host_info.sql | 52 --- stores/migrations/mysql/main/schema.sql | 42 +- .../main/migration_00007_host_checks.sql | 52 +++ .../sqlite/main/migration_00007_host_info.sql | 52 --- stores/migrations/sqlite/main/schema.sql | 36 +- worker/mocks_test.go | 15 +- worker/worker.go | 2 +- 22 files changed, 468 insertions(+), 591 deletions(-) create mode 100644 stores/migrations/mysql/main/migration_00007_host_checks.sql delete mode 100644 stores/migrations/mysql/main/migration_00007_host_info.sql create mode 100644 stores/migrations/sqlite/main/migration_00007_host_checks.sql delete mode 100644 stores/migrations/sqlite/main/migration_00007_host_info.sql diff --git a/api/host.go b/api/host.go index 2c1e82f30..50b058642 100644 --- a/api/host.go +++ b/api/host.go @@ -47,11 +47,15 @@ type ( MinRecentScanFailures uint64 `json:"minRecentScanFailures"` } + HostsRequest struct { + UsabilityMode string `json:"usabilityMode"` + SearchHostsRequest + } + SearchHostsRequest struct { Offset int `json:"offset"` Limit int 
`json:"limit"` FilterMode string `json:"filterMode"` - UsabilityMode string `json:"usabilityMode"` AddressContains string `json:"addressContains"` KeyIn []types.PublicKey `json:"keyIn"` } @@ -88,6 +92,7 @@ type ( SearchHostOptions struct { AddressContains string FilterMode string + UsabilityMode string KeyIn []types.PublicKey Limit int Offset int @@ -117,7 +122,11 @@ func (opts HostsForScanningOptions) Apply(values url.Values) { type ( HostInfo struct { - Host hostdb.Host `json:"host"` + hostdb.HostInfo + Checks map[string]HostCheck `json:"checks"` + } + + HostCheck struct { Gouging HostGougingBreakdown `json:"gouging"` Score HostScoreBreakdown `json:"score"` Usability HostUsabilityBreakdown `json:"usability"` diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 1038f4378..7562de960 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -53,10 +53,10 @@ type Bus interface { PrunableData(ctx context.Context) (prunableData api.ContractsPrunableDataResponse, err error) // hostdb - Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) + Host(ctx context.Context, hostKey types.PublicKey) (api.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.HostInfo, error) // metrics RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error @@ -726,7 +726,7 @@ func (ap *Autopilot) stateHandlerGET(jc jape.Context) { } func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { - var req api.SearchHostsRequest + var req api.HostsRequest if jc.Decode(&req) != nil { return } @@ -737,10 +737,10 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { jc.Encode(hosts) } 
-func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (usables uint64) { +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.HostInfo) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { - usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) + usable, _ := isUsableHost(cfg, rs, gc, host.HostInfo, smallestValidScore, 0) if usable { usables++ } @@ -751,12 +751,12 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. // evaluateConfig evaluates the given configuration and if the gouging settings // are too strict for the number of contracts required by 'cfg', it will provide // a recommendation on how to loosen it. 
-func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []hostdb.HostInfo) (resp api.ConfigEvaluationResponse) { +func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.HostInfo) (resp api.ConfigEvaluationResponse) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) resp.Hosts = uint64(len(hosts)) for _, host := range hosts { - usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) + usable, usableBreakdown := isUsableHost(cfg, rs, gc, host.HostInfo, 0, 0) if usable { resp.Usable++ continue @@ -866,7 +866,7 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu // optimiseGougingSetting tries to optimise one field of the gouging settings to // try and hit the target number of contracts. -func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []hostdb.HostInfo) bool { +func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []api.HostInfo) bool { if cfg.Contracts.Amount == 0 { return true // nothing to do } diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go index 9ebafe675..2da3fc7be 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/autopilot_test.go @@ -14,34 +14,36 @@ import ( func TestOptimiseGougingSetting(t *testing.T) { // create 10 hosts that should all be usable - var hosts []hostdb.HostInfo + var hosts []api.HostInfo for i := 0; i < 10; i++ { - hosts = append(hosts, hostdb.HostInfo{ - Host: hostdb.Host{ - KnownSince: time.Unix(0, 0), - PriceTable: 
hostdb.HostPriceTable{ - HostPriceTable: rhpv3.HostPriceTable{ - CollateralCost: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), + hosts = append(hosts, api.HostInfo{ + HostInfo: hostdb.HostInfo{ + Host: hostdb.Host{ + KnownSince: time.Unix(0, 0), + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{ + CollateralCost: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + }, }, + Settings: rhpv2.HostSettings{ + AcceptingContracts: true, + Collateral: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + Version: "1.6.0", + }, + Interactions: hostdb.Interactions{ + Uptime: time.Hour * 1000, + LastScan: time.Now(), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + TotalScans: 100, + }, + LastAnnouncement: time.Unix(0, 0), + Scanned: true, }, - Settings: rhpv2.HostSettings{ - AcceptingContracts: true, - Collateral: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), - Version: "1.6.0", - }, - Interactions: hostdb.Interactions{ - Uptime: time.Hour * 1000, - LastScan: time.Now(), - LastScanSuccess: true, - SecondToLastScanSuccess: true, - TotalScans: 100, - }, - LastAnnouncement: time.Unix(0, 0), - Scanned: true, + Blocked: false, }, - Blocked: false, }) } diff --git a/autopilot/client.go b/autopilot/client.go index ba16754a5..336149f8a 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -40,14 +40,16 @@ func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostHandlerResponse } // HostInfo returns information about all hosts. 
-func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode string, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostHandlerResponse, err error) { - err = c.c.POST("/hosts", api.SearchHostsRequest{ - Offset: offset, - Limit: limit, - FilterMode: filterMode, - UsabilityMode: usabilityMode, - AddressContains: addressContains, - KeyIn: keyIn, +func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostHandlerResponse, err error) { + err = c.c.POST("/hosts", api.HostsRequest{ + UsabilityMode: usabilityMode, + SearchHostsRequest: api.SearchHostsRequest{ + Offset: offset, + Limit: limit, + FilterMode: filterMode, + AddressContains: addressContains, + KeyIn: keyIn, + }, }, &resp) return } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 43ac5e629..d376d682e 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -295,7 +295,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( for _, h := range hosts { // ignore the pricetable's HostBlockHeight by setting it to our own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, h, minScore, hostData[h.PublicKey]) + isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, h.HostInfo, minScore, hostData[h.PublicKey]) hostInfos[h.PublicKey] = hostInfo{ Usable: isUsable, UnusableResult: unusableResult, @@ -777,7 +777,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts host.PriceTable.HostBlockHeight = cs.BlockHeight // decide whether the host is still good - usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, contract.FileSize()) + usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host.HostInfo, minScore, contract.FileSize()) if !usable { reasons := 
unusableResult.reasons() toStopUsing[fcid] = strings.Join(reasons, ",") @@ -1297,7 +1297,7 @@ func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uin return minScore } -func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { +func (c *contractor) candidateHosts(ctx context.Context, hosts []api.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { start := time.Now() // fetch consensus state @@ -1311,7 +1311,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) // select unused hosts that passed a scan - var unused []hostdb.HostInfo + var unused []api.HostInfo var excluded, notcompletedscan int for _, h := range hosts { // filter out used hosts @@ -1346,7 +1346,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo // NOTE: ignore the pricetable's HostBlockHeight by setting it to our // own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - usable, result := isUsableHost(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) + usable, result := isUsableHost(state.cfg, state.rs, gc, h.HostInfo, minScore, storedData[h.PublicKey]) if usable { candidates = append(candidates, scoredHost{h.Host, result.scoreBreakdown.Score()}) continue @@ -1612,7 +1612,7 @@ func (c *contractor) tryPerformPruning(wp *workerPool) { }() } -func (c *contractor) hostForContract(ctx context.Context, fcid types.FileContractID) (host hostdb.HostInfo, metadata api.ContractMetadata, err error) { +func (c *contractor) hostForContract(ctx context.Context, fcid types.FileContractID) (host api.HostInfo, metadata 
api.ContractMetadata, err error) { // fetch the contract metadata, err = c.ap.bus.Contract(ctx, fcid) if err != nil { diff --git a/autopilot/hostinfo.go b/autopilot/hostinfo.go index e0cbecadc..5af554e6b 100644 --- a/autopilot/hostinfo.go +++ b/autopilot/hostinfo.go @@ -6,7 +6,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/worker" ) @@ -53,7 +52,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api // ignore the pricetable's HostBlockHeight by setting it to our own blockheight host.Host.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host, minScore, storedData) + isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host.HostInfo, minScore, storedData) return api.HostHandlerResponse{ Host: host.Host, Checks: &api.HostHandlerResponseChecks{ @@ -67,7 +66,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api }, nil } -func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.HostInfo) (hi hostInfo, found bool) { +func (c *contractor) hostInfoFromCache(ctx context.Context, host api.HostInfo) (hi hostInfo, found bool) { // grab host details from cache c.mu.Lock() hi, found = c.cachedHostInfo[host.PublicKey] @@ -90,7 +89,7 @@ func (c *contractor) hostInfoFromCache(ctx context.Context, host hostdb.HostInfo } else { state := c.ap.State() gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, storedData) + isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host.HostInfo, minScore, storedData) hi = hostInfo{ Usable: isUsable, UnusableResult: unusableResult, diff --git a/autopilot/scanner.go b/autopilot/scanner.go index 76643e5b5..a2d30abfa 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -31,7 
+31,7 @@ type ( // a bit, we currently use inline interfaces to avoid having to update the // scanner tests with every interface change bus interface { - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.HostInfo, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 1cdd096d2..027366662 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -19,7 +19,7 @@ type mockBus struct { reqs []string } -func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]hostdb.HostInfo, error) { +func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.HostInfo, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) start := opts.Offset @@ -32,9 +32,13 @@ func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ( end = len(b.hosts) } - his := make([]hostdb.HostInfo, len(b.hosts[start:end])) + his := make([]api.HostInfo, len(b.hosts[start:end])) for i, h := range b.hosts[start:end] { - his[i] = hostdb.HostInfo{Host: h} + his[i] = api.HostInfo{ + HostInfo: hostdb.HostInfo{ + Host: h, + }, + } } return his, nil } diff --git a/bus/bus.go b/bus/bus.go index 7d33964be..510353edd 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -91,13 +91,13 @@ type ( // A HostDB stores information about hosts. 
HostDB interface { - Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) + Host(ctx context.Context, hostKey types.PublicKey) (api.HostInfo, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) ResetLostSectors(ctx context.Context, hk types.PublicKey) error - SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.HostInfo, error) + SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostInfo, error) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) HostBlocklist(ctx context.Context) ([]string, error) @@ -775,9 +775,9 @@ func (b *bus) searchHostsHandlerPOST(jc jape.Context) { return } - // TODO: on the next major release - // - set defaults in handler - // - validate request params and return 400 if invalid + // TODO: on the next major release: + // - properly default search params + // - properly validate and return 400 hosts, err := b.hdb.SearchHosts(jc.Request.Context(), req.FilterMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return diff --git a/bus/client/hosts.go b/bus/client/hosts.go index 1ebf14e1f..460291dd6 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -12,7 +12,7 @@ import ( ) // Host returns information about a particular host known to the server. 
-func (c *Client) Host(ctx context.Context, hostKey types.PublicKey) (h hostdb.HostInfo, err error) { +func (c *Client) Host(ctx context.Context, hostKey types.PublicKey) (h api.HostInfo, err error) { err = c.c.WithContext(ctx).GET(fmt.Sprintf("/host/%s", hostKey), &h) return } @@ -78,7 +78,7 @@ func (c *Client) ResetLostSectors(ctx context.Context, hostKey types.PublicKey) } // SearchHosts returns all hosts that match certain search criteria. -func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []hostdb.HostInfo, err error) { +func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []api.HostInfo, err error) { err = c.c.WithContext(ctx).POST("/search/hosts", api.SearchHostsRequest{ Offset: opts.Offset, Limit: opts.Limit, diff --git a/stores/hostdb.go b/stores/hostdb.go index 5de891649..01a9e594e 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -78,11 +78,12 @@ type ( Allowlist []dbAllowlistEntry `gorm:"many2many:host_allowlist_entry_hosts;constraint:OnDelete:CASCADE"` Blocklist []dbBlocklistEntry `gorm:"many2many:host_blocklist_entry_hosts;constraint:OnDelete:CASCADE"` + Checks []dbHostCheck `gorm:"foreignKey:DBHostID;constraint:OnDelete:CASCADE"` } - // dbHostInfo contains information about a host that is collected and used + // dbHostCheck contains information about a host that is collected and used // by the autopilot. - dbHostInfo struct { + dbHostCheck struct { Model DBAutopilotID uint @@ -302,7 +303,7 @@ func (dbConsensusInfo) TableName() string { return "consensus_infos" } func (dbHost) TableName() string { return "hosts" } // TableName implements the gorm.Tabler interface. -func (dbHostInfo) TableName() string { return "host_infos" } +func (dbHostCheck) TableName() string { return "host_checks" } // TableName implements the gorm.Tabler interface. 
func (dbAllowlistEntry) TableName() string { return "host_allowlist_entries" } @@ -310,40 +311,49 @@ func (dbAllowlistEntry) TableName() string { return "host_allowlist_entries" } // TableName implements the gorm.Tabler interface. func (dbBlocklistEntry) TableName() string { return "host_blocklist_entries" } -// convert converts a host into a hostdb.Host. -func (h dbHost) convert() hostdb.Host { +// convert converts a host into a api.HostInfo +func (h dbHost) convert(blocked bool) api.HostInfo { var lastScan time.Time if h.LastScan > 0 { lastScan = time.Unix(0, h.LastScan) } - return hostdb.Host{ - KnownSince: h.CreatedAt, - LastAnnouncement: h.LastAnnouncement, - NetAddress: h.NetAddress, - Interactions: hostdb.Interactions{ - TotalScans: h.TotalScans, - LastScan: lastScan, - LastScanSuccess: h.LastScanSuccess, - SecondToLastScanSuccess: h.SecondToLastScanSuccess, - Uptime: h.Uptime, - Downtime: h.Downtime, - SuccessfulInteractions: h.SuccessfulInteractions, - FailedInteractions: h.FailedInteractions, - LostSectors: h.LostSectors, - }, - PriceTable: hostdb.HostPriceTable{ - HostPriceTable: h.PriceTable.convert(), - Expiry: h.PriceTableExpiry.Time, + checks := make(map[string]api.HostCheck) + for _, check := range h.Checks { + checks[check.DBAutopilot.Identifier] = check.convert() + } + return api.HostInfo{ + HostInfo: hostdb.HostInfo{ + Host: hostdb.Host{ + KnownSince: h.CreatedAt, + LastAnnouncement: h.LastAnnouncement, + NetAddress: h.NetAddress, + Interactions: hostdb.Interactions{ + TotalScans: h.TotalScans, + LastScan: lastScan, + LastScanSuccess: h.LastScanSuccess, + SecondToLastScanSuccess: h.SecondToLastScanSuccess, + Uptime: h.Uptime, + Downtime: h.Downtime, + SuccessfulInteractions: h.SuccessfulInteractions, + FailedInteractions: h.FailedInteractions, + LostSectors: h.LostSectors, + }, + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: h.PriceTable.convert(), + Expiry: h.PriceTableExpiry.Time, + }, + PublicKey: types.PublicKey(h.PublicKey), + 
Scanned: h.Scanned, + Settings: h.Settings.convert(), + }, + Blocked: blocked, }, - PublicKey: types.PublicKey(h.PublicKey), - Scanned: h.Scanned, - Settings: h.Settings.convert(), + Checks: checks, } } -func (hi dbHostInfo) convert() api.HostInfo { - return api.HostInfo{ - Host: hi.DBHost.convert(), +func (hi dbHostCheck) convert() api.HostCheck { + return api.HostCheck{ Gouging: api.HostGougingBreakdown{ ContractErr: hi.GougingContractErr, DownloadErr: hi.GougingDownloadErr, @@ -478,7 +488,7 @@ func (e *dbBlocklistEntry) blocks(h dbHost) bool { } // Host returns information about a host. -func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) { +func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.HostInfo, error) { var h dbHost tx := ss.db. @@ -488,22 +498,19 @@ func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.H Preload("Blocklist"). Take(&h) if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return hostdb.HostInfo{}, api.ErrHostNotFound + return api.HostInfo{}, api.ErrHostNotFound } else if tx.Error != nil { - return hostdb.HostInfo{}, tx.Error + return api.HostInfo{}, tx.Error } - return hostdb.HostInfo{ - Host: h.convert(), - Blocked: ss.isBlocked(h), - }, nil + return h.convert(ss.isBlocked(h)), nil } func (ss *SQLStore) HostInfo(ctx context.Context, autopilotID string, hk types.PublicKey) (hi api.HostInfo, err error) { err = ss.db.Transaction(func(tx *gorm.DB) error { - var entity dbHostInfo + var entity dbHostCheck if err := tx. - Model(&dbHostInfo{}). + Model(&dbHostCheck{}). Where("db_autopilot_id = (?)", gorm.Expr("SELECT id FROM autopilots WHERE identifier = ?", autopilotID)). Where("db_host_id = (?)", gorm.Expr("SELECT id FROM hosts WHERE public_key = ?", publicKey(hk))). Preload("DBHost"). 
@@ -526,63 +533,13 @@ func (ss *SQLStore) HostInfo(ctx context.Context, autopilotID string, hk types.P } else if err != nil { return err } - hi = entity.convert() - return nil - }) - return -} - -func (ss *SQLStore) HostInfos(ctx context.Context, autopilotID string, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (his []api.HostInfo, err error) { - if offset < 0 { - return nil, ErrNegativeOffset - } - - err = ss.db.Transaction(func(tx *gorm.DB) error { - // fetch ap id - var apID uint - if err := tx. - Model(&dbAutopilot{}). - Where("identifier = ?", autopilotID). - Select("id"). - Take(&apID). - Error; errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrAutopilotNotFound - } else if err != nil { - return err - } - - // prepare query - query := tx. - Model(&dbHostInfo{}). - Where("db_autopilot_id = ?", apID). - Joins("DBHost") - - // apply filters - query = query.Scopes( - hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist(), "DBHost"), - hostUsabilityFilter(usabilityMode), - hostNetAddress(addressContains), - hostPublicKey(keyIn), - ) - - // fetch host info - var infos []dbHostInfo - if err := query. - Offset(offset). - Limit(limit). - Find(&infos). 
- Error; err != nil { - return err - } - for _, hi := range infos { - his = append(his, hi.convert()) - } + // hi = entity.convert() return nil }) return } -func (ss *SQLStore) UpdateHostInfo(ctx context.Context, autopilotID string, hk types.PublicKey, gouging api.HostGougingBreakdown, score api.HostScoreBreakdown, usability api.HostUsabilityBreakdown) (err error) { +func (ss *SQLStore) UpdateHostCheck(ctx context.Context, autopilotID string, hk types.PublicKey, hc api.HostCheck) (err error) { err = ss.db.Transaction(func(tx *gorm.DB) error { // fetch ap id var apID uint @@ -616,32 +573,32 @@ func (ss *SQLStore) UpdateHostInfo(ctx context.Context, autopilotID string, hk t Columns: []clause.Column{{Name: "db_autopilot_id"}, {Name: "db_host_id"}}, UpdateAll: true, }). - Create(&dbHostInfo{ + Create(&dbHostCheck{ DBAutopilotID: apID, DBHostID: hID, - UsabilityBlocked: usability.Blocked, - UsabilityOffline: usability.Offline, - UsabilityLowScore: usability.LowScore, - UsabilityRedundantIP: usability.RedundantIP, - UsabilityGouging: usability.Gouging, - UsabilityNotAcceptingContracts: usability.NotAcceptingContracts, - UsabilityNotAnnounced: usability.NotAnnounced, - UsabilityNotCompletingScan: usability.NotCompletingScan, - - ScoreAge: score.Age, - ScoreCollateral: score.Collateral, - ScoreInteractions: score.Interactions, - ScoreStorageRemaining: score.StorageRemaining, - ScoreUptime: score.Uptime, - ScoreVersion: score.Version, - ScorePrices: score.Prices, - - GougingContractErr: gouging.ContractErr, - GougingDownloadErr: gouging.DownloadErr, - GougingGougingErr: gouging.GougingErr, - GougingPruneErr: gouging.PruneErr, - GougingUploadErr: gouging.UploadErr, + UsabilityBlocked: hc.Usability.Blocked, + UsabilityOffline: hc.Usability.Offline, + UsabilityLowScore: hc.Usability.LowScore, + UsabilityRedundantIP: hc.Usability.RedundantIP, + UsabilityGouging: hc.Usability.Gouging, + UsabilityNotAcceptingContracts: hc.Usability.NotAcceptingContracts, + UsabilityNotAnnounced: 
hc.Usability.NotAnnounced, + UsabilityNotCompletingScan: hc.Usability.NotCompletingScan, + + ScoreAge: hc.Score.Age, + ScoreCollateral: hc.Score.Collateral, + ScoreInteractions: hc.Score.Interactions, + ScoreStorageRemaining: hc.Score.StorageRemaining, + ScoreUptime: hc.Score.Uptime, + ScoreVersion: hc.Score.Version, + ScorePrices: hc.Score.Prices, + + GougingContractErr: hc.Gouging.ContractErr, + GougingDownloadErr: hc.Gouging.DownloadErr, + GougingGougingErr: hc.Gouging.GougingErr, + GougingPruneErr: hc.Gouging.PruneErr, + GougingUploadErr: hc.Gouging.UploadErr, }). Error }) @@ -683,7 +640,7 @@ func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, return hostAddresses, err } -func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]hostdb.HostInfo, error) { +func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostInfo, error) { if offset < 0 { return nil, ErrNegativeOffset } @@ -698,11 +655,13 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains } // prepare query - query := ss.db.Scopes( - hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist(), "hosts"), - hostNetAddress(addressContains), - hostPublicKey(keyIn), - ) + query := ss.db. + Model(&dbHost{}). + Scopes( + hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist(), "hosts"), + hostNetAddress(addressContains), + hostPublicKey(keyIn), + ).Preload("Checks.DBAutopilot") // preload allowlist and blocklist if filterMode == api.HostFilterModeAll { @@ -711,24 +670,20 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains Preload("Blocklist") } - var hosts []hostdb.HostInfo + var hosts []api.HostInfo var fullHosts []dbHost err := query. Offset(offset). Limit(limit). 
FindInBatches(&fullHosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { for _, fh := range fullHosts { + var blocked bool if filterMode == api.HostFilterModeAll { - hosts = append(hosts, hostdb.HostInfo{ - Host: fh.convert(), - Blocked: ss.isBlocked(fh), - }) + blocked = ss.isBlocked(fh) } else { - hosts = append(hosts, hostdb.HostInfo{ - Host: fh.convert(), - Blocked: filterMode == api.HostFilterModeBlocked, - }) + blocked = filterMode == api.HostFilterModeBlocked } + hosts = append(hosts, fh.convert(blocked)) } return nil }). @@ -740,7 +695,7 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains } // Hosts returns non-blocked hosts at given offset and limit. -func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]hostdb.HostInfo, error) { +func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]api.HostInfo, error) { return ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, offset, limit) } @@ -1193,24 +1148,6 @@ func hostFilter(filterMode string, hasAllowlist, hasBlocklist bool, hostTableAli } } -// hostUsabilityFilter can be used as a scope to filter hosts based on their -// usability mode, return either all, usable or unusable hosts. hosts. -func hostUsabilityFilter(usabilityMode string) func(*gorm.DB) *gorm.DB { - return func(db *gorm.DB) *gorm.DB { - switch usabilityMode { - case api.UsabilityFilterModeUsable: - db = db.Where("usability_blocked = ? AND usability_offline = ? AND usability_low_score = ? AND usability_redundant_ip = ? AND usability_gouging = ? AND usability_not_accepting_contracts = ? AND usability_not_announced = ? AND usability_not_completing_scan = ?", - false, false, false, false, false, false, false, false) - case api.UsabilityFilterModeUnusable: - db.Where("usability_blocked = ? OR usability_offline = ? OR usability_low_score = ? OR usability_redundant_ip = ? OR usability_gouging = ? OR usability_not_accepting_contracts = ? OR usability_not_announced = ? 
OR usability_not_completing_scan = ?", - true, true, true, true, true, true, true, true) - case api.UsabilityFilterModeAll: - // nothing to do - } - return db - } -} - func (ss *SQLStore) isBlocked(h dbHost) (blocked bool) { ss.mu.Lock() defer ss.mu.Unlock() diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index c9ab2ba7e..ec3bc17be 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -248,30 +248,163 @@ func TestSearchHosts(t *testing.T) { // add 3 hosts var hks []types.PublicKey - for i := 0; i < 3; i++ { - if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("-%v-", i+1)); err != nil { + for i := 1; i <= 3; i++ { + if err := ss.addCustomTestHost(types.PublicKey{byte(i)}, fmt.Sprintf("foo.com:100%d", i)); err != nil { t.Fatal(err) } hks = append(hks, types.PublicKey{byte(i)}) } hk1, hk2, hk3 := hks[0], hks[1], hks[2] - // Search by address. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", nil, 0, -1); err != nil || len(hosts) != 1 { + // search all hosts + his, err := ss.SearchHosts(context.Background(), api.HostFilterModeAll, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 3 { + t.Fatal("unexpected") + } + + // assert offset & limit are taken into account + his, err = ss.SearchHosts(context.Background(), api.HostFilterModeAll, "", nil, 0, 1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } + his, err = ss.SearchHosts(context.Background(), api.HostFilterModeAll, "", nil, 1, 2) + if err != nil { + t.Fatal(err) + } else if len(his) != 2 { + t.Fatal("unexpected") + } + his, err = ss.SearchHosts(context.Background(), api.HostFilterModeAll, "", nil, 3, 1) + if err != nil { + t.Fatal(err) + } else if len(his) != 0 { + t.Fatal("unexpected") + } + + // assert address and key filters are taken into account + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "com:1001", nil, 0, -1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", 
len(hosts), err) } - // Filter by key. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 2 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || len(hosts) != 2 { t.Fatal("unexpected", len(hosts), err) } - // Filter by address and key. - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "1", []types.PublicKey{hk1, hk2}, 0, -1); err != nil || len(hosts) != 1 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "com:1002", []types.PublicKey{hk2, hk3}, 0, -1); err != nil || len(hosts) != 1 { t.Fatal("unexpected", len(hosts), err) } - // Filter by key and limit results - if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "3", []types.PublicKey{hk3}, 0, -1); err != nil || len(hosts) != 1 { + if hosts, err := ss.SearchHosts(ctx, api.HostFilterModeAll, "com:1002", []types.PublicKey{hk1}, 0, -1); err != nil || len(hosts) != 0 { t.Fatal("unexpected", len(hosts), err) } + + // assert host filter mode is taken into account + err = ss.UpdateHostBlocklistEntries(context.Background(), []string{"foo.com:1001"}, nil, false) + if err != nil { + t.Fatal(err) + } + his, err = ss.SearchHosts(context.Background(), api.HostFilterModeAllowed, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 2 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{2}) || his[1].Host.PublicKey != (types.PublicKey{3}) { + t.Fatal("unexpected", his[0].Host.PublicKey, his[1].Host.PublicKey) + } + his, err = ss.SearchHosts(context.Background(), api.HostFilterModeBlocked, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if len(his) != 1 { + t.Fatal("unexpected") + } else if his[0].Host.PublicKey != (types.PublicKey{1}) { + t.Fatal("unexpected", his) + } + err = ss.UpdateHostBlocklistEntries(context.Background(), nil, nil, true) + if err != nil { + t.Fatal(err) + } + + // add two 
autopilots + ap1 := "ap1" + err = ss.UpdateAutopilot(context.Background(), api.Autopilot{ID: ap1}) + if err != nil { + t.Fatal(err) + } + ap2 := "ap2" + err = ss.UpdateAutopilot(context.Background(), api.Autopilot{ID: ap2}) + if err != nil { + t.Fatal(err) + } + + // add host checks, h1 gets ap1 and h2 gets both, h3 gets none + h1c := newTestHostCheck() + h1c.Score.Age = .1 + err = ss.UpdateHostCheck(context.Background(), ap1, hk1, h1c) + if err != nil { + t.Fatal(err) + } + h2c1 := newTestHostCheck() + h2c1.Score.Age = .21 + err = ss.UpdateHostCheck(context.Background(), ap1, hk2, h2c1) + if err != nil { + t.Fatal(err) + } + h2c2 := newTestHostCheck() + h2c2.Score.Age = .22 + err = ss.UpdateHostCheck(context.Background(), ap2, hk2, h2c2) + if err != nil { + t.Fatal(err) + } + + // assert there are currently 3 checks + var cnt int64 + err = ss.db.Model(&dbHostCheck{}).Count(&cnt).Error + if err != nil { + t.Fatal(err) + } else if cnt != 3 { + t.Fatal("unexpected", cnt) + } + + // fetch all hosts + his, err = ss.SearchHosts(context.Background(), api.HostFilterModeAll, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if cnt != 3 { + t.Fatal("unexpected", cnt) + } + + // assert h1 and h2 have the expected checks + if c1, ok := his[0].Checks[ap1]; !ok || c1 != h1c { + t.Fatal("unexpected", c1, ok) + } else if c2, ok := his[1].Checks[ap1]; !ok || c2 != h2c1 { + t.Fatal("unexpected", c2, ok) + } else if c3, ok := his[1].Checks[ap2]; !ok || c3 != h2c2 { + t.Fatal("unexpected", c3, ok) + } + + // assert cascade delete on host + err = ss.db.Exec("DELETE FROM hosts WHERE public_key = ?", publicKey(types.PublicKey{1})).Error + if err != nil { + t.Fatal(err) + } + err = ss.db.Model(&dbHostCheck{}).Count(&cnt).Error + if err != nil { + t.Fatal(err) + } else if cnt != 2 { + t.Fatal("unexpected", cnt) + } + + // assert cascade delete on autopilot + err = ss.db.Exec("DELETE FROM autopilots WHERE identifier IN (?,?)", ap1, ap2).Error + if err != nil { + t.Fatal(err) + } + 
err = ss.db.Model(&dbHostCheck{}).Count(&cnt).Error + if err != nil { + t.Fatal(err) + } else if cnt != 0 { + t.Fatal("unexpected", cnt) + } } // TestRecordScan is a test for recording scans. @@ -1064,222 +1197,6 @@ func TestAnnouncementMaxAge(t *testing.T) { } } -func TestHostInfo(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - - // fetch info for a non-existing autopilot - _, err := ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) - if !errors.Is(err, api.ErrAutopilotNotFound) { - t.Fatal(err) - } - - // add autopilot - err = ss.UpdateAutopilot(context.Background(), api.Autopilot{ID: "foo"}) - if err != nil { - t.Fatal(err) - } - - // fetch info for a non-existing host - _, err = ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) - if !errors.Is(err, api.ErrHostNotFound) { - t.Fatal(err) - } - - // add host - err = ss.addTestHost(types.PublicKey{1}) - if err != nil { - t.Fatal(err) - } - h, err := ss.Host(context.Background(), types.PublicKey{1}) - if err != nil { - t.Fatal(err) - } - - // fetch non-existing info - _, err = ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) - if !errors.Is(err, api.ErrHostInfoNotFound) { - t.Fatal(err) - } - - // add host info - want := newTestHostInfo(h.Host) - err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) - if err != nil { - t.Fatal(err) - } - - // fetch info - got, err := ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) - if err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(got, want) { - t.Fatal("mismatch", cmp.Diff(got, want)) - } - - // update info - want.Score.Age = 0 - err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) - if err != nil { - t.Fatal(err) - } - - // fetch info - got, err = ss.HostInfo(context.Background(), "foo", types.PublicKey{1}) - if err != nil { - t.Fatal(err) - } else if 
!reflect.DeepEqual(got, want) { - t.Fatal("mismatch") - } - - // add another host info - err = ss.addCustomTestHost(types.PublicKey{2}, "bar.com:1000") - if err != nil { - t.Fatal(err) - } - err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{2}, want.Gouging, want.Score, want.Usability) - if err != nil { - t.Fatal(err) - } - - // fetch all infos for autopilot - his, err := ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) - if err != nil { - t.Fatal(err) - } else if len(his) != 2 { - t.Fatal("unexpected") - } else if his[0].Host.PublicKey != (types.PublicKey{1}) || his[1].Host.PublicKey != (types.PublicKey{2}) { - t.Fatal("unexpected", his) - } - - // fetch infos using offset & limit - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, 1) - if err != nil { - t.Fatal(err) - } else if len(his) != 1 { - t.Fatal("unexpected") - } - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 1, 1) - if err != nil { - t.Fatal(err) - } else if len(his) != 1 { - t.Fatal("unexpected") - } - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 2, 1) - if err != nil { - t.Fatal(err) - } else if len(his) != 0 { - t.Fatal("unexpected") - } - - // fetch infos using net addresses - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "bar", nil, 0, -1) - if err != nil { - t.Fatal(err) - } else if len(his) != 1 { - t.Fatal("unexpected") - } else if his[0].Host.PublicKey != (types.PublicKey{2}) { - t.Fatal("unexpected", his) - } - - // fetch infos using keyIn - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{{2}}, 0, -1) - if err != nil { - t.Fatal(err) - } else if len(his) != 1 { - t.Fatal("unexpected") 
- } else if his[0].Host.PublicKey != (types.PublicKey{2}) { - t.Fatal("unexpected", his) - } - - // fetch infos using mode filters - err = ss.UpdateHostBlocklistEntries(context.Background(), []string{"bar.com:1000"}, nil, false) - if err != nil { - t.Fatal(err) - } - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAllowed, api.UsabilityFilterModeAll, "", nil, 0, -1) - if err != nil { - t.Fatal(err) - } else if len(his) != 1 { - t.Fatal("unexpected") - } else if his[0].Host.PublicKey != (types.PublicKey{1}) { - t.Fatal("unexpected", his) - } - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) - if err != nil { - t.Fatal(err) - } else if len(his) != 1 { - t.Fatal("unexpected") - } else if his[0].Host.PublicKey != (types.PublicKey{2}) { - t.Fatal("unexpected", his) - } - err = ss.UpdateHostBlocklistEntries(context.Background(), nil, nil, true) - if err != nil { - t.Fatal(err) - } - - // fetch infos using usability filters - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) - if err != nil { - t.Fatal(err) - } else if len(his) != 0 { - t.Fatal("unexpected") - } - - // update info - want.Usability.Blocked = false - want.Usability.Offline = false - want.Usability.LowScore = false - want.Usability.RedundantIP = false - want.Usability.Gouging = false - want.Usability.NotAcceptingContracts = false - want.Usability.NotAnnounced = false - want.Usability.NotCompletingScan = false - err = ss.UpdateHostInfo(context.Background(), "foo", types.PublicKey{1}, want.Gouging, want.Score, want.Usability) - if err != nil { - t.Fatal(err) - } - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) - if err != nil { - t.Fatal(err) - } else if len(his) != 1 { - t.Fatal("unexpected") - } else if his[0].Host.PublicKey != (types.PublicKey{1}) { - 
t.Fatal("unexpected", his) - } - - // assert cascade delete on host - err = ss.db.Exec("DELETE FROM hosts WHERE public_key = ?", publicKey(types.PublicKey{1})).Error - if err != nil { - t.Fatal(err) - } - his, err = ss.HostInfos(context.Background(), "foo", api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) - if err != nil { - t.Fatal(err) - } else if len(his) != 0 { - t.Fatal("unexpected") - } - - // assert cascade delete on autopilot - var cnt uint64 - err = ss.db.Raw("SELECT COUNT(*) FROM host_infos").Scan(&cnt).Error - if err != nil { - t.Fatal(err) - } else if cnt == 0 { - t.Fatal("unexpected", cnt) - } - err = ss.db.Exec("DELETE FROM autopilots WHERE identifier = ?", "foo").Error - if err != nil { - t.Fatal(err) - } - err = ss.db.Raw("SELECT COUNT(*) FROM host_infos").Scan(&cnt).Error - if err != nil { - t.Fatal(err) - } else if cnt != 0 { - t.Fatal("unexpected", cnt) - } -} - // addTestHosts adds 'n' hosts to the db and returns their keys. func (s *SQLStore) addTestHosts(n int) (keys []types.PublicKey, err error) { cnt, err := s.contractsCount() @@ -1373,9 +1290,9 @@ func newTestTransaction(ha modules.HostAnnouncement, sk types.PrivateKey) stypes return stypes.Transaction{ArbitraryData: [][]byte{buf.Bytes()}} } -func newTestHostInfo(h hostdb.Host) api.HostInfo { - return api.HostInfo{ - Host: h, +func newTestHostCheck() api.HostCheck { + return api.HostCheck{ + Gouging: api.HostGougingBreakdown{ ContractErr: "foo", DownloadErr: "bar", diff --git a/stores/metadata_test.go b/stores/metadata_test.go index c16f927d1..eb082fb54 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -440,12 +440,12 @@ func TestContractsForHost(t *testing.T) { } contracts, _ := contractsForHost(ss.db, hosts[0]) - if len(contracts) != 1 || contracts[0].Host.convert().PublicKey.String() != hosts[0].convert().PublicKey.String() { + if len(contracts) != 1 || types.PublicKey(contracts[0].Host.PublicKey).String() != 
types.PublicKey(hosts[0].PublicKey).String() { t.Fatal("unexpected", len(contracts), contracts) } contracts, _ = contractsForHost(ss.db, hosts[1]) - if len(contracts) != 1 || contracts[0].Host.convert().PublicKey.String() != hosts[1].convert().PublicKey.String() { + if len(contracts) != 1 || types.PublicKey(contracts[0].Host.PublicKey).String() != types.PublicKey(hosts[1].PublicKey).String() { t.Fatalf("unexpected contracts, %+v", contracts) } } diff --git a/stores/migrations.go b/stores/migrations.go index 9f874935a..4ac6b755e 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -63,9 +63,9 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { }, }, { - ID: "00007_host_info", + ID: "00007_host_checks", Migrate: func(tx *gorm.DB) error { - return performMigration(tx, dbIdentifier, "00007_host_info", logger) + return performMigration(tx, dbIdentifier, "00007_host_checks", logger) }, }, } diff --git a/stores/migrations/mysql/main/migration_00007_host_checks.sql b/stores/migrations/mysql/main/migration_00007_host_checks.sql new file mode 100644 index 000000000..f96b9853c --- /dev/null +++ b/stores/migrations/mysql/main/migration_00007_host_checks.sql @@ -0,0 +1,52 @@ +-- dbHostCheck +CREATE TABLE `host_checks` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + + `db_autopilot_id` bigint unsigned NOT NULL, + `db_host_id` bigint unsigned NOT NULL, + + `usability_blocked` boolean NOT NULL DEFAULT false, + `usability_offline` boolean NOT NULL DEFAULT false, + `usability_low_score` boolean NOT NULL DEFAULT false, + `usability_redundant_ip` boolean NOT NULL DEFAULT false, + `usability_gouging` boolean NOT NULL DEFAULT false, + `usability_not_accepting_contracts` boolean NOT NULL DEFAULT false, + `usability_not_announced` boolean NOT NULL DEFAULT false, + `usability_not_completing_scan` boolean NOT NULL DEFAULT false, + + `score_age` double NOT NULL, + `score_collateral` double NOT NULL, + 
`score_interactions` double NOT NULL, + `score_storage_remaining` double NOT NULL, + `score_uptime` double NOT NULL, + `score_version` double NOT NULL, + `score_prices` double NOT NULL, + + `gouging_contract_err` text, + `gouging_download_err` text, + `gouging_gouging_err` text, + `gouging_prune_err` text, + `gouging_upload_err` text, + + PRIMARY KEY (`id`), + UNIQUE KEY `idx_host_checks_id` (`db_autopilot_id`, `db_host_id`), + INDEX `idx_host_checks_usability_blocked` (`usability_blocked`), + INDEX `idx_host_checks_usability_offline` (`usability_offline`), + INDEX `idx_host_checks_usability_low_score` (`usability_low_score`), + INDEX `idx_host_checks_usability_redundant_ip` (`usability_redundant_ip`), + INDEX `idx_host_checks_usability_gouging` (`usability_gouging`), + INDEX `idx_host_checks_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), + INDEX `idx_host_checks_usability_not_announced` (`usability_not_announced`), + INDEX `idx_host_checks_usability_not_completing_scan` (`usability_not_completing_scan`), + INDEX `idx_host_checks_score_age` (`score_age`), + INDEX `idx_host_checks_score_collateral` (`score_collateral`), + INDEX `idx_host_checks_score_interactions` (`score_interactions`), + INDEX `idx_host_checks_score_storage_remaining` (`score_storage_remaining`), + INDEX `idx_host_checks_score_uptime` (`score_uptime`), + INDEX `idx_host_checks_score_version` (`score_version`), + INDEX `idx_host_checks_score_prices` (`score_prices`), + + CONSTRAINT `fk_host_checks_autopilot` FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_host_checks_host` FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/mysql/main/migration_00007_host_info.sql b/stores/migrations/mysql/main/migration_00007_host_info.sql deleted file mode 100644 index c13f5c396..000000000 --- 
a/stores/migrations/mysql/main/migration_00007_host_info.sql +++ /dev/null @@ -1,52 +0,0 @@ --- dbHostInfo -CREATE TABLE `host_infos` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT, - `created_at` datetime(3) DEFAULT NULL, - - `db_autopilot_id` bigint unsigned NOT NULL, - `db_host_id` bigint unsigned NOT NULL, - - `usability_blocked` boolean NOT NULL DEFAULT false, - `usability_offline` boolean NOT NULL DEFAULT false, - `usability_low_score` boolean NOT NULL DEFAULT false, - `usability_redundant_ip` boolean NOT NULL DEFAULT false, - `usability_gouging` boolean NOT NULL DEFAULT false, - `usability_not_accepting_contracts` boolean NOT NULL DEFAULT false, - `usability_not_announced` boolean NOT NULL DEFAULT false, - `usability_not_completing_scan` boolean NOT NULL DEFAULT false, - - `score_age` double NOT NULL, - `score_collateral` double NOT NULL, - `score_interactions` double NOT NULL, - `score_storage_remaining` double NOT NULL, - `score_uptime` double NOT NULL, - `score_version` double NOT NULL, - `score_prices` double NOT NULL, - - `gouging_contract_err` text, - `gouging_download_err` text, - `gouging_gouging_err` text, - `gouging_prune_err` text, - `gouging_upload_err` text, - - PRIMARY KEY (`id`), - UNIQUE KEY `idx_host_infos_id` (`db_autopilot_id`, `db_host_id`), - INDEX `idx_host_infos_usability_blocked` (`usability_blocked`), - INDEX `idx_host_infos_usability_offline` (`usability_offline`), - INDEX `idx_host_infos_usability_low_score` (`usability_low_score`), - INDEX `idx_host_infos_usability_redundant_ip` (`usability_redundant_ip`), - INDEX `idx_host_infos_usability_gouging` (`usability_gouging`), - INDEX `idx_host_infos_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), - INDEX `idx_host_infos_usability_not_announced` (`usability_not_announced`), - INDEX `idx_host_infos_usability_not_completing_scan` (`usability_not_completing_scan`), - INDEX `idx_host_infos_score_age` (`score_age`), - INDEX `idx_host_infos_score_collateral` 
(`score_collateral`), - INDEX `idx_host_infos_score_interactions` (`score_interactions`), - INDEX `idx_host_infos_score_storage_remaining` (`score_storage_remaining`), - INDEX `idx_host_infos_score_uptime` (`score_uptime`), - INDEX `idx_host_infos_score_version` (`score_version`), - INDEX `idx_host_infos_score_prices` (`score_prices`), - - CONSTRAINT `fk_host_infos_autopilot` FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_host_infos_host` FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index e39b7f963..446b2a805 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -422,8 +422,8 @@ CREATE TABLE `object_user_metadata` ( CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; --- dbHostInfo -CREATE TABLE `host_infos` ( +-- dbHostCheck +CREATE TABLE `host_checks` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `created_at` datetime(3) DEFAULT NULL, @@ -454,25 +454,25 @@ CREATE TABLE `host_infos` ( `gouging_upload_err` text, PRIMARY KEY (`id`), - UNIQUE KEY `idx_host_infos_id` (`db_autopilot_id`, `db_host_id`), - INDEX `idx_host_infos_usability_blocked` (`usability_blocked`), - INDEX `idx_host_infos_usability_offline` (`usability_offline`), - INDEX `idx_host_infos_usability_low_score` (`usability_low_score`), - INDEX `idx_host_infos_usability_redundant_ip` (`usability_redundant_ip`), - INDEX `idx_host_infos_usability_gouging` (`usability_gouging`), - INDEX `idx_host_infos_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), - INDEX `idx_host_infos_usability_not_announced` (`usability_not_announced`), - INDEX 
`idx_host_infos_usability_not_completing_scan` (`usability_not_completing_scan`), - INDEX `idx_host_infos_score_age` (`score_age`), - INDEX `idx_host_infos_score_collateral` (`score_collateral`), - INDEX `idx_host_infos_score_interactions` (`score_interactions`), - INDEX `idx_host_infos_score_storage_remaining` (`score_storage_remaining`), - INDEX `idx_host_infos_score_uptime` (`score_uptime`), - INDEX `idx_host_infos_score_version` (`score_version`), - INDEX `idx_host_infos_score_prices` (`score_prices`), - - CONSTRAINT `fk_host_infos_autopilot` FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_host_infos_host` FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE + UNIQUE KEY `idx_host_checks_id` (`db_autopilot_id`, `db_host_id`), + INDEX `idx_host_checks_usability_blocked` (`usability_blocked`), + INDEX `idx_host_checks_usability_offline` (`usability_offline`), + INDEX `idx_host_checks_usability_low_score` (`usability_low_score`), + INDEX `idx_host_checks_usability_redundant_ip` (`usability_redundant_ip`), + INDEX `idx_host_checks_usability_gouging` (`usability_gouging`), + INDEX `idx_host_checks_usability_not_accepting_contracts` (`usability_not_accepting_contracts`), + INDEX `idx_host_checks_usability_not_announced` (`usability_not_announced`), + INDEX `idx_host_checks_usability_not_completing_scan` (`usability_not_completing_scan`), + INDEX `idx_host_checks_score_age` (`score_age`), + INDEX `idx_host_checks_score_collateral` (`score_collateral`), + INDEX `idx_host_checks_score_interactions` (`score_interactions`), + INDEX `idx_host_checks_score_storage_remaining` (`score_storage_remaining`), + INDEX `idx_host_checks_score_uptime` (`score_uptime`), + INDEX `idx_host_checks_score_version` (`score_version`), + INDEX `idx_host_checks_score_prices` (`score_prices`), + + CONSTRAINT `fk_host_checks_autopilot` FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, + 
CONSTRAINT `fk_host_checks_host` FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- create default bucket diff --git a/stores/migrations/sqlite/main/migration_00007_host_checks.sql b/stores/migrations/sqlite/main/migration_00007_host_checks.sql new file mode 100644 index 000000000..da13460c6 --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00007_host_checks.sql @@ -0,0 +1,52 @@ +-- dbHostCheck +CREATE TABLE `host_checks` ( + `id` INTEGER PRIMARY KEY AUTOINCREMENT, + `created_at` datetime, + + `db_autopilot_id` INTEGER NOT NULL, + `db_host_id` INTEGER NOT NULL, + + `usability_blocked` INTEGER NOT NULL DEFAULT 0, + `usability_offline` INTEGER NOT NULL DEFAULT 0, + `usability_low_score` INTEGER NOT NULL DEFAULT 0, + `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, + `usability_gouging` INTEGER NOT NULL DEFAULT 0, + `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, + `usability_not_announced` INTEGER NOT NULL DEFAULT 0, + `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, + + `score_age` REAL NOT NULL, + `score_collateral` REAL NOT NULL, + `score_interactions` REAL NOT NULL, + `score_storage_remaining` REAL NOT NULL, + `score_uptime` REAL NOT NULL, + `score_version` REAL NOT NULL, + `score_prices` REAL NOT NULL, + + `gouging_contract_err` TEXT, + `gouging_download_err` TEXT, + `gouging_gouging_err` TEXT, + `gouging_prune_err` TEXT, + `gouging_upload_err` TEXT, + + FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, + FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE +); + +-- Indexes creation +CREATE UNIQUE INDEX `idx_host_checks_id` ON `host_checks` (`db_autopilot_id`, `db_host_id`); +CREATE INDEX `idx_host_checks_usability_blocked` ON `host_checks` (`usability_blocked`); +CREATE INDEX `idx_host_checks_usability_offline` ON `host_checks` (`usability_offline`); +CREATE INDEX 
`idx_host_checks_usability_low_score` ON `host_checks` (`usability_low_score`); +CREATE INDEX `idx_host_checks_usability_redundant_ip` ON `host_checks` (`usability_redundant_ip`); +CREATE INDEX `idx_host_checks_usability_gouging` ON `host_checks` (`usability_gouging`); +CREATE INDEX `idx_host_checks_usability_not_accepting_contracts` ON `host_checks` (`usability_not_accepting_contracts`); +CREATE INDEX `idx_host_checks_usability_not_announced` ON `host_checks` (`usability_not_announced`); +CREATE INDEX `idx_host_checks_usability_not_completing_scan` ON `host_checks` (`usability_not_completing_scan`); +CREATE INDEX `idx_host_checks_score_age` ON `host_checks` (`score_age`); +CREATE INDEX `idx_host_checks_score_collateral` ON `host_checks` (`score_collateral`); +CREATE INDEX `idx_host_checks_score_interactions` ON `host_checks` (`score_interactions`); +CREATE INDEX `idx_host_checks_score_storage_remaining` ON `host_checks` (`score_storage_remaining`); +CREATE INDEX `idx_host_checks_score_uptime` ON `host_checks` (`score_uptime`); +CREATE INDEX `idx_host_checks_score_version` ON `host_checks` (`score_version`); +CREATE INDEX `idx_host_checks_score_prices` ON `host_checks` (`score_prices`); diff --git a/stores/migrations/sqlite/main/migration_00007_host_info.sql b/stores/migrations/sqlite/main/migration_00007_host_info.sql deleted file mode 100644 index 910dd637c..000000000 --- a/stores/migrations/sqlite/main/migration_00007_host_info.sql +++ /dev/null @@ -1,52 +0,0 @@ --- dbHostInfo -CREATE TABLE `host_infos` ( - `id` INTEGER PRIMARY KEY AUTOINCREMENT, - `created_at` datetime, - - `db_autopilot_id` INTEGER NOT NULL, - `db_host_id` INTEGER NOT NULL, - - `usability_blocked` INTEGER NOT NULL DEFAULT 0, - `usability_offline` INTEGER NOT NULL DEFAULT 0, - `usability_low_score` INTEGER NOT NULL DEFAULT 0, - `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, - `usability_gouging` INTEGER NOT NULL DEFAULT 0, - `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, 
- `usability_not_announced` INTEGER NOT NULL DEFAULT 0, - `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, - - `score_age` REAL NOT NULL, - `score_collateral` REAL NOT NULL, - `score_interactions` REAL NOT NULL, - `score_storage_remaining` REAL NOT NULL, - `score_uptime` REAL NOT NULL, - `score_version` REAL NOT NULL, - `score_prices` REAL NOT NULL, - - `gouging_contract_err` TEXT, - `gouging_download_err` TEXT, - `gouging_gouging_err` TEXT, - `gouging_prune_err` TEXT, - `gouging_upload_err` TEXT, - - FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, - FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE -); - --- Indexes creation -CREATE UNIQUE INDEX `idx_host_infos_id` ON `host_infos` (`db_autopilot_id`, `db_host_id`); -CREATE INDEX `idx_host_infos_usability_blocked` ON `host_infos` (`usability_blocked`); -CREATE INDEX `idx_host_infos_usability_offline` ON `host_infos` (`usability_offline`); -CREATE INDEX `idx_host_infos_usability_low_score` ON `host_infos` (`usability_low_score`); -CREATE INDEX `idx_host_infos_usability_redundant_ip` ON `host_infos` (`usability_redundant_ip`); -CREATE INDEX `idx_host_infos_usability_gouging` ON `host_infos` (`usability_gouging`); -CREATE INDEX `idx_host_infos_usability_not_accepting_contracts` ON `host_infos` (`usability_not_accepting_contracts`); -CREATE INDEX `idx_host_infos_usability_not_announced` ON `host_infos` (`usability_not_announced`); -CREATE INDEX `idx_host_infos_usability_not_completing_scan` ON `host_infos` (`usability_not_completing_scan`); -CREATE INDEX `idx_host_infos_score_age` ON `host_infos` (`score_age`); -CREATE INDEX `idx_host_infos_score_collateral` ON `host_infos` (`score_collateral`); -CREATE INDEX `idx_host_infos_score_interactions` ON `host_infos` (`score_interactions`); -CREATE INDEX `idx_host_infos_score_storage_remaining` ON `host_infos` (`score_storage_remaining`); -CREATE INDEX `idx_host_infos_score_uptime` ON `host_infos` 
(`score_uptime`); -CREATE INDEX `idx_host_infos_score_version` ON `host_infos` (`score_version`); -CREATE INDEX `idx_host_infos_score_prices` ON `host_infos` (`score_prices`); diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 791fce1ca..3fca53a3a 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -149,24 +149,24 @@ CREATE UNIQUE INDEX `idx_module_event_url` ON `webhooks`(`module`,`event`,`url`) CREATE TABLE `object_user_metadata` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer DEFAULT NULL,`db_multipart_upload_id` integer DEFAULT NULL,`key` text NOT NULL,`value` text, CONSTRAINT `fk_object_user_metadata` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL); CREATE UNIQUE INDEX `idx_object_user_metadata_key` ON `object_user_metadata`(`db_object_id`,`db_multipart_upload_id`,`key`); --- dbHostInfo -CREATE TABLE `host_infos` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `created_at` datetime, `db_autopilot_id` INTEGER NOT NULL, `db_host_id` INTEGER NOT NULL, `usability_blocked` INTEGER NOT NULL DEFAULT 0, `usability_offline` INTEGER NOT NULL DEFAULT 0, `usability_low_score` INTEGER NOT NULL DEFAULT 0, `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, `usability_gouging` INTEGER NOT NULL DEFAULT 0, `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, `usability_not_announced` INTEGER NOT NULL DEFAULT 0, `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, `score_age` REAL NOT NULL, `score_collateral` REAL NOT NULL, `score_interactions` REAL NOT NULL, `score_storage_remaining` REAL NOT NULL, `score_uptime` REAL NOT NULL, `score_version` REAL NOT NULL, `score_prices` REAL NOT NULL, `gouging_contract_err` TEXT, `gouging_download_err` TEXT, 
`gouging_gouging_err` TEXT, `gouging_prune_err` TEXT, `gouging_upload_err` TEXT, FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE); -CREATE UNIQUE INDEX `idx_host_infos_id` ON `host_infos` (`db_autopilot_id`, `db_host_id`); -CREATE INDEX `idx_host_infos_usability_blocked` ON `host_infos` (`usability_blocked`); -CREATE INDEX `idx_host_infos_usability_offline` ON `host_infos` (`usability_offline`); -CREATE INDEX `idx_host_infos_usability_low_score` ON `host_infos` (`usability_low_score`); -CREATE INDEX `idx_host_infos_usability_redundant_ip` ON `host_infos` (`usability_redundant_ip`); -CREATE INDEX `idx_host_infos_usability_gouging` ON `host_infos` (`usability_gouging`); -CREATE INDEX `idx_host_infos_usability_not_accepting_contracts` ON `host_infos` (`usability_not_accepting_contracts`); -CREATE INDEX `idx_host_infos_usability_not_announced` ON `host_infos` (`usability_not_announced`); -CREATE INDEX `idx_host_infos_usability_not_completing_scan` ON `host_infos` (`usability_not_completing_scan`); -CREATE INDEX `idx_host_infos_score_age` ON `host_infos` (`score_age`); -CREATE INDEX `idx_host_infos_score_collateral` ON `host_infos` (`score_collateral`); -CREATE INDEX `idx_host_infos_score_interactions` ON `host_infos` (`score_interactions`); -CREATE INDEX `idx_host_infos_score_storage_remaining` ON `host_infos` (`score_storage_remaining`); -CREATE INDEX `idx_host_infos_score_uptime` ON `host_infos` (`score_uptime`); -CREATE INDEX `idx_host_infos_score_version` ON `host_infos` (`score_version`); -CREATE INDEX `idx_host_infos_score_prices` ON `host_infos` (`score_prices`); +-- dbHostCheck +CREATE TABLE `host_checks` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `created_at` datetime, `db_autopilot_id` INTEGER NOT NULL, `db_host_id` INTEGER NOT NULL, `usability_blocked` INTEGER NOT NULL DEFAULT 0, `usability_offline` INTEGER NOT NULL DEFAULT 0, `usability_low_score` INTEGER 
NOT NULL DEFAULT 0, `usability_redundant_ip` INTEGER NOT NULL DEFAULT 0, `usability_gouging` INTEGER NOT NULL DEFAULT 0, `usability_not_accepting_contracts` INTEGER NOT NULL DEFAULT 0, `usability_not_announced` INTEGER NOT NULL DEFAULT 0, `usability_not_completing_scan` INTEGER NOT NULL DEFAULT 0, `score_age` REAL NOT NULL, `score_collateral` REAL NOT NULL, `score_interactions` REAL NOT NULL, `score_storage_remaining` REAL NOT NULL, `score_uptime` REAL NOT NULL, `score_version` REAL NOT NULL, `score_prices` REAL NOT NULL, `gouging_contract_err` TEXT, `gouging_download_err` TEXT, `gouging_gouging_err` TEXT, `gouging_prune_err` TEXT, `gouging_upload_err` TEXT, FOREIGN KEY (`db_autopilot_id`) REFERENCES `autopilots` (`id`) ON DELETE CASCADE, FOREIGN KEY (`db_host_id`) REFERENCES `hosts` (`id`) ON DELETE CASCADE); +CREATE UNIQUE INDEX `idx_host_checks_id` ON `host_checks` (`db_autopilot_id`, `db_host_id`); +CREATE INDEX `idx_host_checks_usability_blocked` ON `host_checks` (`usability_blocked`); +CREATE INDEX `idx_host_checks_usability_offline` ON `host_checks` (`usability_offline`); +CREATE INDEX `idx_host_checks_usability_low_score` ON `host_checks` (`usability_low_score`); +CREATE INDEX `idx_host_checks_usability_redundant_ip` ON `host_checks` (`usability_redundant_ip`); +CREATE INDEX `idx_host_checks_usability_gouging` ON `host_checks` (`usability_gouging`); +CREATE INDEX `idx_host_checks_usability_not_accepting_contracts` ON `host_checks` (`usability_not_accepting_contracts`); +CREATE INDEX `idx_host_checks_usability_not_announced` ON `host_checks` (`usability_not_announced`); +CREATE INDEX `idx_host_checks_usability_not_completing_scan` ON `host_checks` (`usability_not_completing_scan`); +CREATE INDEX `idx_host_checks_score_age` ON `host_checks` (`score_age`); +CREATE INDEX `idx_host_checks_score_collateral` ON `host_checks` (`score_collateral`); +CREATE INDEX `idx_host_checks_score_interactions` ON `host_checks` (`score_interactions`); +CREATE INDEX 
`idx_host_checks_score_storage_remaining` ON `host_checks` (`score_storage_remaining`); +CREATE INDEX `idx_host_checks_score_uptime` ON `host_checks` (`score_uptime`); +CREATE INDEX `idx_host_checks_score_version` ON `host_checks` (`score_version`); +CREATE INDEX `idx_host_checks_score_prices` ON `host_checks` (`score_prices`); -- create default bucket INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 7b3609c0b..6e324f67e 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -260,13 +260,20 @@ var errSectorOutOfBounds = errors.New("sector out of bounds") type hostMock struct { hk types.PublicKey - hi hostdb.HostInfo + hi api.HostInfo } func newHostMock(hk types.PublicKey) *hostMock { return &hostMock{ hk: hk, - hi: hostdb.HostInfo{Host: hostdb.Host{PublicKey: hk, Scanned: true}}, + hi: api.HostInfo{ + HostInfo: hostdb.HostInfo{ + Host: hostdb.Host{ + PublicKey: hk, + Scanned: true, + }, + }, + }, } } @@ -282,13 +289,13 @@ func newHostStoreMock() *hostStoreMock { return &hostStoreMock{hosts: make(map[types.PublicKey]*hostMock)} } -func (hs *hostStoreMock) Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) { +func (hs *hostStoreMock) Host(ctx context.Context, hostKey types.PublicKey) (api.HostInfo, error) { hs.mu.Lock() defer hs.mu.Unlock() h, ok := hs.hosts[hostKey] if !ok { - return hostdb.HostInfo{}, api.ErrHostNotFound + return api.HostInfo{}, api.ErrHostNotFound } return h.hi, nil } diff --git a/worker/worker.go b/worker/worker.go index 707a6a7f1..fda44dee6 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -110,7 +110,7 @@ type ( RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error - Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) + Host(ctx context.Context, hostKey types.PublicKey) 
(api.HostInfo, error) } ObjectStore interface { From 949e6c35477957f44843f2fa8f5323706f1c46a3 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 21 Mar 2024 20:42:07 +0100 Subject: [PATCH 101/201] all: cleanup PR --- api/host.go | 10 +++--- autopilot/autopilot.go | 10 +++--- autopilot/autopilot_test.go | 4 +-- autopilot/contractor.go | 6 ++-- autopilot/hostinfo.go | 2 +- autopilot/scanner.go | 2 +- autopilot/scanner_test.go | 6 ++-- bus/bus.go | 4 +-- bus/client/hosts.go | 4 +-- stores/hostdb.go | 66 ++++++++++--------------------------- worker/mocks_test.go | 8 ++--- worker/worker.go | 2 +- 12 files changed, 46 insertions(+), 78 deletions(-) diff --git a/api/host.go b/api/host.go index 50b058642..6c95165c9 100644 --- a/api/host.go +++ b/api/host.go @@ -24,10 +24,6 @@ var ( // ErrHostNotFound is returned when a host can't be retrieved from the // database. ErrHostNotFound = errors.New("host doesn't exist in hostdb") - - // ErrHostInfoNotFound is returned when host info can't be retrieved from - // the database. - ErrHostInfoNotFound = errors.New("host info doesn't exist in hostdb") ) type ( @@ -47,11 +43,14 @@ type ( MinRecentScanFailures uint64 `json:"minRecentScanFailures"` } + // HostsRequest is the request type for the /api/autopilot/hosts endpoint. HostsRequest struct { UsabilityMode string `json:"usabilityMode"` SearchHostsRequest } + // SearchHostsRequest is the request type for the /api/bus/search/hosts + // endpoint. 
SearchHostsRequest struct { Offset int `json:"offset"` Limit int `json:"limit"` @@ -92,7 +91,6 @@ type ( SearchHostOptions struct { AddressContains string FilterMode string - UsabilityMode string KeyIn []types.PublicKey Limit int Offset int @@ -121,7 +119,7 @@ func (opts HostsForScanningOptions) Apply(values url.Values) { } type ( - HostInfo struct { + Host struct { hostdb.HostInfo Checks map[string]HostCheck `json:"checks"` } diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 7562de960..b7b2f97d4 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -53,10 +53,10 @@ type Bus interface { PrunableData(ctx context.Context) (prunableData api.ContractsPrunableDataResponse, err error) // hostdb - Host(ctx context.Context, hostKey types.PublicKey) (api.HostInfo, error) + Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.HostInfo, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) // metrics RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error @@ -737,7 +737,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { jc.Encode(hosts) } -func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.HostInfo) (usables uint64) { +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { usable, _ := 
isUsableHost(cfg, rs, gc, host.HostInfo, smallestValidScore, 0) @@ -751,7 +751,7 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. // evaluateConfig evaluates the given configuration and if the gouging settings // are too strict for the number of contracts required by 'cfg', it will provide // a recommendation on how to loosen it. -func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.HostInfo) (resp api.ConfigEvaluationResponse) { +func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (resp api.ConfigEvaluationResponse) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) resp.Hosts = uint64(len(hosts)) @@ -866,7 +866,7 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu // optimiseGougingSetting tries to optimise one field of the gouging settings to // try and hit the target number of contracts. 
-func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []api.HostInfo) bool { +func optimiseGougingSetting(gs *api.GougingSettings, field *types.Currency, cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, hosts []api.Host) bool { if cfg.Contracts.Amount == 0 { return true // nothing to do } diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go index 2da3fc7be..2edf88516 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/autopilot_test.go @@ -14,9 +14,9 @@ import ( func TestOptimiseGougingSetting(t *testing.T) { // create 10 hosts that should all be usable - var hosts []api.HostInfo + var hosts []api.Host for i := 0; i < 10; i++ { - hosts = append(hosts, api.HostInfo{ + hosts = append(hosts, api.Host{ HostInfo: hostdb.HostInfo{ Host: hostdb.Host{ KnownSince: time.Unix(0, 0), diff --git a/autopilot/contractor.go b/autopilot/contractor.go index d376d682e..6f530d405 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -1297,7 +1297,7 @@ func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uin return minScore } -func (c *contractor) candidateHosts(ctx context.Context, hosts []api.HostInfo, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { +func (c *contractor) candidateHosts(ctx context.Context, hosts []api.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostResult, error) { start := time.Now() // fetch consensus state @@ -1311,7 +1311,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []api.HostInfo, u gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, 
state.cfg.Contracts.RenewWindow) // select unused hosts that passed a scan - var unused []api.HostInfo + var unused []api.Host var excluded, notcompletedscan int for _, h := range hosts { // filter out used hosts @@ -1612,7 +1612,7 @@ func (c *contractor) tryPerformPruning(wp *workerPool) { }() } -func (c *contractor) hostForContract(ctx context.Context, fcid types.FileContractID) (host api.HostInfo, metadata api.ContractMetadata, err error) { +func (c *contractor) hostForContract(ctx context.Context, fcid types.FileContractID) (host api.Host, metadata api.ContractMetadata, err error) { // fetch the contract metadata, err = c.ap.bus.Contract(ctx, fcid) if err != nil { diff --git a/autopilot/hostinfo.go b/autopilot/hostinfo.go index 5af554e6b..d48ce7760 100644 --- a/autopilot/hostinfo.go +++ b/autopilot/hostinfo.go @@ -66,7 +66,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api }, nil } -func (c *contractor) hostInfoFromCache(ctx context.Context, host api.HostInfo) (hi hostInfo, found bool) { +func (c *contractor) hostInfoFromCache(ctx context.Context, host api.Host) (hi hostInfo, found bool) { // grab host details from cache c.mu.Lock() hi, found = c.cachedHostInfo[host.PublicKey] diff --git a/autopilot/scanner.go b/autopilot/scanner.go index a2d30abfa..28b2e1fe9 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -31,7 +31,7 @@ type ( // a bit, we currently use inline interfaces to avoid having to update the // scanner tests with every interface change bus interface { - SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.HostInfo, error) + SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go 
index 027366662..435e03b90 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -19,7 +19,7 @@ type mockBus struct { reqs []string } -func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.HostInfo, error) { +func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) { b.reqs = append(b.reqs, fmt.Sprintf("%d-%d", opts.Offset, opts.Offset+opts.Limit)) start := opts.Offset @@ -32,9 +32,9 @@ func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ( end = len(b.hosts) } - his := make([]api.HostInfo, len(b.hosts[start:end])) + his := make([]api.Host, len(b.hosts[start:end])) for i, h := range b.hosts[start:end] { - his[i] = api.HostInfo{ + his[i] = api.Host{ HostInfo: hostdb.HostInfo{ Host: h, }, diff --git a/bus/bus.go b/bus/bus.go index 510353edd..d68e46309 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -91,13 +91,13 @@ type ( // A HostDB stores information about hosts. HostDB interface { - Host(ctx context.Context, hostKey types.PublicKey) (api.HostInfo, error) + Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) ResetLostSectors(ctx context.Context, hk types.PublicKey) error - SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostInfo, error) + SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) HostBlocklist(ctx context.Context) ([]string, error) diff 
--git a/bus/client/hosts.go b/bus/client/hosts.go index 460291dd6..f0ab56fd1 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -12,7 +12,7 @@ import ( ) // Host returns information about a particular host known to the server. -func (c *Client) Host(ctx context.Context, hostKey types.PublicKey) (h api.HostInfo, err error) { +func (c *Client) Host(ctx context.Context, hostKey types.PublicKey) (h api.Host, err error) { err = c.c.WithContext(ctx).GET(fmt.Sprintf("/host/%s", hostKey), &h) return } @@ -78,7 +78,7 @@ func (c *Client) ResetLostSectors(ctx context.Context, hostKey types.PublicKey) } // SearchHosts returns all hosts that match certain search criteria. -func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []api.HostInfo, err error) { +func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []api.Host, err error) { err = c.c.WithContext(ctx).POST("/search/hosts", api.SearchHostsRequest{ Offset: opts.Offset, Limit: opts.Limit, diff --git a/stores/hostdb.go b/stores/hostdb.go index 01a9e594e..f97af5fe9 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -312,7 +312,7 @@ func (dbAllowlistEntry) TableName() string { return "host_allowlist_entries" } func (dbBlocklistEntry) TableName() string { return "host_blocklist_entries" } // convert converts a host into a api.HostInfo -func (h dbHost) convert(blocked bool) api.HostInfo { +func (h dbHost) convert(blocked bool) api.Host { var lastScan time.Time if h.LastScan > 0 { lastScan = time.Unix(0, h.LastScan) @@ -321,7 +321,7 @@ func (h dbHost) convert(blocked bool) api.HostInfo { for _, check := range h.Checks { checks[check.DBAutopilot.Identifier] = check.convert() } - return api.HostInfo{ + return api.Host{ HostInfo: hostdb.HostInfo{ Host: hostdb.Host{ KnownSince: h.CreatedAt, @@ -488,7 +488,7 @@ func (e *dbBlocklistEntry) blocks(h dbHost) bool { } // Host returns information about a host. 
-func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.HostInfo, error) { +func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) { var h dbHost tx := ss.db. @@ -498,47 +498,14 @@ func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.Host Preload("Blocklist"). Take(&h) if errors.Is(tx.Error, gorm.ErrRecordNotFound) { - return api.HostInfo{}, api.ErrHostNotFound + return api.Host{}, api.ErrHostNotFound } else if tx.Error != nil { - return api.HostInfo{}, tx.Error + return api.Host{}, tx.Error } return h.convert(ss.isBlocked(h)), nil } -func (ss *SQLStore) HostInfo(ctx context.Context, autopilotID string, hk types.PublicKey) (hi api.HostInfo, err error) { - err = ss.db.Transaction(func(tx *gorm.DB) error { - var entity dbHostCheck - if err := tx. - Model(&dbHostCheck{}). - Where("db_autopilot_id = (?)", gorm.Expr("SELECT id FROM autopilots WHERE identifier = ?", autopilotID)). - Where("db_host_id = (?)", gorm.Expr("SELECT id FROM hosts WHERE public_key = ?", publicKey(hk))). - Preload("DBHost"). - First(&entity). - Error; errors.Is(err, gorm.ErrRecordNotFound) { - if err := tx. - Model(&dbAutopilot{}). - Where("identifier = ?", autopilotID). - First(nil). - Error; errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrAutopilotNotFound - } else if err := tx. - Model(&dbHost{}). - Where("public_key = ?", publicKey(hk)). - First(nil). 
- Error; errors.Is(err, gorm.ErrRecordNotFound) { - return api.ErrHostNotFound - } - return api.ErrHostInfoNotFound - } else if err != nil { - return err - } - // hi = entity.convert() - return nil - }) - return -} - func (ss *SQLStore) UpdateHostCheck(ctx context.Context, autopilotID string, hk types.PublicKey, hc api.HostCheck) (err error) { err = ss.db.Transaction(func(tx *gorm.DB) error { // fetch ap id @@ -640,7 +607,7 @@ func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, return hostAddresses, err } -func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.HostInfo, error) { +func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) { if offset < 0 { return nil, ErrNegativeOffset } @@ -658,10 +625,10 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains query := ss.db. Model(&dbHost{}). Scopes( - hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist(), "hosts"), + hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist()), hostNetAddress(addressContains), hostPublicKey(keyIn), - ).Preload("Checks.DBAutopilot") + ) // preload allowlist and blocklist if filterMode == api.HostFilterModeAll { @@ -670,7 +637,10 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains Preload("Blocklist") } - var hosts []api.HostInfo + // preload host checks + query = query.Preload("Checks.DBAutopilot") + + var hosts []api.Host var fullHosts []dbHost err := query. Offset(offset). @@ -695,7 +665,7 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, filterMode, addressContains } // Hosts returns non-blocked hosts at given offset and limit. 
-func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]api.HostInfo, error) { +func (ss *SQLStore) Hosts(ctx context.Context, offset, limit int) ([]api.Host, error) { return ss.SearchHosts(ctx, api.HostFilterModeAllowed, "", nil, offset, limit) } @@ -1119,22 +1089,22 @@ func hostPublicKey(keyIn []types.PublicKey) func(*gorm.DB) *gorm.DB { // hostFilter can be used as a scope to filter hosts based on their filter mode, // returning either all, allowed or blocked hosts. -func hostFilter(filterMode string, hasAllowlist, hasBlocklist bool, hostTableAlias string) func(*gorm.DB) *gorm.DB { +func hostFilter(filterMode string, hasAllowlist, hasBlocklist bool) func(*gorm.DB) *gorm.DB { return func(db *gorm.DB) *gorm.DB { switch filterMode { case api.HostFilterModeAllowed: if hasAllowlist { - db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", hostTableAlias)) + db = db.Where("EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") } if hasBlocklist { - db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", hostTableAlias)) + db = db.Where("NOT EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") } case api.HostFilterModeBlocked: if hasAllowlist { - db = db.Where(fmt.Sprintf("NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", hostTableAlias)) + db = db.Where("NOT EXISTS (SELECT 1 FROM host_allowlist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") } if hasBlocklist { - db = db.Where(fmt.Sprintf("EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = %s.id)", hostTableAlias)) + db = db.Where("EXISTS (SELECT 1 FROM host_blocklist_entry_hosts hbeh WHERE hbeh.db_host_id = hosts.id)") } if !hasAllowlist && !hasBlocklist { // if neither an allowlist nor a blocklist exist, all hosts are allowed diff --git 
a/worker/mocks_test.go b/worker/mocks_test.go index 6e324f67e..7e6d3afe9 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -260,13 +260,13 @@ var errSectorOutOfBounds = errors.New("sector out of bounds") type hostMock struct { hk types.PublicKey - hi api.HostInfo + hi api.Host } func newHostMock(hk types.PublicKey) *hostMock { return &hostMock{ hk: hk, - hi: api.HostInfo{ + hi: api.Host{ HostInfo: hostdb.HostInfo{ Host: hostdb.Host{ PublicKey: hk, @@ -289,13 +289,13 @@ func newHostStoreMock() *hostStoreMock { return &hostStoreMock{hosts: make(map[types.PublicKey]*hostMock)} } -func (hs *hostStoreMock) Host(ctx context.Context, hostKey types.PublicKey) (api.HostInfo, error) { +func (hs *hostStoreMock) Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) { hs.mu.Lock() defer hs.mu.Unlock() h, ok := hs.hosts[hostKey] if !ok { - return api.HostInfo{}, api.ErrHostNotFound + return api.Host{}, api.ErrHostNotFound } return h.hi, nil } diff --git a/worker/worker.go b/worker/worker.go index fda44dee6..7c14b1afd 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -110,7 +110,7 @@ type ( RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error - Host(ctx context.Context, hostKey types.PublicKey) (api.HostInfo, error) + Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) } ObjectStore interface { From 4d5c6491c7a8ee8b4caf6c38c62d5c7b90e14244 Mon Sep 17 00:00:00 2001 From: PJ Date: Thu, 21 Mar 2024 20:51:48 +0100 Subject: [PATCH 102/201] api: cleanup api.Host type --- api/host.go | 5 ++-- autopilot/autopilot.go | 4 +-- autopilot/autopilot_test.go | 48 +++++++++++++++---------------- autopilot/contractor.go | 6 ++-- autopilot/hostfilter.go | 3 +- autopilot/hostinfo.go | 4 +-- autopilot/scanner_test.go | 10 ++----- bus/client/hosts.go | 2 +- hostdb/hostdb.go | 6 ---- internal/test/e2e/cluster_test.go | 4 +-- 
stores/hostdb.go | 48 +++++++++++++++---------------- worker/mocks_test.go | 8 ++---- 12 files changed, 67 insertions(+), 81 deletions(-) diff --git a/api/host.go b/api/host.go index 6c95165c9..cce3336ae 100644 --- a/api/host.go +++ b/api/host.go @@ -120,8 +120,9 @@ func (opts HostsForScanningOptions) Apply(values url.Values) { type ( Host struct { - hostdb.HostInfo - Checks map[string]HostCheck `json:"checks"` + hostdb.Host + Blocked bool `json:"blocked"` + Checks map[string]HostCheck `json:"checks"` } HostCheck struct { diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index b7b2f97d4..a9fa52343 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -740,7 +740,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { - usable, _ := isUsableHost(cfg, rs, gc, host.HostInfo, smallestValidScore, 0) + usable, _ := isUsableHost(cfg, rs, gc, host, smallestValidScore, 0) if usable { usables++ } @@ -756,7 +756,7 @@ func evaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu resp.Hosts = uint64(len(hosts)) for _, host := range hosts { - usable, usableBreakdown := isUsableHost(cfg, rs, gc, host.HostInfo, 0, 0) + usable, usableBreakdown := isUsableHost(cfg, rs, gc, host, 0, 0) if usable { resp.Usable++ continue diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go index 2edf88516..a21b55c7b 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/autopilot_test.go @@ -17,33 +17,33 @@ func TestOptimiseGougingSetting(t *testing.T) { var hosts []api.Host for i := 0; i < 10; i++ { hosts = append(hosts, api.Host{ - HostInfo: hostdb.HostInfo{ - Host: hostdb.Host{ - KnownSince: time.Unix(0, 0), - 
PriceTable: hostdb.HostPriceTable{ - HostPriceTable: rhpv3.HostPriceTable{ - CollateralCost: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), - }, - }, - Settings: rhpv2.HostSettings{ - AcceptingContracts: true, - Collateral: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), - Version: "1.6.0", - }, - Interactions: hostdb.Interactions{ - Uptime: time.Hour * 1000, - LastScan: time.Now(), - LastScanSuccess: true, - SecondToLastScanSuccess: true, - TotalScans: 100, + + Host: hostdb.Host{ + KnownSince: time.Unix(0, 0), + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{ + CollateralCost: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), }, - LastAnnouncement: time.Unix(0, 0), - Scanned: true, }, - Blocked: false, + Settings: rhpv2.HostSettings{ + AcceptingContracts: true, + Collateral: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + Version: "1.6.0", + }, + Interactions: hostdb.Interactions{ + Uptime: time.Hour * 1000, + LastScan: time.Now(), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + TotalScans: 100, + }, + LastAnnouncement: time.Unix(0, 0), + Scanned: true, }, + Blocked: false, + Checks: nil, }) } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 6f530d405..049059cad 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -295,7 +295,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( for _, h := range hosts { // ignore the pricetable's HostBlockHeight by setting it to our own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, h.HostInfo, minScore, hostData[h.PublicKey]) + isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, h, minScore, hostData[h.PublicKey]) hostInfos[h.PublicKey] = hostInfo{ Usable: isUsable, UnusableResult: unusableResult, @@ -777,7 +777,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, 
contracts host.PriceTable.HostBlockHeight = cs.BlockHeight // decide whether the host is still good - usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host.HostInfo, minScore, contract.FileSize()) + usable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, contract.FileSize()) if !usable { reasons := unusableResult.reasons() toStopUsing[fcid] = strings.Join(reasons, ",") @@ -1346,7 +1346,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []api.Host, usedH // NOTE: ignore the pricetable's HostBlockHeight by setting it to our // own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - usable, result := isUsableHost(state.cfg, state.rs, gc, h.HostInfo, minScore, storedData[h.PublicKey]) + usable, result := isUsableHost(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) if usable { candidates = append(candidates, scoredHost{h.Host, result.scoreBreakdown.Score()}) continue diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 8de37221a..f41a20c94 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -11,7 +11,6 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/worker" ) @@ -176,7 +175,7 @@ func (u *unusableHostResult) keysAndValues() []interface{} { // isUsableHost returns whether the given host is usable along with a list of // reasons why it was deemed unusable. 
-func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h hostdb.HostInfo, minScore float64, storedData uint64) (bool, unusableHostResult) { +func isUsableHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h api.Host, minScore float64, storedData uint64) (bool, unusableHostResult) { if rs.Validate() != nil { panic("invalid redundancy settings were supplied - developer error") } diff --git a/autopilot/hostinfo.go b/autopilot/hostinfo.go index d48ce7760..d82062a80 100644 --- a/autopilot/hostinfo.go +++ b/autopilot/hostinfo.go @@ -52,7 +52,7 @@ func (c *contractor) HostInfo(ctx context.Context, hostKey types.PublicKey) (api // ignore the pricetable's HostBlockHeight by setting it to our own blockheight host.Host.PriceTable.HostBlockHeight = cs.BlockHeight - isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host.HostInfo, minScore, storedData) + isUsable, unusableResult := isUsableHost(state.cfg, rs, gc, host, minScore, storedData) return api.HostHandlerResponse{ Host: host.Host, Checks: &api.HostHandlerResponseChecks{ @@ -89,7 +89,7 @@ func (c *contractor) hostInfoFromCache(ctx context.Context, host api.Host) (hi h } else { state := c.ap.State() gc := worker.NewGougingChecker(state.gs, cs, state.fee, state.cfg.Contracts.Period, state.cfg.Contracts.RenewWindow) - isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host.HostInfo, minScore, storedData) + isUsable, unusableResult := isUsableHost(state.cfg, state.rs, gc, host, minScore, storedData) hi = hostInfo{ Usable: isUsable, UnusableResult: unusableResult, diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 435e03b90..860a855fe 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -32,15 +32,11 @@ func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ( end = len(b.hosts) } - his := make([]api.Host, len(b.hosts[start:end])) + hosts := make([]api.Host, 
len(b.hosts[start:end])) for i, h := range b.hosts[start:end] { - his[i] = api.Host{ - HostInfo: hostdb.HostInfo{ - Host: h, - }, - } + hosts[i] = api.Host{Host: h} } - return his, nil + return hosts, nil } func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) { diff --git a/bus/client/hosts.go b/bus/client/hosts.go index f0ab56fd1..8338d53f7 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -30,7 +30,7 @@ func (c *Client) HostBlocklist(ctx context.Context) (blocklist []string, err err } // Hosts returns 'limit' hosts at given 'offset'. -func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []hostdb.HostInfo, err error) { +func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []api.Host, err error) { values := url.Values{} opts.Apply(values) err = c.c.WithContext(ctx).GET("/hosts?"+values.Encode(), &hosts) diff --git a/hostdb/hostdb.go b/hostdb/hostdb.go index 69ed80989..1f4c341de 100644 --- a/hostdb/hostdb.go +++ b/hostdb/hostdb.go @@ -114,12 +114,6 @@ type HostPriceTable struct { Expiry time.Time `json:"expiry"` } -// HostInfo extends the host type with a field indicating whether it is blocked or not. -type HostInfo struct { - Host - Blocked bool `json:"blocked"` -} - // IsAnnounced returns whether the host has been announced. 
func (h Host) IsAnnounced() bool { return !h.LastAnnouncement.IsZero() diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index e081c2a5d..77898d4cf 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -166,7 +166,7 @@ func TestNewTestCluster(t *testing.T) { if len(hi.Checks.UnusableReasons) != 0 { t.Fatal("usable hosts don't have any reasons set") } - if reflect.DeepEqual(hi.Host, hostdb.HostInfo{}) { + if reflect.DeepEqual(hi.Host, hostdb.Host{}) { t.Fatal("host wasn't set") } } @@ -188,7 +188,7 @@ func TestNewTestCluster(t *testing.T) { if len(hi.Checks.UnusableReasons) != 0 { t.Fatal("usable hosts don't have any reasons set") } - if reflect.DeepEqual(hi.Host, hostdb.HostInfo{}) { + if reflect.DeepEqual(hi.Host, hostdb.Host{}) { t.Fatal("host wasn't set") } allHosts[hi.Host.PublicKey] = struct{}{} diff --git a/stores/hostdb.go b/stores/hostdb.go index f97af5fe9..b91f85601 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -322,33 +322,31 @@ func (h dbHost) convert(blocked bool) api.Host { checks[check.DBAutopilot.Identifier] = check.convert() } return api.Host{ - HostInfo: hostdb.HostInfo{ - Host: hostdb.Host{ - KnownSince: h.CreatedAt, - LastAnnouncement: h.LastAnnouncement, - NetAddress: h.NetAddress, - Interactions: hostdb.Interactions{ - TotalScans: h.TotalScans, - LastScan: lastScan, - LastScanSuccess: h.LastScanSuccess, - SecondToLastScanSuccess: h.SecondToLastScanSuccess, - Uptime: h.Uptime, - Downtime: h.Downtime, - SuccessfulInteractions: h.SuccessfulInteractions, - FailedInteractions: h.FailedInteractions, - LostSectors: h.LostSectors, - }, - PriceTable: hostdb.HostPriceTable{ - HostPriceTable: h.PriceTable.convert(), - Expiry: h.PriceTableExpiry.Time, - }, - PublicKey: types.PublicKey(h.PublicKey), - Scanned: h.Scanned, - Settings: h.Settings.convert(), + Host: hostdb.Host{ + KnownSince: h.CreatedAt, + LastAnnouncement: h.LastAnnouncement, + NetAddress: h.NetAddress, + 
Interactions: hostdb.Interactions{ + TotalScans: h.TotalScans, + LastScan: lastScan, + LastScanSuccess: h.LastScanSuccess, + SecondToLastScanSuccess: h.SecondToLastScanSuccess, + Uptime: h.Uptime, + Downtime: h.Downtime, + SuccessfulInteractions: h.SuccessfulInteractions, + FailedInteractions: h.FailedInteractions, + LostSectors: h.LostSectors, }, - Blocked: blocked, + PriceTable: hostdb.HostPriceTable{ + HostPriceTable: h.PriceTable.convert(), + Expiry: h.PriceTableExpiry.Time, + }, + PublicKey: types.PublicKey(h.PublicKey), + Scanned: h.Scanned, + Settings: h.Settings.convert(), }, - Checks: checks, + Blocked: blocked, + Checks: checks, } } diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 7e6d3afe9..baf83b39d 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -267,11 +267,9 @@ func newHostMock(hk types.PublicKey) *hostMock { return &hostMock{ hk: hk, hi: api.Host{ - HostInfo: hostdb.HostInfo{ - Host: hostdb.Host{ - PublicKey: hk, - Scanned: true, - }, + Host: hostdb.Host{ + PublicKey: hk, + Scanned: true, }, }, } From 7675c8ed5ef8058da0875c38d420d6e9f69d7e17 Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 22 Mar 2024 09:38:55 +0100 Subject: [PATCH 103/201] api: revert SearchHostsRequest --- api/host.go | 7 +------ autopilot/autopilot.go | 2 +- autopilot/client.go | 16 +++++++--------- 3 files changed, 9 insertions(+), 16 deletions(-) diff --git a/api/host.go b/api/host.go index cce3336ae..5221fb20c 100644 --- a/api/host.go +++ b/api/host.go @@ -43,18 +43,13 @@ type ( MinRecentScanFailures uint64 `json:"minRecentScanFailures"` } - // HostsRequest is the request type for the /api/autopilot/hosts endpoint. - HostsRequest struct { - UsabilityMode string `json:"usabilityMode"` - SearchHostsRequest - } - // SearchHostsRequest is the request type for the /api/bus/search/hosts // endpoint. 
SearchHostsRequest struct { Offset int `json:"offset"` Limit int `json:"limit"` FilterMode string `json:"filterMode"` + UsabilityMode string `json:"usabilityMode"` AddressContains string `json:"addressContains"` KeyIn []types.PublicKey `json:"keyIn"` } diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index a9fa52343..8127ade5f 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -726,7 +726,7 @@ func (ap *Autopilot) stateHandlerGET(jc jape.Context) { } func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { - var req api.HostsRequest + var req api.SearchHostsRequest if jc.Decode(&req) != nil { return } diff --git a/autopilot/client.go b/autopilot/client.go index 336149f8a..01d0a1632 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -41,15 +41,13 @@ func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostHandlerResponse // HostInfo returns information about all hosts. func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostHandlerResponse, err error) { - err = c.c.POST("/hosts", api.HostsRequest{ - UsabilityMode: usabilityMode, - SearchHostsRequest: api.SearchHostsRequest{ - Offset: offset, - Limit: limit, - FilterMode: filterMode, - AddressContains: addressContains, - KeyIn: keyIn, - }, + err = c.c.POST("/hosts", api.SearchHostsRequest{ + Offset: offset, + Limit: limit, + FilterMode: filterMode, + UsabilityMode: usabilityMode, + AddressContains: addressContains, + KeyIn: keyIn, }, &resp) return } From 1cb3ac195455b9c561c8a71538d7952f0ffaa634 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 22 Mar 2024 09:40:45 +0100 Subject: [PATCH 104/201] client: fix HeadObject error response --- internal/test/e2e/s3_test.go | 6 ++++++ worker/client/client.go | 8 ++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index 6c13e8426..daaefed5e 
100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -113,6 +113,12 @@ func TestS3Basic(t *testing.T) { t.Fatal("unexpected ETag:", info.ETag) } + // stat object that doesn't exist + _, err = s3.StatObject(context.Background(), bucket, "nonexistent", minio.StatObjectOptions{}) + if err == nil || !strings.Contains(err.Error(), "The specified key does not exist") { + t.Fatal(err) + } + // add another bucket tt.OK(s3.MakeBucket(context.Background(), bucket+"2", minio.MakeBucketOptions{})) diff --git a/worker/client/client.go b/worker/client/client.go index d658ac027..fe284469f 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -100,9 +100,13 @@ func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.H return nil, err } if resp.StatusCode != 200 && resp.StatusCode != 206 { - err, _ := io.ReadAll(resp.Body) _ = resp.Body.Close() - return nil, errors.New(string(err)) + switch resp.StatusCode { + case http.StatusNotFound: + return nil, api.ErrObjectNotFound + default: + return nil, errors.New(http.StatusText(resp.StatusCode)) + } } head, err := parseObjectResponseHeaders(resp.Header) From 247d0371efec09dc960426a82a8c4de57f573ec3 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 22 Mar 2024 10:00:47 +0100 Subject: [PATCH 105/201] worker: hex id when logging --- worker/rhpv2.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/worker/rhpv2.go b/worker/rhpv2.go index 1e3df8d0b..749e7e547 100644 --- a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -2,6 +2,7 @@ package worker import ( "context" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -286,8 +287,9 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types err = w.withContractLock(ctx, fcid, lockingPriorityPruning, func() error { return w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) error { return w.withRevisionV2(defaultLockTimeout, t, hostKey, fcid, lastKnownRevisionNumber, func(t 
*rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) (err error) { + id := frand.Entropy128() logger := w.logger. - With("id", frand.Entropy128()). + With("id", hex.EncodeToString(id[:])). With("hostKey", hostKey). With("hostVersion", settings.Version). With("fcid", fcid). From ebf547dd337adf0ac64ee7f0bce38c0b43618967 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 02:05:55 +0000 Subject: [PATCH 106/201] build(deps): bump gorm.io/gorm from 1.25.7 to 1.25.8 Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.25.7 to 1.25.8. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.25.7...v1.25.8) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0c813f7bb..ae14f6691 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.4 gorm.io/driver/sqlite v1.5.5 - gorm.io/gorm v1.25.7 + gorm.io/gorm v1.25.8 lukechampine.com/frand v1.4.2 moul.io/zapgorm2 v1.3.0 ) diff --git a/go.sum b/go.sum index d15acfcf9..f896c5c85 100644 --- a/go.sum +++ b/go.sum @@ -411,8 +411,8 @@ gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -gorm.io/gorm v1.25.7 h1:VsD6acwRjz2zFxGO50gPO6AkNs7KKnvfzUjHQhZDz/A= -gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo= 
+gorm.io/gorm v1.25.8/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= From 7c47f98c8b4be2ffffff84b5829ac6fa06c793d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 02:06:11 +0000 Subject: [PATCH 107/201] build(deps): bump github.com/go-gormigrate/gormigrate/v2 Bumps [github.com/go-gormigrate/gormigrate/v2](https://github.com/go-gormigrate/gormigrate) from 2.1.1 to 2.1.2. - [Release notes](https://github.com/go-gormigrate/gormigrate/releases) - [Changelog](https://github.com/go-gormigrate/gormigrate/blob/master/CHANGELOG.md) - [Commits](https://github.com/go-gormigrate/gormigrate/compare/v2.1.1...v2.1.2) --- updated-dependencies: - dependency-name: github.com/go-gormigrate/gormigrate/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 0c813f7bb..696243c82 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21.6 require ( github.com/gabriel-vasile/mimetype v1.4.3 - github.com/go-gormigrate/gormigrate/v2 v2.1.1 + github.com/go-gormigrate/gormigrate/v2 v2.1.2 github.com/google/go-cmp v0.6.0 github.com/gotd/contrib v0.19.0 github.com/klauspost/reedsolomon v1.12.1 @@ -25,7 +25,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.4 gorm.io/driver/sqlite v1.5.5 - gorm.io/gorm v1.25.7 + gorm.io/gorm v1.25.8 lukechampine.com/frand v1.4.2 moul.io/zapgorm2 v1.3.0 ) diff --git a/go.sum b/go.sum index d15acfcf9..01dcd16ea 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gormigrate/gormigrate/v2 v2.1.1 h1:eGS0WTFRV30r103lU8JNXY27KbviRnqqIDobW3EV3iY= -github.com/go-gormigrate/gormigrate/v2 v2.1.1/go.mod h1:L7nJ620PFDKei9QOhJzqA8kRCk+E3UbV2f5gv+1ndLc= +github.com/go-gormigrate/gormigrate/v2 v2.1.2 h1:F/d1hpHbRAvKezziV2CC5KUE82cVe9zTgHSBoOOZ4CY= +github.com/go-gormigrate/gormigrate/v2 v2.1.2/go.mod h1:9nHVX6z3FCMCQPA7PThGcA55t22yKQfK/Dnsf5i7hUo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -411,8 +411,8 @@ gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= 
gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -gorm.io/gorm v1.25.7 h1:VsD6acwRjz2zFxGO50gPO6AkNs7KKnvfzUjHQhZDz/A= -gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo= +gorm.io/gorm v1.25.8/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= From e317b0fdb5fb26bb3d62ec94e93a122f903b9451 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 25 Mar 2024 10:18:30 +0100 Subject: [PATCH 108/201] worker: when migrating a slab, only set contracts for new shards --- worker/migrations.go | 4 ++-- worker/upload.go | 21 ++++----------------- worker/upload_test.go | 2 +- worker/worker.go | 2 +- 4 files changed, 8 insertions(+), 21 deletions(-) diff --git a/worker/migrations.go b/worker/migrations.go index 6c25b789f..075642dd5 100644 --- a/worker/migrations.go +++ b/worker/migrations.go @@ -10,7 +10,7 @@ import ( "go.sia.tech/renterd/object" ) -func (w *worker) migrate(ctx context.Context, s *object.Slab, contractSet string, dlContracts, ulContracts []api.ContractMetadata, bh uint64) (int, bool, error) { +func (w *worker) migrate(ctx context.Context, s object.Slab, contractSet string, dlContracts, ulContracts []api.ContractMetadata, bh uint64) (int, bool, error) { // make a map of good hosts goodHosts := make(map[types.PublicKey]map[types.FileContractID]bool) for _, c := range ulContracts { @@ -86,7 +86,7 @@ SHARDS: defer mem.Release() // download the slab - shards, surchargeApplied, err := w.downloadManager.DownloadSlab(ctx, *s, dlContracts) + shards, surchargeApplied, err := 
w.downloadManager.DownloadSlab(ctx, s, dlContracts) if err != nil { return 0, false, fmt.Errorf("failed to download slab for migration: %w", err) } diff --git a/worker/upload.go b/worker/upload.go index d146b920e..bc419d703 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -604,7 +604,7 @@ func (mgr *uploadManager) UploadPackedSlab(ctx context.Context, rs api.Redundanc return nil } -func (mgr *uploadManager) UploadShards(ctx context.Context, s *object.Slab, shardIndices []int, shards [][]byte, contractSet string, contracts []api.ContractMetadata, bh uint64, lockPriority int, mem Memory) (err error) { +func (mgr *uploadManager) UploadShards(ctx context.Context, s object.Slab, shardIndices []int, shards [][]byte, contractSet string, contracts []api.ContractMetadata, bh uint64, lockPriority int, mem Memory) (err error) { // cancel all in-flight requests when the upload is done ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -642,27 +642,14 @@ func (mgr *uploadManager) UploadShards(ctx context.Context, s *object.Slab, shar // overwrite the shards with the newly uploaded ones for i, si := range shardIndices { s.Shards[si].LatestHost = uploaded[i].LatestHost - - knownContracts := make(map[types.FileContractID]struct{}) - for _, fcids := range s.Shards[si].Contracts { - for _, fcid := range fcids { - knownContracts[fcid] = struct{}{} - } - } + s.Shards[si].Contracts = make(map[types.PublicKey][]types.FileContractID) for hk, fcids := range uploaded[i].Contracts { - for _, fcid := range fcids { - if _, exists := knownContracts[fcid]; !exists { - if s.Shards[si].Contracts == nil { - s.Shards[si].Contracts = make(map[types.PublicKey][]types.FileContractID) - } - s.Shards[si].Contracts[hk] = append(s.Shards[si].Contracts[hk], fcid) - } - } + s.Shards[si].Contracts[hk] = append(s.Shards[si].Contracts[hk], fcids...) 
} } // update the slab - return mgr.os.UpdateSlab(ctx, *s, contractSet) + return mgr.os.UpdateSlab(ctx, s, contractSet) } func (mgr *uploadManager) candidates(allowed map[types.PublicKey]struct{}) (candidates []*uploader) { diff --git a/worker/upload_test.go b/worker/upload_test.go index 1d441693f..cb4a7ee7b 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -340,7 +340,7 @@ func TestUploadShards(t *testing.T) { // migrate those shards away from bad hosts mem := mm.AcquireMemory(context.Background(), uint64(len(badIndices))*rhpv2.SectorSize) - err = ul.UploadShards(context.Background(), &o.Object.Object.Slabs[0].Slab, badIndices, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) + err = ul.UploadShards(context.Background(), o.Object.Object.Slabs[0].Slab, badIndices, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) if err != nil { t.Fatal(err) } diff --git a/worker/worker.go b/worker/worker.go index 89fe37a14..c6ce7a67a 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -784,7 +784,7 @@ func (w *worker) slabMigrateHandler(jc jape.Context) { } // migrate the slab - numShardsMigrated, surchargeApplied, err := w.migrate(ctx, &slab, up.ContractSet, dlContracts, ulContracts, up.CurrentHeight) + numShardsMigrated, surchargeApplied, err := w.migrate(ctx, slab, up.ContractSet, dlContracts, ulContracts, up.CurrentHeight) if err != nil { jc.Encode(api.MigrateSlabResponse{ NumShardsMigrated: numShardsMigrated, From 66f9df2c984eaa40866fc8a7a25253517658f100 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 25 Mar 2024 11:11:37 +0100 Subject: [PATCH 109/201] ci: make use of reusable project-add.yml --- .github/workflows/project-add.yml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/.github/workflows/project-add.yml b/.github/workflows/project-add.yml index 3304fc0db..63df05bc2 100644 --- a/.github/workflows/project-add.yml +++ b/.github/workflows/project-add.yml @@ -10,12 +10,5 @@ on: 
jobs: add-to-project: - name: Add issue to project - runs-on: ubuntu-latest - steps: - - uses: actions/add-to-project@v0.5.0 - with: - # You can target a project in a different organization - # to the issue - project-url: https://github.com/orgs/SiaFoundation/projects/5 - github-token: ${{ secrets.PAT_ADD_TO_PROJECT }} + uses: SiaFoundation/workflows/.github/workflows/project-add.yml@master + From 11a4ff18bce249dbacfa654fbbef7db087215b15 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 25 Mar 2024 11:18:00 +0100 Subject: [PATCH 110/201] ci: inherit secrets --- .github/workflows/project-add.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/project-add.yml b/.github/workflows/project-add.yml index 63df05bc2..c61a17b0d 100644 --- a/.github/workflows/project-add.yml +++ b/.github/workflows/project-add.yml @@ -11,4 +11,5 @@ on: jobs: add-to-project: uses: SiaFoundation/workflows/.github/workflows/project-add.yml@master + secrets: inherit From 4b3968573d8fcaeda663f3ba119cb7313ecd76ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 10:31:31 +0000 Subject: [PATCH 111/201] build(deps): bump gorm.io/driver/mysql from 1.5.4 to 1.5.6 Bumps [gorm.io/driver/mysql](https://github.com/go-gorm/mysql) from 1.5.4 to 1.5.6. - [Commits](https://github.com/go-gorm/mysql/compare/v1.5.4...v1.5.6) --- updated-dependencies: - dependency-name: gorm.io/driver/mysql dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 696243c82..22515806c 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( golang.org/x/crypto v0.21.0 golang.org/x/term v0.18.0 gopkg.in/yaml.v3 v3.0.1 - gorm.io/driver/mysql v1.5.4 + gorm.io/driver/mysql v1.5.6 gorm.io/driver/sqlite v1.5.5 gorm.io/gorm v1.25.8 lukechampine.com/frand v1.4.2 diff --git a/go.sum b/go.sum index 01dcd16ea..81612096a 100644 --- a/go.sum +++ b/go.sum @@ -405,12 +405,12 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/mysql v1.5.4 h1:igQmHfKcbaTVyAIHNhhB888vvxh8EdQ2uSUT0LPcBso= -gorm.io/driver/mysql v1.5.4/go.mod h1:9rYxJph/u9SWkWc9yY4XJ1F/+xO0S/ChOmbk3+Z5Tvs= +gorm.io/driver/mysql v1.5.6 h1:Ld4mkIickM+EliaQZQx3uOJDJHtrd70MxAUqWqlx3Y8= +gorm.io/driver/mysql v1.5.6/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM= gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= -gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo= gorm.io/gorm v1.25.8/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From f0eef4fd331397f5ba801636f17041a23c0ac41a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: 
Mon, 25 Mar 2024 12:20:27 +0100 Subject: [PATCH 112/201] worker: add TestMigrateLostSector --- worker/mocks_test.go | 44 +++++++++++++++-- worker/upload_test.go | 108 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 148 insertions(+), 4 deletions(-) diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 7b3609c0b..b8257e95c 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -388,6 +388,21 @@ func (os *objectStoreMock) TrackUpload(ctx context.Context, uID api.UploadID) er func (os *objectStoreMock) FinishUpload(ctx context.Context, uID api.UploadID) error { return nil } func (os *objectStoreMock) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error { + os.mu.Lock() + defer os.mu.Unlock() + + for _, objects := range os.objects { + for _, object := range objects { + for _, slab := range object.Slabs { + for _, shard := range slab.Slab.Shards { + if shard.Root == root { + delete(shard.Contracts, hk) + } + } + } + } + } + return nil } @@ -503,10 +518,33 @@ func (os *objectStoreMock) UpdateSlab(ctx context.Context, s object.Slab, contra os.forEachObject(func(bucket, path string, o object.Object) { for i, slab := range o.Slabs { - if slab.Key.String() == s.Key.String() { - os.objects[bucket][path].Slabs[i].Slab = s - return + if slab.Key.String() != s.Key.String() { + continue + } + // update slab + shards := os.objects[bucket][path].Slabs[i].Slab.Shards + for sI := range shards { + // overwrite latest host + shards[sI].LatestHost = s.Shards[sI].LatestHost + + // merge contracts for each shard + existingContracts := make(map[types.FileContractID]struct{}) + for _, fcids := range shards[sI].Contracts { + for _, fcid := range fcids { + existingContracts[fcid] = struct{}{} + } + } + for hk, fcids := range s.Shards[sI].Contracts { + for _, fcid := range fcids { + if _, exists := existingContracts[fcid]; exists { + continue + } + shards[sI].Contracts[hk] = append(shards[sI].Contracts[hk], fcids...) 
+ } + } } + os.objects[bucket][path].Slabs[i].Slab.Shards = shards + return } }) diff --git a/worker/upload_test.go b/worker/upload_test.go index cb4a7ee7b..0b6308ffe 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -263,6 +263,107 @@ func TestUploadPackedSlab(t *testing.T) { } } +func TestMigrateLostSector(t *testing.T) { + // create test worker + w := newTestWorker(t) + + // add hosts to worker + w.AddHosts(testRedundancySettings.TotalShards * 2) + + // convenience variables + os := w.os + mm := w.ulmm + dl := w.downloadManager + ul := w.uploadManager + + // create test data + data := frand.Bytes(128) + + // create upload params + params := testParameters(t.Name()) + + // upload data + _, _, err := ul.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) + if err != nil { + t.Fatal(err) + } + + // grab the slab + o, err := os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) + if err != nil { + t.Fatal(err) + } else if len(o.Object.Object.Slabs) != 1 { + t.Fatal("expected 1 slab") + } + slab := o.Object.Object.Slabs[0] + + // build usedHosts hosts + usedHosts := make(map[types.PublicKey]struct{}) + for _, shard := range slab.Shards { + usedHosts[shard.LatestHost] = struct{}{} + } + + // assume the host of the first shard lost its sector + badHost := slab.Shards[0].LatestHost + badContract := slab.Shards[0].Contracts[badHost][0] + err = os.DeleteHostSector(context.Background(), badHost, slab.Shards[0].Root) + if err != nil { + t.Fatal(err) + } + + // download the slab + shards, _, err := dl.DownloadSlab(context.Background(), slab.Slab, w.Contracts()) + if err != nil { + t.Fatal(err) + } + + // encrypt the shards + o.Object.Object.Slabs[0].Slab.Encrypt(shards) + + // filter it down to the shards we need to migrate + shards = shards[:1] + + // recreate upload contracts + contracts := make([]api.ContractMetadata, 0) + for _, c := range w.Contracts() { + _, used := 
usedHosts[c.HostKey] + if !used && c.HostKey != badHost { + contracts = append(contracts, c) + } + } + + // migrate the shard away from the bad host + mem := mm.AcquireMemory(context.Background(), rhpv2.SectorSize) + err = ul.UploadShards(context.Background(), o.Object.Object.Slabs[0].Slab, []int{0}, shards, testContractSet, contracts, 0, lockingPriorityUpload, mem) + if err != nil { + t.Fatal(err) + } + + // re-grab the slab + o, err = os.Object(context.Background(), testBucket, t.Name(), api.GetObjectOptions{}) + if err != nil { + t.Fatal(err) + } else if len(o.Object.Object.Slabs) != 1 { + t.Fatal("expected 1 slab") + } + slab = o.Object.Object.Slabs[0] + + // assert the bad shard is on a good host now + shard := slab.Shards[0] + if shard.LatestHost == badHost { + t.Fatal("latest host is bad") + } else if len(shard.Contracts) != 1 { + t.Fatal("expected 1 contract") + } + for _, fcids := range shard.Contracts { + for _, fcid := range fcids { + if fcid == badContract { + t.Fatal("contract belongs to bad host") + } + } + } +} + func TestUploadShards(t *testing.T) { // create test worker w := newTestWorker(t) @@ -355,7 +456,12 @@ func TestUploadShards(t *testing.T) { slab = o.Object.Object.Slabs[0] // assert none of the shards are on bad hosts - for _, shard := range slab.Shards { + for i, shard := range slab.Shards { + if i%2 == 0 && len(shard.Contracts) != 1 { + t.Fatalf("expected 1 contract, got %v", len(shard.Contracts)) + } else if i%2 != 0 && len(shard.Contracts) != 2 { + t.Fatalf("expected 2 contracts, got %v", len(shard.Contracts)) + } if _, bad := badHosts[shard.LatestHost]; bad { t.Fatal("shard is on bad host", shard.LatestHost) } From 80eca756f0f280ae4c777a635266495d881ec35d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 25 Mar 2024 15:46:53 +0100 Subject: [PATCH 113/201] worker: update logging in scanHost and apply timeout to each step of scanning --- worker/worker.go | 99 ++++++++++++++++++++++++++++-------------------- 1 file changed, 58 
insertions(+), 41 deletions(-) diff --git a/worker/worker.go b/worker/worker.go index d0de33f71..a39fc608e 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1436,26 +1436,31 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty logger := w.logger.With("host", hostKey).With("hostIP", hostIP).With("timeout", timeout) // prepare a helper for scanning scan := func() (rhpv2.HostSettings, rhpv3.HostPriceTable, time.Duration, error) { - // apply timeout - scanCtx := ctx - var cancel context.CancelFunc - if timeout > 0 { - scanCtx, cancel = context.WithTimeout(scanCtx, timeout) - defer cancel() - } - // resolve hostIP. We don't want to scan hosts on private networks. - if !w.allowPrivateIPs { - host, _, err := net.SplitHostPort(hostIP) - if err != nil { - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err + // helper to prepare a context for scanning + withTimeoutCtx := func() (context.Context, context.CancelFunc) { + if timeout > 0 { + return context.WithTimeout(ctx, timeout) } - addrs, err := (&net.Resolver{}).LookupIPAddr(scanCtx, host) - if err != nil { - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err - } - for _, addr := range addrs { - if isPrivateIP(addr.IP) { - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, api.ErrHostOnPrivateNetwork + return ctx, func() {} + } + // resolve the address + { + scanCtx, cancel := withTimeoutCtx() + defer cancel() + // resolve hostIP. We don't want to scan hosts on private networks. 
+ if !w.allowPrivateIPs { + host, _, err := net.SplitHostPort(hostIP) + if err != nil { + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err + } + addrs, err := (&net.Resolver{}).LookupIPAddr(scanCtx, host) + if err != nil { + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, err + } + for _, addr := range addrs { + if isPrivateIP(addr.IP) { + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, api.ErrHostOnPrivateNetwork + } } } } @@ -1463,37 +1468,49 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty // fetch the host settings start := time.Now() var settings rhpv2.HostSettings - err := w.withTransportV2(scanCtx, hostKey, hostIP, func(t *rhpv2.Transport) error { - var err error - if settings, err = RPCSettings(scanCtx, t); err != nil { - return fmt.Errorf("failed to fetch host settings: %w", err) + { + scanCtx, cancel := withTimeoutCtx() + defer cancel() + err := w.withTransportV2(scanCtx, hostKey, hostIP, func(t *rhpv2.Transport) error { + var err error + if settings, err = RPCSettings(scanCtx, t); err != nil { + return fmt.Errorf("failed to fetch host settings: %w", err) + } + // NOTE: we overwrite the NetAddress with the host address here + // since we just used it to dial the host we know it's valid + settings.NetAddress = hostIP + return nil + }) + if err != nil { + return settings, rhpv3.HostPriceTable{}, time.Since(start), err } - // NOTE: we overwrite the NetAddress with the host address here - // since we just used it to dial the host we know it's valid - settings.NetAddress = hostIP - return nil - }) - elapsed := time.Since(start) - if err != nil { - return settings, rhpv3.HostPriceTable{}, elapsed, err } // fetch the host pricetable var pt rhpv3.HostPriceTable - err = w.transportPoolV3.withTransportV3(scanCtx, hostKey, settings.SiamuxAddr(), func(ctx context.Context, t *transportV3) error { - if hpt, err := RPCPriceTable(ctx, t, func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) { return nil, 
nil }); err != nil { - return fmt.Errorf("failed to fetch host price table: %w", err) - } else { - pt = hpt.HostPriceTable - return nil + { + scanCtx, cancel := withTimeoutCtx() + defer cancel() + err := w.transportPoolV3.withTransportV3(scanCtx, hostKey, settings.SiamuxAddr(), func(ctx context.Context, t *transportV3) error { + if hpt, err := RPCPriceTable(ctx, t, func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) { return nil, nil }); err != nil { + return fmt.Errorf("failed to fetch host price table: %w", err) + } else { + pt = hpt.HostPriceTable + return nil + } + }) + if err != nil { + return settings, rhpv3.HostPriceTable{}, time.Since(start), err } - }) - return settings, pt, elapsed, err + } + return settings, pt, time.Since(start), nil } // scan: first try settings, pt, duration, err := scan() if err != nil { + logger = logger.With(zap.Error(err)) + // scan: second try select { case <-ctx.Done(): @@ -1502,11 +1519,11 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty } settings, pt, duration, err = scan() - logger = logger.With("elapsed", duration) + logger = logger.With("elapsed", duration).With(zap.Error(err)) if err == nil { logger.Info("successfully scanned host on second try") } else if !isErrHostUnreachable(err) { - logger.Infow("failed to scan host", zap.Error(err)) + logger.Infow("failed to scan host") } } From 535a4d9015dc9d096619682641720c4683e4a172 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 25 Mar 2024 16:14:08 +0100 Subject: [PATCH 114/201] stores: add autopilot and usability filter to SearchHosts --- api/host.go | 12 ++----- autopilot/autopilot.go | 7 ++++ autopilot/contractor.go | 19 ++++++----- bus/bus.go | 74 +++-------------------------------------- bus/client/hosts.go | 2 ++ stores/hostdb.go | 42 +++++++++++++++++++++-- stores/hostdb_test.go | 71 ++++++++++++++++++++++++++++++++++----- 7 files changed, 131 insertions(+), 96 deletions(-) diff --git a/api/host.go b/api/host.go index 
0691c4d56..46a5597b3 100644 --- a/api/host.go +++ b/api/host.go @@ -24,10 +24,6 @@ var ( // ErrHostNotFound is returned when a host can't be retrieved from the // database. ErrHostNotFound = errors.New("host doesn't exist in hostdb") - - // ErrHostInfoNotFound is returned when host info can't be retrieved from - // the database. - ErrHostInfoNotFound = errors.New("host info doesn't exist in hostdb") ) var ( @@ -71,11 +67,8 @@ type ( KeyIn []types.PublicKey `json:"keyIn"` } - // HostsRequest is the request type for the POST /autopilot/:id/hosts - // endpoint. - HostsRequest SearchHostsRequest - - // HostResponse is the response type for the /host/:hostkey endpoint. + // HostResponse is the response type for the GET + // /api/autopilot/host/:hostkey endpoint. HostResponse struct { Host hostdb.Host `json:"host"` Checks *HostChecks `json:"checks,omitempty"` @@ -120,6 +113,7 @@ type ( } SearchHostOptions struct { + AutopilotID string AddressContains string FilterMode string UsabilityMode string diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 9b635dcbd..97ea4e939 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -705,6 +705,13 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { var req api.SearchHostOptions if jc.Decode(&req) != nil { return + } else if req.AutopilotID != "" && req.AutopilotID != ap.id { + jc.Error(errors.New("invalid autopilot id"), http.StatusBadRequest) + return + } else { + // TODO: on next major release we should not re-use options between bus + // and autopilot API if we don't support all fields in both + req.AutopilotID = ap.id } // TODO: remove on next major release diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 270ea701f..0e85e4302 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -713,12 +713,22 @@ func (c *contractor) runContractChecks(ctx context.Context, contracts []api.Cont // fetch host from hostdb host, err := c.ap.bus.Host(ctx, hk) if err != nil { - 
c.logger.Errorw(fmt.Sprintf("missing host, err: %v", err), "hk", hk) + c.logger.Warn(fmt.Sprintf("missing host, err: %v", err), "hk", hk) toStopUsing[fcid] = api.ErrUsabilityHostNotFound.Error() notfound++ continue } + // fetch host checks + check, ok := host.Checks[c.ap.id] + if !ok { + // this is only possible due to developer error, if there is no + // check the host would have been missing, so we treat it the same + c.logger.Warnw("missing host check", "hk", hk) + toStopUsing[fcid] = api.ErrUsabilityHostNotFound.Error() + continue + } + // if the host is blocked we ignore it, it might be unblocked later if host.Blocked { c.logger.Infow("unusable host", "hk", hk, "fcid", fcid, "reasons", api.ErrUsabilityHostBlocked.Error()) @@ -726,13 +736,6 @@ func (c *contractor) runContractChecks(ctx context.Context, contracts []api.Cont continue } - // grab the host check - check, ok := host.Checks[c.ap.id] - if !ok { - c.logger.Errorw("missing host check", "hk", hk) - continue - } - // check if the host is still usable if !check.Usability.IsUsable() { reasons := check.Usability.UnusableReasons() diff --git a/bus/bus.go b/bus/bus.go index 1fc5cfff8..d9eba1e96 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -253,9 +253,7 @@ func (b *bus) Handler() http.Handler { "GET /autopilot/:id": b.autopilotsHandlerGET, "PUT /autopilot/:id": b.autopilotsHandlerPUT, - "GET /autopilot/:id/host/:hostkey": b.autopilotHostHandlerGET, - "PUT /autopilot/:id/host/:hostkey": b.autopilotHostHandlerPUT, - "POST /autopilot/:id/hosts": b.autopilotHostsHandlerGET, + "PUT /autopilot/:id/host/:hostkey/checks": b.autopilotHostChecksHandlerPUT, "GET /buckets": b.bucketsHandlerGET, "POST /buckets": b.bucketsHandlerPOST, @@ -780,8 +778,9 @@ func (b *bus) searchHostsHandlerPOST(jc jape.Context) { } // TODO: on the next major release: - // - properly default search params - // - properly validate and return 400 + // - properly default search params (currenlty no defaults are set) + // - properly validate and 
return 400 (currently validation is done in autopilot and the store) + hosts, err := b.hdb.SearchHosts(jc.Request.Context(), req.AutopilotID, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) if jc.Check(fmt.Sprintf("couldn't fetch hosts %d-%d", req.Offset, req.Offset+req.Limit), err) != nil { return @@ -1970,27 +1969,7 @@ func (b *bus) autopilotsHandlerPUT(jc jape.Context) { jc.Check("failed to update autopilot", b.as.UpdateAutopilot(jc.Request.Context(), ap)) } -func (b *bus) autopilotHostHandlerGET(jc jape.Context) { - var id string - if jc.DecodeParam("id", &id) != nil { - return - } - var hk types.PublicKey - if jc.DecodeParam("hostkey", &hk) != nil { - return - } - - hi, err := b.hdb.Host(jc.Request.Context(), hk) - if errors.Is(err, api.ErrAutopilotNotFound) { - jc.Error(err, http.StatusNotFound) - return - } else if jc.Check("failed to fetch host info", err) != nil { - return - } - jc.Encode(hi) -} - -func (b *bus) autopilotHostHandlerPUT(jc jape.Context) { +func (b *bus) autopilotHostChecksHandlerPUT(jc jape.Context) { var id string if jc.DecodeParam("id", &id) != nil { return @@ -2013,49 +1992,6 @@ func (b *bus) autopilotHostHandlerPUT(jc jape.Context) { } } -func (b *bus) autopilotHostsHandlerGET(jc jape.Context) { - var id string - if jc.DecodeParam("id", &id) != nil { - return - } - var req api.HostsRequest - if jc.Decode(&req) != nil { - return - } - - // validate filter mode - if fm := req.FilterMode; fm != "" { - if fm != api.HostFilterModeAll && - fm != api.HostFilterModeAllowed && - fm != api.HostFilterModeBlocked { - jc.Error(fmt.Errorf("invalid filter mode: '%v', allowed values are '%s', '%s', '%s'", fm, api.HostFilterModeAll, api.HostFilterModeAllowed, api.HostFilterModeBlocked), http.StatusBadRequest) - return - } - } - - // validate usability mode - if um := req.UsabilityMode; um != "" { - if um != api.UsabilityFilterModeUsable && - um != api.UsabilityFilterModeUnusable && - um != 
api.UsabilityFilterModeAll { - jc.Error(fmt.Errorf("invalid usability mode: '%v', allowed values are '%s', '%s', '%s'", um, api.UsabilityFilterModeAll, api.UsabilityFilterModeUsable, api.UsabilityFilterModeUnusable), http.StatusBadRequest) - return - } else if id == "" { - jc.Error(errors.New("usability mode requires autopilot id"), http.StatusBadRequest) - return - } - } - - his, err := b.hdb.SearchHosts(jc.Request.Context(), id, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) - if errors.Is(err, api.ErrAutopilotNotFound) { - jc.Error(err, http.StatusNotFound) - return - } else if jc.Check("failed to fetch host infos", err) != nil { - return - } - jc.Encode(his) -} - func (b *bus) contractTaxHandlerGET(jc jape.Context) { var payout types.Currency if jc.DecodeParam("payout", (*api.ParamCurrency)(&payout)) != nil { diff --git a/bus/client/hosts.go b/bus/client/hosts.go index e84e3cea9..4e1aeab30 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -80,9 +80,11 @@ func (c *Client) ResetLostSectors(ctx context.Context, hostKey types.PublicKey) // SearchHosts returns all hosts that match certain search criteria. func (c *Client) SearchHosts(ctx context.Context, opts api.SearchHostOptions) (hosts []api.Host, err error) { err = c.c.WithContext(ctx).POST("/search/hosts", api.SearchHostsRequest{ + AutopilotID: opts.AutopilotID, Offset: opts.Offset, Limit: opts.Limit, FilterMode: opts.FilterMode, + UsabilityMode: opts.UsabilityMode, AddressContains: opts.AddressContains, KeyIn: opts.KeyIn, }, &hosts) diff --git a/stores/hostdb.go b/stores/hostdb.go index 9c7be8e9e..a121ab7d0 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -627,9 +627,11 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, us query := ss.db. Model(&dbHost{}). 
Scopes( + autopilotFilter(autopilotID), hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist()), hostNetAddress(addressContains), hostPublicKey(keyIn), + usabilityFilter(usabilityMode), ) // preload allowlist and blocklist @@ -639,12 +641,23 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, us Preload("Blocklist") } - // preload host checks - query = query.Preload("Checks.DBAutopilot") + // filter checks + if autopilotID != "" { + query = query.Preload("Checks.DBAutopilot", "identifier = ?", autopilotID) + } else { + query = query.Preload("Checks.DBAutopilot") + } + // query = query. + // Preload("Checks.DBAutopilot"). + // Scopes( + // autopilotFilter(autopilotID), + // usabilityFilter(usabilityMode), + // ) var hosts []api.Host var fullHosts []dbHost err := query. + Debug(). Offset(offset). Limit(limit). FindInBatches(&fullHosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { @@ -1089,6 +1102,17 @@ func hostPublicKey(keyIn []types.PublicKey) func(*gorm.DB) *gorm.DB { } } +// autopilotFilter can be used as a scope to filter host checks based on their +// autopilot +func autopilotFilter(autopilotID string) func(*gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + if autopilotID == "" { + return db.Preload("Checks.DBAutopilot") + } + return db.Preload("Checks.DBAutopilot", "identifier = ?", autopilotID) + } +} + // hostFilter can be used as a scope to filter hosts based on their filter mode, // returning either all, allowed or blocked hosts. func hostFilter(filterMode string, hasAllowlist, hasBlocklist bool) func(*gorm.DB) *gorm.DB { @@ -1120,6 +1144,20 @@ func hostFilter(filterMode string, hasAllowlist, hasBlocklist bool) func(*gorm.D } } +func usabilityFilter(usabilityMode string) func(*gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + switch usabilityMode { + case api.UsabilityFilterModeUsable: + db = db.Preload("Checks", "usability_blocked = ? AND usability_offline = ? 
AND usability_low_score = ? AND usability_redundant_ip = ? AND usability_gouging = ? AND usability_not_accepting_contracts = ? AND usability_not_announced = ? AND usability_not_completing_scan = ?", false, false, false, false, false, false, false, false) + case api.UsabilityFilterModeUnusable: + db = db.Preload("Checks", "usability_blocked = ? OR usability_offline = ? OR usability_low_score = ? OR usability_redundant_ip = ? OR usability_gouging = ? OR usability_not_accepting_contracts = ? OR usability_not_announced = ? OR usability_not_completing_scan = ?", true, true, true, true, true, true, true, true) + case api.UsabilityFilterModeAll: + // do nothing + } + return db + } +} + func (ss *SQLStore) isBlocked(h dbHost) (blocked bool) { ss.mu.Lock() defer ss.mu.Unlock() diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 55a318e5c..196170b01 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -382,6 +382,61 @@ func TestSearchHosts(t *testing.T) { t.Fatal("unexpected", c3, ok) } + // assert autopilot filter is taken into account + his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if cnt != 3 { + t.Fatal("unexpected", cnt) + } + + // assert h1 and h2 have the expected checks + if c1, ok := his[0].Checks[ap1]; !ok || c1 != h1c { + t.Fatal("unexpected", c1, ok) + } else if c2, ok := his[1].Checks[ap1]; !ok || c2 != h2c1 { + t.Fatal("unexpected", c2, ok) + } else if _, ok := his[1].Checks[ap2]; ok { + t.Fatal("unexpected") + } + + // assert usability filter is taken into account + h2c1.Usability.RedundantIP = true + err = ss.UpdateHostCheck(context.Background(), ap1, hk2, h2c1) + if err != nil { + t.Fatal(err) + } + his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if cnt != 3 { + t.Fatal("unexpected", cnt) + } + + // 
assert h1 and h2 have the expected checks + if c1, ok := his[0].Checks[ap1]; !ok || c1 != h1c { + t.Fatal("unexpected", c1, ok) + } else if _, ok := his[1].Checks[ap1]; ok { + t.Fatal("unexpected", ok) + } else if _, ok := his[1].Checks[ap2]; ok { + t.Fatal("unexpected") + } + + his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUnusable, "", nil, 0, -1) + if err != nil { + t.Fatal(err) + } else if cnt != 3 { + t.Fatal("unexpected", cnt) + } + + // assert h1 and h2 have the expected checks + if _, ok := his[0].Checks[ap1]; ok { + t.Fatal("unexpected") + } else if c2, ok := his[1].Checks[ap1]; !ok || c2 != h2c1 { + t.Fatal("unexpected", ok) + } else if _, ok := his[1].Checks[ap2]; ok { + t.Fatal("unexpected") + } + // assert cascade delete on host err = ss.db.Exec("DELETE FROM hosts WHERE public_key = ?", publicKey(types.PublicKey{1})).Error if err != nil { @@ -1310,14 +1365,14 @@ func newTestHostCheck() api.HostCheck { Prices: .7, }, Usability: api.HostUsabilityBreakdown{ - Blocked: true, - Offline: true, - LowScore: true, - RedundantIP: true, - Gouging: true, - NotAcceptingContracts: true, - NotAnnounced: true, - NotCompletingScan: true, + Blocked: false, + Offline: false, + LowScore: false, + RedundantIP: false, + Gouging: false, + NotAcceptingContracts: false, + NotAnnounced: false, + NotCompletingScan: false, }, } } From 104731b72197354f9811a85992684de571c782fa Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 25 Mar 2024 16:17:07 +0100 Subject: [PATCH 115/201] bus: fix typo --- bus/bus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bus/bus.go b/bus/bus.go index d9eba1e96..f8bd2effa 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -778,7 +778,7 @@ func (b *bus) searchHostsHandlerPOST(jc jape.Context) { } // TODO: on the next major release: - // - properly default search params (currenlty no defaults are set) + // - properly default search params (currently no defaults are set) // - properly validate 
and return 400 (currently validation is done in autopilot and the store) hosts, err := b.hdb.SearchHosts(jc.Request.Context(), req.AutopilotID, req.FilterMode, req.UsabilityMode, req.AddressContains, req.KeyIn, req.Offset, req.Limit) From b30999c07087e1fab4611915e93e63a650499ddd Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 25 Mar 2024 17:20:33 +0100 Subject: [PATCH 116/201] contractor: add forgiveness period for failed refreshes --- autopilot/contractor.go | 33 +++++++++++++++++++++++++++++++++ autopilot/contractor_test.go | 31 +++++++++++++++++++++++++++++++ autopilot/hostfilter.go | 4 ++-- 3 files changed, 66 insertions(+), 2 deletions(-) diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 7b2ea9863..82ea4e619 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -33,6 +33,10 @@ const ( // contract. estimatedFileContractTransactionSetSize = 2048 + // failedRenewalForgivenessPeriod is the amount of time we wait before + // punishing a contract for not being able to refresh + failedRefreshForgivenessPeriod = 24 * time.Hour + // leewayPctCandidateHosts is the leeway we apply when fetching candidate // hosts, we fetch ~10% more than required leewayPctCandidateHosts = 1.1 @@ -96,6 +100,8 @@ type ( revisionLastBroadcast map[types.FileContractID]time.Time revisionSubmissionBuffer uint64 + firstRefreshFailure map[types.FileContractID]time.Time + mu sync.Mutex pruning bool @@ -162,6 +168,8 @@ func newContractor(ap *Autopilot, revisionSubmissionBuffer uint64, revisionBroad revisionLastBroadcast: make(map[types.FileContractID]time.Time), revisionSubmissionBuffer: revisionSubmissionBuffer, + firstRefreshFailure: make(map[types.FileContractID]time.Time), + resolver: newIPResolver(ap.shutdownCtx, resolverLookupTimeout, ap.logger.Named("resolver")), } } @@ -226,6 +234,9 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( contracts := resp.Contracts c.logger.Infof("fetched %d contracts from the worker, 
took %v", len(resp.Contracts), time.Since(start)) + // prune contract refresh failure map + c.pruneContractRefreshFailures(contracts) + // run revision broadcast c.runRevisionBroadcast(ctx, w, contracts, isInCurrentSet) @@ -1624,6 +1635,28 @@ func (c *contractor) hostForContract(ctx context.Context, fcid types.FileContrac return } +func (c *contractor) pruneContractRefreshFailures(contracts []api.Contract) { + contractMap := make(map[types.FileContractID]struct{}) + for _, contract := range contracts { + contractMap[contract.ID] = struct{}{} + } + for fcid := range c.firstRefreshFailure { + if _, ok := contractMap[fcid]; !ok { + delete(c.firstRefreshFailure, fcid) + } + } +} + +func (c *contractor) shouldForgiveFailedRefresh(fcid types.FileContractID) bool { + lastFailure, exists := c.firstRefreshFailure[fcid] + if !exists { + lastFailure = time.Now() + c.firstRefreshFailure[fcid] = lastFailure + } + fmt.Println(time.Since(lastFailure)) + return time.Since(lastFailure) < failedRefreshForgivenessPeriod +} + func addLeeway(n uint64, pct float64) uint64 { if pct < 0 { panic("given leeway percent has to be positive") diff --git a/autopilot/contractor_test.go b/autopilot/contractor_test.go index 575605612..9ce54daf5 100644 --- a/autopilot/contractor_test.go +++ b/autopilot/contractor_test.go @@ -3,8 +3,12 @@ package autopilot import ( "math" "testing" + "time" + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" "go.uber.org/zap" + "lukechampine.com/frand" ) func TestCalculateMinScore(t *testing.T) { @@ -35,3 +39,30 @@ func TestCalculateMinScore(t *testing.T) { t.Fatalf("expected minScore to be math.SmallestNonzeroFLoat64 but was %v", minScore) } } + +func TestShouldForgiveFailedRenewal(t *testing.T) { + var fcid types.FileContractID + frand.Read(fcid[:]) + c := &contractor{ + firstRefreshFailure: make(map[types.FileContractID]time.Time), + } + + // try twice since the first time will set the failure time + if !c.shouldForgiveFailedRefresh(fcid) { + t.Fatal("should 
forgive") + } else if !c.shouldForgiveFailedRefresh(fcid) { + t.Fatal("should forgive") + } + + // set failure to be a full period in the past + c.firstRefreshFailure[fcid] = time.Now().Add(-failedRefreshForgivenessPeriod - time.Second) + if c.shouldForgiveFailedRefresh(fcid) { + t.Fatal("should not forgive") + } + + // prune map + c.pruneContractRefreshFailures([]api.Contract{}) + if len(c.firstRefreshFailure) != 0 { + t.Fatal("expected no failures") + } +} diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index f41a20c94..d64c1f3e3 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -254,8 +254,8 @@ func (c *contractor) isUsableContract(cfg api.AutopilotConfig, state state, ci c } if isOutOfFunds(cfg, pt, contract) { reasons = append(reasons, errContractOutOfFunds.Error()) - usable = false - recoverable = true + usable = usable && c.shouldForgiveFailedRefresh(contract.ID) + recoverable = !usable // only needs to be recoverable if !usable refresh = true renew = false } From a6b5d16eb5774e73b2ec52e6612bdfb6d766c52d Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 25 Mar 2024 19:52:18 +0100 Subject: [PATCH 117/201] autopilot: fix response types --- api/autopilot.go | 16 ---------------- autopilot/client.go | 2 +- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/api/autopilot.go b/api/autopilot.go index 22598b28c..23425aacf 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -4,7 +4,6 @@ import ( "errors" "go.sia.tech/core/types" - "go.sia.tech/renterd/hostdb" ) const ( @@ -119,21 +118,6 @@ type ( } Recommendation *ConfigRecommendation `json:"recommendation,omitempty"` } - - // HostHandlerResponse is the response type for the /host/:hostkey endpoint. 
- HostHandlerResponse struct { - Host hostdb.Host `json:"host"` - Checks *HostHandlerResponseChecks `json:"checks,omitempty"` - } - - HostHandlerResponseChecks struct { - Gouging bool `json:"gouging"` - GougingBreakdown HostGougingBreakdown `json:"gougingBreakdown"` - Score float64 `json:"score"` - ScoreBreakdown HostScoreBreakdown `json:"scoreBreakdown"` - Usable bool `json:"usable"` - UnusableReasons []string `json:"unusableReasons"` - } ) func (c AutopilotConfig) Validate() error { diff --git a/autopilot/client.go b/autopilot/client.go index f657d01fc..010c1f037 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -40,7 +40,7 @@ func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostResponse, err e } // HostInfo returns information about all hosts. -func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostHandlerResponse, err error) { +func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostResponse, err error) { err = c.c.POST("/hosts", api.SearchHostsRequest{ Offset: offset, Limit: limit, From 8484bb06b6185f7052f0dfef05becf7e888dcaa8 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 25 Mar 2024 20:03:57 +0100 Subject: [PATCH 118/201] bus: fix route --- autopilot/client.go | 2 +- bus/bus.go | 4 ++-- bus/client/hosts.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/autopilot/client.go b/autopilot/client.go index 010c1f037..b1d3a4ac6 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -41,7 +41,7 @@ func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostResponse, err e // HostInfo returns information about all hosts. 
func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostResponse, err error) { - err = c.c.POST("/hosts", api.SearchHostsRequest{ + err = c.c.POST("/hosts", api.SearchHostOptions{ Offset: offset, Limit: limit, FilterMode: filterMode, diff --git a/bus/bus.go b/bus/bus.go index f8bd2effa..eeb833156 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -253,7 +253,7 @@ func (b *bus) Handler() http.Handler { "GET /autopilot/:id": b.autopilotsHandlerGET, "PUT /autopilot/:id": b.autopilotsHandlerPUT, - "PUT /autopilot/:id/host/:hostkey/checks": b.autopilotHostChecksHandlerPUT, + "PUT /autopilot/:id/host/:hostkey/check": b.autopilotHostCheckHandlerPUT, "GET /buckets": b.bucketsHandlerGET, "POST /buckets": b.bucketsHandlerPOST, @@ -1969,7 +1969,7 @@ func (b *bus) autopilotsHandlerPUT(jc jape.Context) { jc.Check("failed to update autopilot", b.as.UpdateAutopilot(jc.Request.Context(), ap)) } -func (b *bus) autopilotHostChecksHandlerPUT(jc jape.Context) { +func (b *bus) autopilotHostCheckHandlerPUT(jc jape.Context) { var id string if jc.DecodeParam("id", &id) != nil { return diff --git a/bus/client/hosts.go b/bus/client/hosts.go index 4e1aeab30..f1f072b15 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -106,6 +106,6 @@ func (c *Client) UpdateHostBlocklist(ctx context.Context, add, remove []string, // UpdateHostCheck updates the host with the most recent check performed by the // autopilot with given id. 
func (c *Client) UpdateHostCheck(ctx context.Context, autopilotID string, hostKey types.PublicKey, hostCheck api.HostCheck) (err error) { - err = c.c.WithContext(ctx).PUT(fmt.Sprintf("/autopilot/%s/host/%s", autopilotID, hostKey), hostCheck) + err = c.c.WithContext(ctx).PUT(fmt.Sprintf("/autopilot/%s/host/%s/check", autopilotID, hostKey), hostCheck) return } From 65c330d7dd19cd7bdf07adb11314f3c9b61fd19d Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 25 Mar 2024 20:12:41 +0100 Subject: [PATCH 119/201] stores: add retry to UpdateHostCheck --- stores/hostdb.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stores/hostdb.go b/stores/hostdb.go index a121ab7d0..822e97f95 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -505,7 +505,7 @@ func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.Host } func (ss *SQLStore) UpdateHostCheck(ctx context.Context, autopilotID string, hk types.PublicKey, hc api.HostCheck) (err error) { - err = ss.db.Transaction(func(tx *gorm.DB) error { + err = ss.retryTransaction(ctx, (func(tx *gorm.DB) error { // fetch ap id var apID uint if err := tx. @@ -566,7 +566,7 @@ func (ss *SQLStore) UpdateHostCheck(ctx context.Context, autopilotID string, hk GougingUploadErr: hc.Gouging.UploadErr, }). 
Error - }) + })) return } From 69830362ecb240d0c4cf19fb77de6590bc0b986e Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 25 Mar 2024 20:19:04 +0100 Subject: [PATCH 120/201] autopilot: fix SearchHostOptions usage --- api/host.go | 19 ---------------- autopilot/autopilot.go | 50 +++++++++++++++++++++++++++++++++++------- autopilot/client.go | 2 +- 3 files changed, 43 insertions(+), 28 deletions(-) diff --git a/api/host.go b/api/host.go index 46a5597b3..c1e72aa16 100644 --- a/api/host.go +++ b/api/host.go @@ -258,22 +258,3 @@ func (ub HostUsabilityBreakdown) UnusableReasons() []string { } return reasons } - -func (h Host) ToHostResponse(autopilotID string) HostResponse { - check, ok := h.Checks[autopilotID] - if !ok { - return HostResponse{Host: h.Host} - } - - return HostResponse{ - Host: h.Host, - Checks: &HostChecks{ - Gouging: check.Gouging.Gouging(), - GougingBreakdown: check.Gouging, - Score: check.Score.Score(), - ScoreBreakdown: check.Score, - Usable: check.Usability.IsUsable(), - UnusableReasons: check.Usability.UnusableReasons(), - }, - } -} diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 97ea4e939..6f2548487 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -698,20 +698,32 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { return } - jc.Encode(hi.ToHostResponse(ap.id)) + check, ok := hi.Checks[ap.id] + if ok { + jc.Encode(api.HostResponse{ + Host: hi.Host, + Checks: &api.HostChecks{ + Gouging: check.Gouging.Gouging(), + GougingBreakdown: check.Gouging, + Score: check.Score.Score(), + ScoreBreakdown: check.Score, + Usable: check.Usability.IsUsable(), + UnusableReasons: check.Usability.UnusableReasons(), + }, + }) + return + } + + jc.Encode(api.HostResponse{Host: hi.Host}) } func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { - var req api.SearchHostOptions + var req api.SearchHostsRequest if jc.Decode(&req) != nil { return } else if req.AutopilotID != "" && req.AutopilotID != ap.id { jc.Error(errors.New("invalid 
autopilot id"), http.StatusBadRequest) return - } else { - // TODO: on next major release we should not re-use options between bus - // and autopilot API if we don't support all fields in both - req.AutopilotID = ap.id } // TODO: remove on next major release @@ -719,13 +731,35 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { return } - hosts, err := ap.bus.SearchHosts(jc.Request.Context(), req) + hosts, err := ap.bus.SearchHosts(jc.Request.Context(), api.SearchHostOptions{ + AutopilotID: ap.id, + Offset: req.Offset, + Limit: req.Limit, + FilterMode: req.FilterMode, + UsabilityMode: req.UsabilityMode, + AddressContains: req.AddressContains, + KeyIn: req.KeyIn, + }) if jc.Check("failed to get host info", err) != nil { return } resps := make([]api.HostResponse, len(hosts)) for i, host := range hosts { - resps[i] = host.ToHostResponse(ap.id) + if check, ok := host.Checks[ap.id]; ok { + resps[i] = api.HostResponse{ + Host: host.Host, + Checks: &api.HostChecks{ + Gouging: check.Gouging.Gouging(), + GougingBreakdown: check.Gouging, + Score: check.Score.Score(), + ScoreBreakdown: check.Score, + Usable: check.Usability.IsUsable(), + UnusableReasons: check.Usability.UnusableReasons(), + }, + } + } else { + resps[i] = api.HostResponse{Host: host.Host} + } } jc.Encode(resps) } diff --git a/autopilot/client.go b/autopilot/client.go index b1d3a4ac6..010c1f037 100644 --- a/autopilot/client.go +++ b/autopilot/client.go @@ -41,7 +41,7 @@ func (c *Client) HostInfo(hostKey types.PublicKey) (resp api.HostResponse, err e // HostInfo returns information about all hosts. 
func (c *Client) HostInfos(ctx context.Context, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) (resp []api.HostResponse, err error) { - err = c.c.POST("/hosts", api.SearchHostOptions{ + err = c.c.POST("/hosts", api.SearchHostsRequest{ Offset: offset, Limit: limit, FilterMode: filterMode, From d3c428362bd4409594d4e11c9167b43890e7a0fe Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 25 Mar 2024 20:33:39 +0100 Subject: [PATCH 121/201] hostdb: move host to api package --- api/host.go | 76 ++++++++++++++++++++++++++++--- autopilot/autopilot.go | 11 ++--- autopilot/autopilot_test.go | 47 +++++++++---------- autopilot/contractor.go | 11 ++--- autopilot/host_test.go | 14 +++--- autopilot/hostfilter.go | 2 +- autopilot/hosts_test.go | 8 ++-- autopilot/hostscore.go | 19 ++++---- autopilot/hostscore_test.go | 3 +- autopilot/scanner.go | 3 +- autopilot/scanner_test.go | 19 +++----- autopilot/workerpool.go | 3 +- bus/bus.go | 7 ++- bus/client/hosts.go | 7 ++- hostdb/hostdb.go | 71 ----------------------------- internal/test/e2e/cluster_test.go | 5 +- internal/test/e2e/pruning_test.go | 5 +- stores/hostdb.go | 58 ++++++++++++----------- stores/hostdb_test.go | 30 ++++++------ worker/client/rhp.go | 3 +- worker/host.go | 9 ++-- worker/host_test.go | 11 ++--- worker/interactions.go | 6 +-- worker/mocks_test.go | 11 ++--- worker/pricetables.go | 18 ++++---- worker/pricetables_test.go | 3 +- worker/rhpv3.go | 21 ++++----- worker/worker.go | 11 ++--- 28 files changed, 227 insertions(+), 265 deletions(-) diff --git a/api/host.go b/api/host.go index c1e72aa16..e3a414dfb 100644 --- a/api/host.go +++ b/api/host.go @@ -5,9 +5,11 @@ import ( "fmt" "net/url" "strings" + "time" + rhpv2 "go.sia.tech/core/rhp/v2" + rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" - "go.sia.tech/renterd/hostdb" ) const ( @@ -41,12 +43,12 @@ var ( type ( // HostsScanRequest is the request type for the /hosts/scans endpoint. 
HostsScanRequest struct { - Scans []hostdb.HostScan `json:"scans"` + Scans []HostScan `json:"scans"` } // HostsPriceTablesRequest is the request type for the /hosts/pricetables endpoint. HostsPriceTablesRequest struct { - PriceTableUpdates []hostdb.PriceTableUpdate `json:"priceTableUpdates"` + PriceTableUpdates []HostPriceTableUpdate `json:"priceTableUpdates"` } // HostsRemoveRequest is the request type for the /hosts/remove endpoint. @@ -70,7 +72,7 @@ type ( // HostResponse is the response type for the GET // /api/autopilot/host/:hostkey endpoint. HostResponse struct { - Host hostdb.Host `json:"host"` + Host Host `json:"host"` Checks *HostChecks `json:"checks,omitempty"` } @@ -146,9 +148,54 @@ func (opts HostsForScanningOptions) Apply(values url.Values) { type ( Host struct { - hostdb.Host - Blocked bool `json:"blocked"` - Checks map[string]HostCheck `json:"checks"` + KnownSince time.Time `json:"knownSince"` + LastAnnouncement time.Time `json:"lastAnnouncement"` + PublicKey types.PublicKey `json:"publicKey"` + NetAddress string `json:"netAddress"` + PriceTable HostPriceTable `json:"priceTable"` + Settings rhpv2.HostSettings `json:"settings"` + Interactions HostInteractions `json:"interactions"` + Scanned bool `json:"scanned"` + Blocked bool `json:"blocked"` + Checks map[string]HostCheck `json:"checks"` + } + + HostAddress struct { + PublicKey types.PublicKey `json:"publicKey"` + NetAddress string `json:"netAddress"` + } + + HostInteractions struct { + TotalScans uint64 `json:"totalScans"` + LastScan time.Time `json:"lastScan"` + LastScanSuccess bool `json:"lastScanSuccess"` + LostSectors uint64 `json:"lostSectors"` + SecondToLastScanSuccess bool `json:"secondToLastScanSuccess"` + Uptime time.Duration `json:"uptime"` + Downtime time.Duration `json:"downtime"` + + SuccessfulInteractions float64 `json:"successfulInteractions"` + FailedInteractions float64 `json:"failedInteractions"` + } + + HostScan struct { + HostKey types.PublicKey `json:"hostKey"` + Success bool + 
Timestamp time.Time + Settings rhpv2.HostSettings + PriceTable rhpv3.HostPriceTable + } + + HostPriceTable struct { + rhpv3.HostPriceTable + Expiry time.Time `json:"expiry"` + } + + HostPriceTableUpdate struct { + HostKey types.PublicKey `json:"hostKey"` + Success bool + Timestamp time.Time + PriceTable HostPriceTable } HostCheck struct { @@ -187,6 +234,21 @@ type ( } ) +// IsAnnounced returns whether the host has been announced. +func (h Host) IsAnnounced() bool { + return !h.LastAnnouncement.IsZero() +} + +// IsOnline returns whether a host is considered online. +func (h Host) IsOnline() bool { + if h.Interactions.TotalScans == 0 { + return false + } else if h.Interactions.TotalScans == 1 { + return h.Interactions.LastScanSuccess + } + return h.Interactions.LastScanSuccess || h.Interactions.SecondToLastScanSuccess +} + func (sb HostScoreBreakdown) String() string { return fmt.Sprintf("Age: %v, Col: %v, Int: %v, SR: %v, UT: %v, V: %v, Pr: %v", sb.Age, sb.Collateral, sb.Interactions, sb.StorageRemaining, sb.Uptime, sb.Version, sb.Prices) } diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 6f2548487..4e3023a2f 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -17,7 +17,6 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/build" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" @@ -54,7 +53,7 @@ type Bus interface { // hostdb Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) - HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) + HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]api.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) UpdateHostCheck(ctx context.Context, 
autopilotID string, hostKey types.PublicKey, hostCheck api.HostCheck) error @@ -701,7 +700,7 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { check, ok := hi.Checks[ap.id] if ok { jc.Encode(api.HostResponse{ - Host: hi.Host, + Host: hi, Checks: &api.HostChecks{ Gouging: check.Gouging.Gouging(), GougingBreakdown: check.Gouging, @@ -714,7 +713,7 @@ func (ap *Autopilot) hostHandlerGET(jc jape.Context) { return } - jc.Encode(api.HostResponse{Host: hi.Host}) + jc.Encode(api.HostResponse{Host: hi}) } func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { @@ -747,7 +746,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { for i, host := range hosts { if check, ok := host.Checks[ap.id]; ok { resps[i] = api.HostResponse{ - Host: host.Host, + Host: host, Checks: &api.HostChecks{ Gouging: check.Gouging.Gouging(), GougingBreakdown: check.Gouging, @@ -758,7 +757,7 @@ func (ap *Autopilot) hostsHandlerPOST(jc jape.Context) { }, } } else { - resps[i] = api.HostResponse{Host: host.Host} + resps[i] = api.HostResponse{Host: host} } } jc.Encode(resps) diff --git a/autopilot/autopilot_test.go b/autopilot/autopilot_test.go index a21b55c7b..a572c56fc 100644 --- a/autopilot/autopilot_test.go +++ b/autopilot/autopilot_test.go @@ -9,7 +9,6 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" ) func TestOptimiseGougingSetting(t *testing.T) { @@ -18,32 +17,30 @@ func TestOptimiseGougingSetting(t *testing.T) { for i := 0; i < 10; i++ { hosts = append(hosts, api.Host{ - Host: hostdb.Host{ - KnownSince: time.Unix(0, 0), - PriceTable: hostdb.HostPriceTable{ - HostPriceTable: rhpv3.HostPriceTable{ - CollateralCost: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), - }, + KnownSince: time.Unix(0, 0), + PriceTable: api.HostPriceTable{ + HostPriceTable: rhpv3.HostPriceTable{ + CollateralCost: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), }, - Settings: rhpv2.HostSettings{ - 
AcceptingContracts: true, - Collateral: types.Siacoins(1), - MaxCollateral: types.Siacoins(1000), - Version: "1.6.0", - }, - Interactions: hostdb.Interactions{ - Uptime: time.Hour * 1000, - LastScan: time.Now(), - LastScanSuccess: true, - SecondToLastScanSuccess: true, - TotalScans: 100, - }, - LastAnnouncement: time.Unix(0, 0), - Scanned: true, }, - Blocked: false, - Checks: nil, + Settings: rhpv2.HostSettings{ + AcceptingContracts: true, + Collateral: types.Siacoins(1), + MaxCollateral: types.Siacoins(1000), + Version: "1.6.0", + }, + Interactions: api.HostInteractions{ + Uptime: time.Hour * 1000, + LastScan: time.Now(), + LastScanSuccess: true, + SecondToLastScanSuccess: true, + TotalScans: 100, + }, + LastAnnouncement: time.Unix(0, 0), + Scanned: true, + Blocked: false, + Checks: nil, }) } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 0e85e4302..e72b0078e 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -15,7 +15,6 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/worker" @@ -103,7 +102,7 @@ type ( } scoredHost struct { - host hostdb.Host + host api.Host score float64 } @@ -761,7 +760,7 @@ func (c *contractor) runContractChecks(ctx context.Context, contracts []api.Cont } // decide whether the contract is still good - ci := contractInfo{contract: contract, priceTable: host.Host.PriceTable.HostPriceTable, settings: host.Host.Settings} + ci := contractInfo{contract: contract, priceTable: host.PriceTable.HostPriceTable, settings: host.Settings} usable, recoverable, refresh, renew, reasons := c.isUsableContract(state.cfg, state, ci, bh, ipFilter) ci.usable = usable ci.recoverable = recoverable @@ -1323,7 +1322,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []api.Host, usedH h.PriceTable.HostBlockHeight = cs.BlockHeight hc := 
checkHost(state.cfg, state.rs, gc, h, minScore, storedData[h.PublicKey]) if hc.Usability.IsUsable() { - candidates = append(candidates, scoredHost{h.Host, hc.Score.Score()}) + candidates = append(candidates, scoredHost{h, hc.Score.Score()}) continue } @@ -1505,7 +1504,7 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI return refreshedContract, true, nil } -func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *contractor) formContract(ctx context.Context, w Worker, host api.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { // convenience variables state := c.ap.State() hk := host.PublicKey @@ -1632,7 +1631,7 @@ func initialContractFundingMinMax(cfg api.AutopilotConfig) (min types.Currency, return } -func refreshPriceTable(ctx context.Context, w Worker, host *hostdb.Host) error { +func refreshPriceTable(ctx context.Context, w Worker, host *api.Host) error { // return early if the host's pricetable is not expired yet if time.Now().Before(host.PriceTable.Expiry) { return nil diff --git a/autopilot/host_test.go b/autopilot/host_test.go index fa1a0ab44..965b2b05a 100644 --- a/autopilot/host_test.go +++ b/autopilot/host_test.go @@ -8,7 +8,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" - "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/api" "lukechampine.com/frand" ) @@ -40,20 +40,20 @@ func TestHost(t *testing.T) { } } -func newTestHosts(n int) []hostdb.Host { - hosts := make([]hostdb.Host, n) +func newTestHosts(n int) []api.Host { + hosts := make([]api.Host, n) for i := 0; i < n; i++ { hosts[i] = newTestHost(randomHostKey(), newTestHostPriceTable(), newTestHostSettings()) } return hosts } -func 
newTestHost(hk types.PublicKey, pt rhpv3.HostPriceTable, settings rhpv2.HostSettings) hostdb.Host { - return hostdb.Host{ +func newTestHost(hk types.PublicKey, pt rhpv3.HostPriceTable, settings rhpv2.HostSettings) api.Host { + return api.Host{ NetAddress: randomIP().String(), KnownSince: time.Now(), LastAnnouncement: time.Now(), - Interactions: hostdb.Interactions{ + Interactions: api.HostInteractions{ TotalScans: 2, LastScan: time.Now().Add(-time.Minute), LastScanSuccess: true, @@ -65,7 +65,7 @@ func newTestHost(hk types.PublicKey, pt rhpv3.HostPriceTable, settings rhpv2.Hos FailedInteractions: 0, }, PublicKey: hk, - PriceTable: hostdb.HostPriceTable{HostPriceTable: pt, Expiry: time.Now().Add(time.Minute)}, + PriceTable: api.HostPriceTable{HostPriceTable: pt, Expiry: time.Now().Add(time.Minute)}, Settings: settings, Scanned: true, } diff --git a/autopilot/hostfilter.go b/autopilot/hostfilter.go index 9a6e4afbf..61edbbe69 100644 --- a/autopilot/hostfilter.go +++ b/autopilot/hostfilter.go @@ -140,7 +140,7 @@ func checkHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.Gou // not gouging, this because the core package does not have overflow // checks in its cost calculations needed to calculate the period // cost - sb = hostScore(cfg, h.Host, storedData, rs.Redundancy()) + sb = hostScore(cfg, h, storedData, rs.Redundancy()) if sb.Score() < minScore { ub.LowScore = true } diff --git a/autopilot/hosts_test.go b/autopilot/hosts_test.go index 332bf1ea3..6644a1cd2 100644 --- a/autopilot/hosts_test.go +++ b/autopilot/hosts_test.go @@ -5,7 +5,7 @@ import ( "testing" "go.sia.tech/core/types" - "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/api" "lukechampine.com/frand" ) @@ -18,7 +18,7 @@ func TestScoredHostsRandSelectByScore(t *testing.T) { var hosts scoredHosts for hk, score := range hostToScores { - hosts = append(hosts, scoredHost{score: score, host: hostdb.Host{PublicKey: hk}}) + hosts = append(hosts, scoredHost{score: score, host: 
api.Host{PublicKey: hk}}) } for i := 0; i < 1000; i++ { @@ -55,8 +55,8 @@ func TestScoredHostsRandSelectByScore(t *testing.T) { // assert select is random on equal inputs counts := make([]int, 2) hosts = scoredHosts{ - {score: .1, host: hostdb.Host{PublicKey: types.PublicKey{1}}}, - {score: .1, host: hostdb.Host{PublicKey: types.PublicKey{2}}}, + {score: .1, host: api.Host{PublicKey: types.PublicKey{1}}}, + {score: .1, host: api.Host{PublicKey: types.PublicKey{2}}}, } for i := 0; i < 100; i++ { if hosts.randSelectByScore(1)[0].host.PublicKey == (types.PublicKey{1}) { diff --git a/autopilot/hostscore.go b/autopilot/hostscore.go index 3c26dce42..fc98499f1 100644 --- a/autopilot/hostscore.go +++ b/autopilot/hostscore.go @@ -9,13 +9,12 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/siad/build" ) const smallestValidScore = math.SmallestNonzeroFloat64 -func hostScore(cfg api.AutopilotConfig, h hostdb.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown { +func hostScore(cfg api.AutopilotConfig, h api.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown { // idealDataPerHost is the amount of data that we would have to put on each // host assuming that our storage requirements were spread evenly across // every single host. 
@@ -92,7 +91,7 @@ func storageRemainingScore(h rhpv2.HostSettings, storedData uint64, allocationPe return math.Pow(storageRatio, 2.0) } -func ageScore(h hostdb.Host) float64 { +func ageScore(h api.Host) float64 { // sanity check if h.KnownSince.IsZero() { return 0 @@ -179,14 +178,14 @@ func collateralScore(cfg api.AutopilotConfig, pt rhpv3.HostPriceTable, allocatio } } -func interactionScore(h hostdb.Host) float64 { +func interactionScore(h api.Host) float64 { success, fail := 30.0, 1.0 success += h.Interactions.SuccessfulInteractions fail += h.Interactions.FailedInteractions return math.Pow(success/(success+fail), 10) } -func uptimeScore(h hostdb.Host) float64 { +func uptimeScore(h api.Host) float64 { secondToLastScanSuccess := h.Interactions.SecondToLastScanSuccess lastScanSuccess := h.Interactions.LastScanSuccess uptime := h.Interactions.Uptime @@ -258,7 +257,7 @@ func versionScore(settings rhpv2.HostSettings) float64 { // contractPriceForScore returns the contract price of the host used for // scoring. Since we don't know whether rhpv2 or rhpv3 are used, we return the // bigger one for a pesimistic score. 
-func contractPriceForScore(h hostdb.Host) types.Currency { +func contractPriceForScore(h api.Host) types.Currency { cp := h.Settings.ContractPrice if cp.Cmp(h.PriceTable.ContractPrice) > 0 { cp = h.PriceTable.ContractPrice @@ -285,26 +284,26 @@ func sectorUploadCost(pt rhpv3.HostPriceTable, duration uint64) types.Currency { return uploadSectorCostRHPv3 } -func uploadCostForScore(cfg api.AutopilotConfig, h hostdb.Host, bytes uint64) types.Currency { +func uploadCostForScore(cfg api.AutopilotConfig, h api.Host, bytes uint64) types.Currency { uploadSectorCostRHPv3 := sectorUploadCost(h.PriceTable.HostPriceTable, cfg.Contracts.Period) numSectors := bytesToSectors(bytes) return uploadSectorCostRHPv3.Mul64(numSectors) } -func downloadCostForScore(h hostdb.Host, bytes uint64) types.Currency { +func downloadCostForScore(h api.Host, bytes uint64) types.Currency { rsc := h.PriceTable.BaseCost().Add(h.PriceTable.ReadSectorCost(rhpv2.SectorSize)) downloadSectorCostRHPv3, _ := rsc.Total() numSectors := bytesToSectors(bytes) return downloadSectorCostRHPv3.Mul64(numSectors) } -func storageCostForScore(cfg api.AutopilotConfig, h hostdb.Host, bytes uint64) types.Currency { +func storageCostForScore(cfg api.AutopilotConfig, h api.Host, bytes uint64) types.Currency { storeSectorCostRHPv3 := sectorStorageCost(h.PriceTable.HostPriceTable, cfg.Contracts.Period) numSectors := bytesToSectors(bytes) return storeSectorCostRHPv3.Mul64(numSectors) } -func hostPeriodCostForScore(h hostdb.Host, cfg api.AutopilotConfig, expectedRedundancy float64) types.Currency { +func hostPeriodCostForScore(h api.Host, cfg api.AutopilotConfig, expectedRedundancy float64) types.Currency { // compute how much data we upload, download and store. 
uploadPerHost := uint64(float64(cfg.Contracts.Upload) * expectedRedundancy / float64(cfg.Contracts.Amount)) downloadPerHost := uint64(float64(cfg.Contracts.Download) * expectedRedundancy / float64(cfg.Contracts.Amount)) diff --git a/autopilot/hostscore_test.go b/autopilot/hostscore_test.go index e48417235..bc5f61b8a 100644 --- a/autopilot/hostscore_test.go +++ b/autopilot/hostscore_test.go @@ -9,7 +9,6 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" ) var cfg = api.AutopilotConfig{ @@ -34,7 +33,7 @@ var cfg = api.AutopilotConfig{ func TestHostScore(t *testing.T) { day := 24 * time.Hour - newHost := func(s rhpv2.HostSettings) hostdb.Host { + newHost := func(s rhpv2.HostSettings) api.Host { return newTestHost(randomHostKey(), newTestHostPriceTable(), s) } h1 := newHost(newTestHostSettings()) diff --git a/autopilot/scanner.go b/autopilot/scanner.go index d733c8d0c..f466a842d 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -11,7 +11,6 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -32,7 +31,7 @@ type ( // scanner tests with every interface change bus interface { SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) - HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) + HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]api.HostAddress, error) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) } diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index 860a855fe..512cbd517 100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -9,13 +9,12 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" 
"go.uber.org/zap" "go.uber.org/zap/zapcore" ) type mockBus struct { - hosts []hostdb.Host + hosts []api.Host reqs []string } @@ -32,14 +31,10 @@ func (b *mockBus) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ( end = len(b.hosts) } - hosts := make([]api.Host, len(b.hosts[start:end])) - for i, h := range b.hosts[start:end] { - hosts[i] = api.Host{Host: h} - } - return hosts, nil + return b.hosts[start:end], nil } -func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]hostdb.HostAddress, error) { +func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]api.HostAddress, error) { hosts, err := b.SearchHosts(ctx, api.SearchHostOptions{ Offset: opts.Offset, Limit: opts.Limit, @@ -47,9 +42,9 @@ func (b *mockBus) HostsForScanning(ctx context.Context, opts api.HostsForScannin if err != nil { return nil, err } - var hostAddresses []hostdb.HostAddress + var hostAddresses []api.HostAddress for _, h := range hosts { - hostAddresses = append(hostAddresses, hostdb.HostAddress{ + hostAddresses = append(hostAddresses, api.HostAddress{ NetAddress: h.NetAddress, PublicKey: h.PublicKey, }) @@ -80,8 +75,8 @@ func (w *mockWorker) RHPScan(ctx context.Context, hostKey types.PublicKey, hostI return api.RHPScanResponse{}, nil } -func (w *mockWorker) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string) (hostdb.HostPriceTable, error) { - return hostdb.HostPriceTable{}, nil +func (w *mockWorker) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string) (api.HostPriceTable, error) { + return api.HostPriceTable{}, nil } func TestScanner(t *testing.T) { diff --git a/autopilot/workerpool.go b/autopilot/workerpool.go index d8c821354..16a6b4c99 100644 --- a/autopilot/workerpool.go +++ b/autopilot/workerpool.go @@ -9,7 +9,6 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" 
"go.sia.tech/renterd/object" "lukechampine.com/frand" ) @@ -23,7 +22,7 @@ type Worker interface { RHPBroadcast(ctx context.Context, fcid types.FileContractID) (err error) RHPForm(ctx context.Context, endHeight uint64, hk types.PublicKey, hostIP string, renterAddress types.Address, renterFunds types.Currency, hostCollateral types.Currency) (rhpv2.ContractRevision, []types.Transaction, error) RHPFund(ctx context.Context, contractID types.FileContractID, hostKey types.PublicKey, hostIP, siamuxAddr string, balance types.Currency) (err error) - RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (hostdb.HostPriceTable, error) + RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (api.HostPriceTable, error) RHPPruneContract(ctx context.Context, fcid types.FileContractID, timeout time.Duration) (pruned, remaining uint64, err error) RHPRenew(ctx context.Context, fcid types.FileContractID, endHeight uint64, hk types.PublicKey, hostIP string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral types.Currency, expectedStorage, windowSize uint64) (api.RHPRenewResponse, error) RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP string, timeout time.Duration) (api.RHPScanResponse, error) diff --git a/bus/bus.go b/bus/bus.go index eeb833156..24f319de6 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -23,7 +23,6 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/build" "go.sia.tech/renterd/bus/client" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" @@ -94,9 +93,9 @@ type ( Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) HostAllowlist(ctx context.Context) ([]types.PublicKey, error) HostBlocklist(ctx context.Context) ([]string, error) - HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) - 
RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error - RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error + HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) + RecordHostScans(ctx context.Context, scans []api.HostScan) error + RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) ResetLostSectors(ctx context.Context, hk types.PublicKey) error SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) diff --git a/bus/client/hosts.go b/bus/client/hosts.go index f1f072b15..709cb899c 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -8,7 +8,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" ) // Host returns information about a particular host known to the server. @@ -39,7 +38,7 @@ func (c *Client) Hosts(ctx context.Context, opts api.GetHostsOptions) (hosts []a // HostsForScanning returns 'limit' host addresses at given 'offset' which // haven't been scanned after lastScan. -func (c *Client) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) (hosts []hostdb.HostAddress, err error) { +func (c *Client) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) (hosts []api.HostAddress, err error) { values := url.Values{} opts.Apply(values) err = c.c.WithContext(ctx).GET("/hosts/scanning?"+values.Encode(), &hosts) @@ -47,7 +46,7 @@ func (c *Client) HostsForScanning(ctx context.Context, opts api.HostsForScanning } // RecordHostInteraction records an interaction for the supplied host. 
-func (c *Client) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) (err error) { +func (c *Client) RecordHostScans(ctx context.Context, scans []api.HostScan) (err error) { err = c.c.WithContext(ctx).POST("/hosts/scans", api.HostsScanRequest{ Scans: scans, }, nil) @@ -55,7 +54,7 @@ func (c *Client) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) ( } // RecordHostInteraction records an interaction for the supplied host. -func (c *Client) RecordPriceTables(ctx context.Context, priceTableUpdates []hostdb.PriceTableUpdate) (err error) { +func (c *Client) RecordPriceTables(ctx context.Context, priceTableUpdates []api.HostPriceTableUpdate) (err error) { err = c.c.WithContext(ctx).POST("/hosts/pricetables", api.HostsPriceTablesRequest{ PriceTableUpdates: priceTableUpdates, }, nil) diff --git a/hostdb/hostdb.go b/hostdb/hostdb.go index 1f4c341de..1a957e327 100644 --- a/hostdb/hostdb.go +++ b/hostdb/hostdb.go @@ -4,8 +4,6 @@ import ( "time" "gitlab.com/NebulousLabs/encoding" - rhpv2 "go.sia.tech/core/rhp/v2" - rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/siad/crypto" "go.sia.tech/siad/modules" @@ -59,72 +57,3 @@ func ForEachAnnouncement(b types.Block, height uint64, fn func(types.PublicKey, } } } - -// Interactions contains metadata about a host's interactions. 
-type Interactions struct { - TotalScans uint64 `json:"totalScans"` - LastScan time.Time `json:"lastScan"` - LastScanSuccess bool `json:"lastScanSuccess"` - LostSectors uint64 `json:"lostSectors"` - SecondToLastScanSuccess bool `json:"secondToLastScanSuccess"` - Uptime time.Duration `json:"uptime"` - Downtime time.Duration `json:"downtime"` - - SuccessfulInteractions float64 `json:"successfulInteractions"` - FailedInteractions float64 `json:"failedInteractions"` -} - -type HostScan struct { - HostKey types.PublicKey `json:"hostKey"` - Success bool - Timestamp time.Time - Settings rhpv2.HostSettings - PriceTable rhpv3.HostPriceTable -} - -type PriceTableUpdate struct { - HostKey types.PublicKey `json:"hostKey"` - Success bool - Timestamp time.Time - PriceTable HostPriceTable -} - -// HostAddress contains the address of a specific host identified by a public -// key. -type HostAddress struct { - PublicKey types.PublicKey `json:"publicKey"` - NetAddress string `json:"netAddress"` -} - -// A Host pairs a host's public key with a set of interactions. -type Host struct { - KnownSince time.Time `json:"knownSince"` - LastAnnouncement time.Time `json:"lastAnnouncement"` - PublicKey types.PublicKey `json:"publicKey"` - NetAddress string `json:"netAddress"` - PriceTable HostPriceTable `json:"priceTable"` - Settings rhpv2.HostSettings `json:"settings"` - Interactions Interactions `json:"interactions"` - Scanned bool `json:"scanned"` -} - -// A HostPriceTable extends the host price table with its expiry. -type HostPriceTable struct { - rhpv3.HostPriceTable - Expiry time.Time `json:"expiry"` -} - -// IsAnnounced returns whether the host has been announced. -func (h Host) IsAnnounced() bool { - return !h.LastAnnouncement.IsZero() -} - -// IsOnline returns whether a host is considered online. 
-func (h Host) IsOnline() bool { - if h.Interactions.TotalScans == 0 { - return false - } else if h.Interactions.TotalScans == 1 { - return h.Interactions.LastScanSuccess - } - return h.Interactions.LastScanSuccess || h.Interactions.SecondToLastScanSuccess -} diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 77898d4cf..76e6dd814 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -23,7 +23,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/test" "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" @@ -166,7 +165,7 @@ func TestNewTestCluster(t *testing.T) { if len(hi.Checks.UnusableReasons) != 0 { t.Fatal("usable hosts don't have any reasons set") } - if reflect.DeepEqual(hi.Host, hostdb.Host{}) { + if reflect.DeepEqual(hi.Host, api.Host{}) { t.Fatal("host wasn't set") } } @@ -188,7 +187,7 @@ func TestNewTestCluster(t *testing.T) { if len(hi.Checks.UnusableReasons) != 0 { t.Fatal("usable hosts don't have any reasons set") } - if reflect.DeepEqual(hi.Host, hostdb.Host{}) { + if reflect.DeepEqual(hi.Host, api.Host{}) { t.Fatal("host wasn't set") } allHosts[hi.Host.PublicKey] = struct{}{} diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index de948c970..7c1a856f1 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -11,7 +11,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/test" ) @@ -32,10 +31,10 @@ func TestHostPruning(t *testing.T) { now := time.Now() recordFailedInteractions := func(n int, hk types.PublicKey) { t.Helper() - his := make([]hostdb.HostScan, n) + his := make([]api.HostScan, n) for i := 0; i < n; i++ { now = now.Add(time.Hour).Add(time.Minute) // 1m leeway - his[i] = hostdb.HostScan{ + his[i] = api.HostScan{ HostKey: hk, 
Timestamp: now, Success: false, diff --git a/stores/hostdb.go b/stores/hostdb.go index 822e97f95..38dde848c 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -41,7 +41,7 @@ var ( ) type ( - // dbHost defines a hostdb.Interaction as persisted in the DB. Deleting a + // dbHost defines a api.Interaction as persisted in the DB. Deleting a // host from the db will cascade the deletion and also delete the // corresponding announcements and interactions with that host. // @@ -322,31 +322,29 @@ func (h dbHost) convert(blocked bool) api.Host { checks[check.DBAutopilot.Identifier] = check.convert() } return api.Host{ - Host: hostdb.Host{ - KnownSince: h.CreatedAt, - LastAnnouncement: h.LastAnnouncement, - NetAddress: h.NetAddress, - Interactions: hostdb.Interactions{ - TotalScans: h.TotalScans, - LastScan: lastScan, - LastScanSuccess: h.LastScanSuccess, - SecondToLastScanSuccess: h.SecondToLastScanSuccess, - Uptime: h.Uptime, - Downtime: h.Downtime, - SuccessfulInteractions: h.SuccessfulInteractions, - FailedInteractions: h.FailedInteractions, - LostSectors: h.LostSectors, - }, - PriceTable: hostdb.HostPriceTable{ - HostPriceTable: h.PriceTable.convert(), - Expiry: h.PriceTableExpiry.Time, - }, - PublicKey: types.PublicKey(h.PublicKey), - Scanned: h.Scanned, - Settings: h.Settings.convert(), + KnownSince: h.CreatedAt, + LastAnnouncement: h.LastAnnouncement, + NetAddress: h.NetAddress, + Interactions: api.HostInteractions{ + TotalScans: h.TotalScans, + LastScan: lastScan, + LastScanSuccess: h.LastScanSuccess, + SecondToLastScanSuccess: h.SecondToLastScanSuccess, + Uptime: h.Uptime, + Downtime: h.Downtime, + SuccessfulInteractions: h.SuccessfulInteractions, + FailedInteractions: h.FailedInteractions, + LostSectors: h.LostSectors, }, - Blocked: blocked, - Checks: checks, + PriceTable: api.HostPriceTable{ + HostPriceTable: h.PriceTable.convert(), + Expiry: h.PriceTableExpiry.Time, + }, + PublicKey: types.PublicKey(h.PublicKey), + Scanned: h.Scanned, + Settings: 
h.Settings.convert(), + Blocked: blocked, + Checks: checks, } } @@ -571,7 +569,7 @@ func (ss *SQLStore) UpdateHostCheck(ctx context.Context, autopilotID string, hk } // HostsForScanning returns the address of hosts for scanning. -func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]hostdb.HostAddress, error) { +func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) { if offset < 0 { return nil, ErrNegativeOffset } @@ -580,7 +578,7 @@ func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, PublicKey publicKey `gorm:"unique;index;NOT NULL"` NetAddress string } - var hostAddresses []hostdb.HostAddress + var hostAddresses []api.HostAddress err := ss.db. WithContext(ctx). @@ -591,7 +589,7 @@ func (ss *SQLStore) HostsForScanning(ctx context.Context, maxLastScan time.Time, Order("last_scan ASC"). FindInBatches(&hosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { for _, h := range hosts { - hostAddresses = append(hostAddresses, hostdb.HostAddress{ + hostAddresses = append(hostAddresses, api.HostAddress{ PublicKey: types.PublicKey(h.PublicKey), NetAddress: h.NetAddress, }) @@ -844,7 +842,7 @@ func (ss *SQLStore) HostBlocklist(ctx context.Context) (blocklist []string, err return } -func (ss *SQLStore) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error { +func (ss *SQLStore) RecordHostScans(ctx context.Context, scans []api.HostScan) error { if len(scans) == 0 { return nil // nothing to do } @@ -965,7 +963,7 @@ func (ss *SQLStore) RecordHostScans(ctx context.Context, scans []hostdb.HostScan }) } -func (ss *SQLStore) RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error { +func (ss *SQLStore) RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error { if len(priceTableUpdate) == 0 { return nil // nothing to do } diff --git 
a/stores/hostdb_test.go b/stores/hostdb_test.go index 196170b01..e810af541 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -148,7 +148,7 @@ func TestSQLHostDB(t *testing.T) { } func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, settings rhpv2.HostSettings) error { - return s.RecordHostScans(context.Background(), []hostdb.HostScan{ + return s.RecordHostScans(context.Background(), []api.HostScan{ { HostKey: hk, Settings: settings, @@ -308,15 +308,15 @@ func TestSearchHosts(t *testing.T) { t.Fatal(err) } else if len(his) != 2 { t.Fatal("unexpected") - } else if his[0].Host.PublicKey != (types.PublicKey{2}) || his[1].Host.PublicKey != (types.PublicKey{3}) { - t.Fatal("unexpected", his[0].Host.PublicKey, his[1].Host.PublicKey) + } else if his[0].PublicKey != (types.PublicKey{2}) || his[1].PublicKey != (types.PublicKey{3}) { + t.Fatal("unexpected", his[0].PublicKey, his[1].PublicKey) } his, err = ss.SearchHosts(context.Background(), "", api.HostFilterModeBlocked, api.UsabilityFilterModeAll, "", nil, 0, -1) if err != nil { t.Fatal(err) } else if len(his) != 1 { t.Fatal("unexpected") - } else if his[0].Host.PublicKey != (types.PublicKey{1}) { + } else if his[0].PublicKey != (types.PublicKey{1}) { t.Fatal("unexpected", his) } err = ss.UpdateHostBlocklistEntries(context.Background(), nil, nil, true) @@ -480,7 +480,7 @@ func TestRecordScan(t *testing.T) { if err != nil { t.Fatal(err) } - if host.Interactions != (hostdb.Interactions{}) { + if host.Interactions != (api.HostInteractions{}) { t.Fatal("mismatch") } if host.Settings != (rhpv2.HostSettings{}) { @@ -499,7 +499,7 @@ func TestRecordScan(t *testing.T) { // Record a scan. 
firstScanTime := time.Now().UTC() settings := rhpv2.HostSettings{NetAddress: "host.com"} - if err := ss.RecordHostScans(ctx, []hostdb.HostScan{newTestScan(hk, firstScanTime, settings, true)}); err != nil { + if err := ss.RecordHostScans(ctx, []api.HostScan{newTestScan(hk, firstScanTime, settings, true)}); err != nil { t.Fatal(err) } host, err = ss.Host(ctx, hk) @@ -514,7 +514,7 @@ func TestRecordScan(t *testing.T) { t.Fatal("wrong time") } host.Interactions.LastScan = time.Time{} - if expected := (hostdb.Interactions{ + if expected := (api.HostInteractions{ TotalScans: 1, LastScan: time.Time{}, LastScanSuccess: true, @@ -532,7 +532,7 @@ func TestRecordScan(t *testing.T) { // Record another scan 1 hour after the previous one. secondScanTime := firstScanTime.Add(time.Hour) - if err := ss.RecordHostScans(ctx, []hostdb.HostScan{newTestScan(hk, secondScanTime, settings, true)}); err != nil { + if err := ss.RecordHostScans(ctx, []api.HostScan{newTestScan(hk, secondScanTime, settings, true)}); err != nil { t.Fatal(err) } host, err = ss.Host(ctx, hk) @@ -544,7 +544,7 @@ func TestRecordScan(t *testing.T) { } host.Interactions.LastScan = time.Time{} uptime += secondScanTime.Sub(firstScanTime) - if host.Interactions != (hostdb.Interactions{ + if host.Interactions != (api.HostInteractions{ TotalScans: 2, LastScan: time.Time{}, LastScanSuccess: true, @@ -559,7 +559,7 @@ func TestRecordScan(t *testing.T) { // Record another scan 2 hours after the second one. This time it fails. 
thirdScanTime := secondScanTime.Add(2 * time.Hour) - if err := ss.RecordHostScans(ctx, []hostdb.HostScan{newTestScan(hk, thirdScanTime, settings, false)}); err != nil { + if err := ss.RecordHostScans(ctx, []api.HostScan{newTestScan(hk, thirdScanTime, settings, false)}); err != nil { t.Fatal(err) } host, err = ss.Host(ctx, hk) @@ -571,7 +571,7 @@ func TestRecordScan(t *testing.T) { } host.Interactions.LastScan = time.Time{} downtime += thirdScanTime.Sub(secondScanTime) - if host.Interactions != (hostdb.Interactions{ + if host.Interactions != (api.HostInteractions{ TotalScans: 3, LastScan: time.Time{}, LastScanSuccess: false, @@ -621,7 +621,7 @@ func TestRemoveHosts(t *testing.T) { hi2 := newTestScan(hk, t2, rhpv2.HostSettings{NetAddress: "host.com"}, false) // record interactions - if err := ss.RecordHostScans(context.Background(), []hostdb.HostScan{hi1, hi2}); err != nil { + if err := ss.RecordHostScans(context.Background(), []api.HostScan{hi1, hi2}); err != nil { t.Fatal(err) } @@ -649,7 +649,7 @@ func TestRemoveHosts(t *testing.T) { // record interactions t3 := now.Add(-time.Minute * 60) // 1 hour ago (60min downtime) hi3 := newTestScan(hk, t3, rhpv2.HostSettings{NetAddress: "host.com"}, false) - if err := ss.RecordHostScans(context.Background(), []hostdb.HostScan{hi3}); err != nil { + if err := ss.RecordHostScans(context.Background(), []api.HostScan{hi3}); err != nil { t.Fatal(err) } @@ -1303,8 +1303,8 @@ func hostByPubKey(tx *gorm.DB, hostKey types.PublicKey) (dbHost, error) { } // newTestScan returns a host interaction with given parameters. 
-func newTestScan(hk types.PublicKey, scanTime time.Time, settings rhpv2.HostSettings, success bool) hostdb.HostScan { - return hostdb.HostScan{ +func newTestScan(hk types.PublicKey, scanTime time.Time, settings rhpv2.HostSettings, success bool) api.HostScan { + return api.HostScan{ HostKey: hk, Success: success, Timestamp: scanTime, diff --git a/worker/client/rhp.go b/worker/client/rhp.go index c71ddd1dc..ec7e10b3d 100644 --- a/worker/client/rhp.go +++ b/worker/client/rhp.go @@ -8,7 +8,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" rhpv2 "go.sia.tech/core/rhp/v2" ) @@ -53,7 +52,7 @@ func (c *Client) RHPFund(ctx context.Context, contractID types.FileContractID, h } // RHPPriceTable fetches a price table for a host. -func (c *Client) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (pt hostdb.HostPriceTable, err error) { +func (c *Client) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (pt api.HostPriceTable, err error) { req := api.RHPPriceTableRequest{ HostKey: hostKey, SiamuxAddr: siamuxAddr, diff --git a/worker/host.go b/worker/host.go index cd29572cc..4f4e97496 100644 --- a/worker/host.go +++ b/worker/host.go @@ -12,7 +12,6 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.uber.org/zap" ) @@ -23,7 +22,7 @@ type ( DownloadSector(ctx context.Context, w io.Writer, root types.Hash256, offset, length uint32, overpay bool) error UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error - FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hpt hostdb.HostPriceTable, err error) + FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hpt api.HostPriceTable, err error) FetchRevision(ctx context.Context, fetchTimeout time.Duration) 
(types.FileContractRevision, error) FundAccount(ctx context.Context, balance types.Currency, rev *types.FileContractRevision) error @@ -187,12 +186,12 @@ func (h *host) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rh return rev, txnSet, contractPrice, renewErr } -func (h *host) FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hpt hostdb.HostPriceTable, err error) { +func (h *host) FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hpt api.HostPriceTable, err error) { // fetchPT is a helper function that performs the RPC given a payment function - fetchPT := func(paymentFn PriceTablePaymentFunc) (hpt hostdb.HostPriceTable, err error) { + fetchPT := func(paymentFn PriceTablePaymentFunc) (hpt api.HostPriceTable, err error) { err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) (err error) { hpt, err = RPCPriceTable(ctx, t, paymentFn) - h.bus.RecordPriceTables(ctx, []hostdb.PriceTableUpdate{ + h.bus.RecordPriceTables(ctx, []api.HostPriceTableUpdate{ { HostKey: h.hk, Success: isSuccessfulInteraction(err), diff --git a/worker/host_test.go b/worker/host_test.go index a993c12e1..3d124e9aa 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -13,7 +13,6 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/test" "lukechampine.com/frand" ) @@ -22,7 +21,7 @@ type ( testHost struct { *hostMock *contractMock - hptFn func() hostdb.HostPriceTable + hptFn func() api.HostPriceTable } testHostManager struct { @@ -57,7 +56,7 @@ func newTestHost(h *hostMock, c *contractMock) *testHost { return newTestHostCustom(h, c, newTestHostPriceTable) } -func newTestHostCustom(h *hostMock, c *contractMock, hptFn func() hostdb.HostPriceTable) *testHost { +func newTestHostCustom(h *hostMock, c *contractMock, hptFn func() api.HostPriceTable) *testHost { return &testHost{ hostMock: 
h, contractMock: c, @@ -65,11 +64,11 @@ func newTestHostCustom(h *hostMock, c *contractMock, hptFn func() hostdb.HostPri } } -func newTestHostPriceTable() hostdb.HostPriceTable { +func newTestHostPriceTable() api.HostPriceTable { var uid rhpv3.SettingsID frand.Read(uid[:]) - return hostdb.HostPriceTable{ + return api.HostPriceTable{ HostPriceTable: rhpv3.HostPriceTable{UID: uid, HostBlockHeight: 100, Validity: time.Minute}, Expiry: time.Now().Add(time.Minute), } @@ -103,7 +102,7 @@ func (h *testHost) FetchRevision(ctx context.Context, fetchTimeout time.Duration return rev, nil } -func (h *testHost) FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hostdb.HostPriceTable, error) { +func (h *testHost) FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (api.HostPriceTable, error) { return h.hptFn(), nil } diff --git a/worker/interactions.go b/worker/interactions.go index 2107ae582..34e47953a 100644 --- a/worker/interactions.go +++ b/worker/interactions.go @@ -1,13 +1,13 @@ package worker import ( - "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/api" ) type ( HostInteractionRecorder interface { - RecordHostScan(...hostdb.HostScan) - RecordPriceTableUpdate(...hostdb.PriceTableUpdate) + RecordHostScan(...api.HostScan) + RecordPriceTableUpdate(...api.HostPriceTableUpdate) } ) diff --git a/worker/mocks_test.go b/worker/mocks_test.go index baf83b39d..c24b67df3 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -15,7 +15,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/object" "go.sia.tech/renterd/webhooks" ) @@ -267,10 +266,8 @@ func newHostMock(hk types.PublicKey) *hostMock { return &hostMock{ hk: hk, hi: api.Host{ - Host: hostdb.Host{ - PublicKey: hk, - Scanned: true, - }, + PublicKey: hk, + Scanned: true, }, } } @@ -298,11 +295,11 @@ func (hs *hostStoreMock) Host(ctx context.Context, hostKey types.PublicKey) (api return 
h.hi, nil } -func (hs *hostStoreMock) RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error { +func (hs *hostStoreMock) RecordHostScans(ctx context.Context, scans []api.HostScan) error { return nil } -func (hs *hostStoreMock) RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error { +func (hs *hostStoreMock) RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error { return nil } diff --git a/worker/pricetables.go b/worker/pricetables.go index 1bc2ee009..9ca4b1541 100644 --- a/worker/pricetables.go +++ b/worker/pricetables.go @@ -10,7 +10,7 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" - "go.sia.tech/renterd/hostdb" + "go.sia.tech/renterd/api" "lukechampine.com/frand" ) @@ -47,14 +47,14 @@ type ( hk types.PublicKey mu sync.Mutex - hpt hostdb.HostPriceTable + hpt api.HostPriceTable update *priceTableUpdate } priceTableUpdate struct { err error done chan struct{} - hpt hostdb.HostPriceTable + hpt api.HostPriceTable } ) @@ -75,7 +75,7 @@ func newPriceTables(hm HostManager, hs HostStore) *priceTables { } // fetch returns a price table for the given host -func (pts *priceTables) fetch(ctx context.Context, hk types.PublicKey, rev *types.FileContractRevision) (hostdb.HostPriceTable, error) { +func (pts *priceTables) fetch(ctx context.Context, hk types.PublicKey, rev *types.FileContractRevision) (api.HostPriceTable, error) { pts.mu.Lock() pt, exists := pts.priceTables[hk] if !exists { @@ -105,7 +105,7 @@ func (pt *priceTable) ongoingUpdate() (bool, *priceTableUpdate) { return ongoing, pt.update } -func (p *priceTable) fetch(ctx context.Context, rev *types.FileContractRevision) (hpt hostdb.HostPriceTable, err error) { +func (p *priceTable) fetch(ctx context.Context, rev *types.FileContractRevision) (hpt api.HostPriceTable, err error) { // grab the current price table p.mu.Lock() hpt = p.hpt @@ -115,7 +115,7 @@ func (p *priceTable) fetch(ctx context.Context, rev 
*types.FileContractRevision) // current price table is considered to gouge on the block height gc, err := GougingCheckerFromContext(ctx, false) if err != nil { - return hostdb.HostPriceTable{}, err + return api.HostPriceTable{}, err } // figure out whether we should update the price table, if not we can return @@ -137,7 +137,7 @@ func (p *priceTable) fetch(ctx context.Context, rev *types.FileContractRevision) } else if ongoing { select { case <-ctx.Done(): - return hostdb.HostPriceTable{}, fmt.Errorf("%w; %w", errPriceTableUpdateTimedOut, ctx.Err()) + return api.HostPriceTable{}, fmt.Errorf("%w; %w", errPriceTableUpdateTimedOut, ctx.Err()) case <-update.done: } return update.hpt, update.err @@ -166,14 +166,14 @@ func (p *priceTable) fetch(ctx context.Context, rev *types.FileContractRevision) // sanity check the host has been scanned before fetching the price table if !host.Scanned { - return hostdb.HostPriceTable{}, fmt.Errorf("host %v was not scanned", p.hk) + return api.HostPriceTable{}, fmt.Errorf("host %v was not scanned", p.hk) } // otherwise fetch it h := p.hm.Host(p.hk, types.FileContractID{}, host.Settings.SiamuxAddr()) hpt, err = h.FetchPriceTable(ctx, rev) if err != nil { - return hostdb.HostPriceTable{}, fmt.Errorf("failed to update pricetable, err %v", err) + return api.HostPriceTable{}, fmt.Errorf("failed to update pricetable, err %v", err) } return diff --git a/worker/pricetables_test.go b/worker/pricetables_test.go index 55b0f7057..22c021ccb 100644 --- a/worker/pricetables_test.go +++ b/worker/pricetables_test.go @@ -7,7 +7,6 @@ import ( "time" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" ) func TestPriceTables(t *testing.T) { @@ -45,7 +44,7 @@ func TestPriceTables(t *testing.T) { // manage the host, make sure fetching the price table blocks fetchPTBlockChan := make(chan struct{}) validPT := newTestHostPriceTable() - hm.addHost(newTestHostCustom(h, c, func() hostdb.HostPriceTable { + hm.addHost(newTestHostCustom(h, c, func() 
api.HostPriceTable { <-fetchPTBlockChan return validPT })) diff --git a/worker/rhpv3.go b/worker/rhpv3.go index c0404b128..22b75adc3 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -18,7 +18,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/mux/v1" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/utils" "go.sia.tech/siad/crypto" "go.uber.org/zap" @@ -623,36 +622,36 @@ func processPayment(s *streamV3, payment rhpv3.PaymentMethod) error { type PriceTablePaymentFunc func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) // RPCPriceTable calls the UpdatePriceTable RPC. -func RPCPriceTable(ctx context.Context, t *transportV3, paymentFunc PriceTablePaymentFunc) (_ hostdb.HostPriceTable, err error) { +func RPCPriceTable(ctx context.Context, t *transportV3, paymentFunc PriceTablePaymentFunc) (_ api.HostPriceTable, err error) { defer wrapErr(&err, "PriceTable") s, err := t.DialStream(ctx) if err != nil { - return hostdb.HostPriceTable{}, err + return api.HostPriceTable{}, err } defer s.Close() var pt rhpv3.HostPriceTable var ptr rhpv3.RPCUpdatePriceTableResponse if err := s.WriteRequest(rhpv3.RPCUpdatePriceTableID, nil); err != nil { - return hostdb.HostPriceTable{}, fmt.Errorf("couldn't send RPCUpdatePriceTableID: %w", err) + return api.HostPriceTable{}, fmt.Errorf("couldn't send RPCUpdatePriceTableID: %w", err) } else if err := s.ReadResponse(&ptr, maxPriceTableSize); err != nil { - return hostdb.HostPriceTable{}, fmt.Errorf("couldn't read RPCUpdatePriceTableResponse: %w", err) + return api.HostPriceTable{}, fmt.Errorf("couldn't read RPCUpdatePriceTableResponse: %w", err) } else if err := json.Unmarshal(ptr.PriceTableJSON, &pt); err != nil { - return hostdb.HostPriceTable{}, fmt.Errorf("couldn't unmarshal price table: %w", err) + return api.HostPriceTable{}, fmt.Errorf("couldn't unmarshal price table: %w", err) } else if payment, err := paymentFunc(pt); err != nil { - return hostdb.HostPriceTable{}, fmt.Errorf("couldn't 
create payment: %w", err) + return api.HostPriceTable{}, fmt.Errorf("couldn't create payment: %w", err) } else if payment == nil { - return hostdb.HostPriceTable{ + return api.HostPriceTable{ HostPriceTable: pt, Expiry: time.Now(), }, nil // intended not to pay } else if err := processPayment(s, payment); err != nil { - return hostdb.HostPriceTable{}, fmt.Errorf("couldn't process payment: %w", err) + return api.HostPriceTable{}, fmt.Errorf("couldn't process payment: %w", err) } else if err := s.ReadResponse(&rhpv3.RPCPriceTableResponse{}, 0); err != nil { - return hostdb.HostPriceTable{}, fmt.Errorf("couldn't read RPCPriceTableResponse: %w", err) + return api.HostPriceTable{}, fmt.Errorf("couldn't read RPCPriceTableResponse: %w", err) } else { - return hostdb.HostPriceTable{ + return api.HostPriceTable{ HostPriceTable: pt, Expiry: time.Now().Add(pt.Validity), }, nil diff --git a/worker/worker.go b/worker/worker.go index d0de33f71..c78be49ea 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -24,7 +24,6 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/build" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/webhooks" @@ -106,8 +105,8 @@ type ( } HostStore interface { - RecordHostScans(ctx context.Context, scans []hostdb.HostScan) error - RecordPriceTables(ctx context.Context, priceTableUpdate []hostdb.PriceTableUpdate) error + RecordHostScans(ctx context.Context, scans []api.HostScan) error + RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) @@ -355,9 +354,9 @@ func (w *worker) rhpPriceTableHandler(jc jape.Context) { // defer interaction recording var err error - var hpt hostdb.HostPriceTable + var hpt api.HostPriceTable defer func() { - 
w.bus.RecordPriceTables(ctx, []hostdb.PriceTableUpdate{ + w.bus.RecordPriceTables(ctx, []api.HostPriceTableUpdate{ { HostKey: rptr.HostKey, Success: isSuccessfulInteraction(err), @@ -1524,7 +1523,7 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty // record scans that timed out. recordCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - scanErr := w.bus.RecordHostScans(recordCtx, []hostdb.HostScan{ + scanErr := w.bus.RecordHostScans(recordCtx, []api.HostScan{ { HostKey: hostKey, Success: isSuccessfulInteraction(err), From 419b21e8bead113f2e900760dfd5bc44123a6a35 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 25 Mar 2024 21:23:47 +0100 Subject: [PATCH 122/201] stores: update SearchHosts --- autopilot/contractor.go | 2 +- stores/hostdb.go | 51 +++++++++++++---------------------------- stores/hostdb_test.go | 24 ++++++++----------- 3 files changed, 27 insertions(+), 50 deletions(-) diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 0e85e4302..2be7fe9e9 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -720,7 +720,7 @@ func (c *contractor) runContractChecks(ctx context.Context, contracts []api.Cont } // fetch host checks - check, ok := host.Checks[c.ap.id] + check, ok := hostChecks[hk] if !ok { // this is only possible due to developer error, if there is no // check the host would have been missing, so we treat it the same diff --git a/stores/hostdb.go b/stores/hostdb.go index 822e97f95..ee0277624 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -487,21 +487,14 @@ func (e *dbBlocklistEntry) blocks(h dbHost) bool { // Host returns information about a host. func (ss *SQLStore) Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) { - var h dbHost - - tx := ss.db. - WithContext(ctx). - Where(&dbHost{PublicKey: publicKey(hostKey)}). - Preload("Allowlist"). - Preload("Blocklist"). 
- Take(&h) - if errors.Is(tx.Error, gorm.ErrRecordNotFound) { + hosts, err := ss.SearchHosts(ctx, "", api.HostFilterModeAll, api.UsabilityFilterModeAll, "", []types.PublicKey{hostKey}, 0, 1) + if err != nil { + return api.Host{}, err + } else if len(hosts) == 0 { return api.Host{}, api.ErrHostNotFound - } else if tx.Error != nil { - return api.Host{}, tx.Error + } else { + return hosts[0], nil } - - return h.convert(ss.isBlocked(h)), nil } func (ss *SQLStore) UpdateHostCheck(ctx context.Context, autopilotID string, hk types.PublicKey, hc api.HostCheck) (err error) { @@ -610,10 +603,6 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, us return nil, ErrNegativeOffset } - // TODO PJ: use - _ = autopilotID - _ = usabilityMode - // validate filterMode switch filterMode { case api.HostFilterModeAllowed: @@ -631,7 +620,7 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, us hostFilter(filterMode, ss.hasAllowlist(), ss.hasBlocklist()), hostNetAddress(addressContains), hostPublicKey(keyIn), - usabilityFilter(usabilityMode), + usabilityFilter(autopilotID, usabilityMode), ) // preload allowlist and blocklist @@ -641,23 +630,9 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, us Preload("Blocklist") } - // filter checks - if autopilotID != "" { - query = query.Preload("Checks.DBAutopilot", "identifier = ?", autopilotID) - } else { - query = query.Preload("Checks.DBAutopilot") - } - // query = query. - // Preload("Checks.DBAutopilot"). - // Scopes( - // autopilotFilter(autopilotID), - // usabilityFilter(usabilityMode), - // ) - var hosts []api.Host var fullHosts []dbHost err := query. - Debug(). Offset(offset). Limit(limit). 
FindInBatches(&fullHosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { @@ -1144,13 +1119,19 @@ func hostFilter(filterMode string, hasAllowlist, hasBlocklist bool) func(*gorm.D } } -func usabilityFilter(usabilityMode string) func(*gorm.DB) *gorm.DB { +func usabilityFilter(autopilotID, usabilityMode string) func(*gorm.DB) *gorm.DB { return func(db *gorm.DB) *gorm.DB { switch usabilityMode { case api.UsabilityFilterModeUsable: - db = db.Preload("Checks", "usability_blocked = ? AND usability_offline = ? AND usability_low_score = ? AND usability_redundant_ip = ? AND usability_gouging = ? AND usability_not_accepting_contracts = ? AND usability_not_announced = ? AND usability_not_completing_scan = ?", false, false, false, false, false, false, false, false) + db = db. + Joins("INNER JOIN host_checks hc on hc.db_host_id = hosts.id"). + Joins("INNER JOIN autopilots a on a.id = hc.db_autopilot_id AND a.identifier = ?", autopilotID). + Where("hc.usability_blocked = ? AND hc.usability_offline = ? AND hc.usability_low_score = ? AND hc.usability_redundant_ip = ? AND hc.usability_gouging = ? AND hc.usability_not_accepting_contracts = ? AND hc.usability_not_announced = ? AND hc.usability_not_completing_scan = ?", false, false, false, false, false, false, false, false) case api.UsabilityFilterModeUnusable: - db = db.Preload("Checks", "usability_blocked = ? OR usability_offline = ? OR usability_low_score = ? OR usability_redundant_ip = ? OR usability_gouging = ? OR usability_not_accepting_contracts = ? OR usability_not_announced = ? OR usability_not_completing_scan = ?", true, true, true, true, true, true, true, true) + db = db. + Joins("INNER JOIN host_checks hc on hc.db_host_id = hosts.id"). + Joins("INNER JOIN autopilots a on a.id = hc.db_autopilot_id AND a.identifier = ?", autopilotID). + Where("hc.usability_blocked = ? OR hc.usability_offline = ? OR hc.usability_low_score = ? OR hc.usability_redundant_ip = ? OR hc.usability_gouging = ? 
OR hc.usability_not_accepting_contracts = ? OR hc.usability_not_announced = ? OR hc.usability_not_completing_scan = ?", true, true, true, true, true, true, true, true) case api.UsabilityFilterModeAll: // do nothing } diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index 196170b01..283468474 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -408,32 +408,28 @@ func TestSearchHosts(t *testing.T) { his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUsable, "", nil, 0, -1) if err != nil { t.Fatal(err) - } else if cnt != 3 { - t.Fatal("unexpected", cnt) + } else if len(his) != 1 { + t.Fatal("unexpected", len(his)) } - // assert h1 and h2 have the expected checks + // assert h1 has the expected checks if c1, ok := his[0].Checks[ap1]; !ok || c1 != h1c { t.Fatal("unexpected", c1, ok) - } else if _, ok := his[1].Checks[ap1]; ok { - t.Fatal("unexpected", ok) - } else if _, ok := his[1].Checks[ap2]; ok { - t.Fatal("unexpected") } his, err = ss.SearchHosts(context.Background(), ap1, api.HostFilterModeAll, api.UsabilityFilterModeUnusable, "", nil, 0, -1) if err != nil { t.Fatal(err) - } else if cnt != 3 { - t.Fatal("unexpected", cnt) + } else if len(his) != 1 { + t.Fatal("unexpected", len(his)) + } else if his[0].Host.PublicKey != hk2 { + t.Fatal("unexpected") } - // assert h1 and h2 have the expected checks - if _, ok := his[0].Checks[ap1]; ok { + // assert only ap1 check is there + if _, ok := his[0].Checks[ap1]; !ok { t.Fatal("unexpected") - } else if c2, ok := his[1].Checks[ap1]; !ok || c2 != h2c1 { - t.Fatal("unexpected", ok) - } else if _, ok := his[1].Checks[ap2]; ok { + } else if _, ok := his[0].Checks[ap2]; ok { t.Fatal("unexpected") } From c68a88cde9079eaa746b695046e53270b297df38 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 25 Mar 2024 21:51:42 +0100 Subject: [PATCH 123/201] lint: fix nesting --- stores/hostdb_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/stores/hostdb_test.go b/stores/hostdb_test.go index bffca5c0f..6adf19968 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -422,7 +422,7 @@ func TestSearchHosts(t *testing.T) { t.Fatal(err) } else if len(his) != 1 { t.Fatal("unexpected", len(his)) - } else if his[0].Host.PublicKey != hk2 { + } else if his[0].PublicKey != hk2 { t.Fatal("unexpected") } From 73f0640f2d5edaa0e043c95f57f37c9bf0278efd Mon Sep 17 00:00:00 2001 From: Nate Maninger Date: Mon, 25 Mar 2024 17:10:59 -0700 Subject: [PATCH 124/201] ci: release nightlies on linux --- .github/workflows/publish.yml | 56 ++++++++++++++++++++++++++++------- 1 file changed, 45 insertions(+), 11 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 824f69231..8b11e8b62 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,13 +1,13 @@ name: Publish -# Controls when the action will run. +# Controls when the action will run. on: # Triggers the workflow on new SemVer tags push: branches: - master - dev - tags: + tags: - 'v[0-9]+.[0-9]+.[0-9]+' - 'v[0-9]+.[0-9]+.[0-9]+-**' @@ -116,7 +116,7 @@ jobs: with: name: renterd path: release/ - build-mac: + build-mac: runs-on: macos-latest strategy: matrix: @@ -212,7 +212,7 @@ jobs: with: name: renterd path: release/ - build-windows: + build-windows: runs-on: windows-latest strategy: matrix: @@ -253,23 +253,21 @@ jobs: with: name: renterd path: release/ - dispatch: + + dispatch-homebrew: # only runs on full releases if: startsWith(github.ref, 'refs/tags/v') && !contains(github.ref, '-') needs: [docker, build-linux, build-mac, build-windows] - strategy: - matrix: - repo: ['siafoundation/homebrew-sia', 'siafoundation/linux'] runs-on: ubuntu-latest steps: - name: Extract Tag Name id: get_tag run: echo "::set-output name=tag_name::${GITHUB_REF#refs/tags/}" - - name: Repository Dispatch + - name: Dispatch uses: peter-evans/repository-dispatch@v3 with: token: ${{ secrets.PAT_REPOSITORY_DISPATCH }} - 
repository: ${{ matrix.repo }} + repository: siafoundation/homebrew-sia event-type: release-tagged client-payload: > { @@ -277,4 +275,40 @@ jobs: "tag": "${{ steps.get_tag.outputs.tag_name }}", "project": "renterd", "workflow_id": "${{ github.run_id }}" - } \ No newline at end of file + } + dispatch-linux: # run on full releases, release candidates, and master branch + if: startsWith(github.ref, 'refs/tags/v') || endsWith(github.ref, 'master') + needs: [docker, build-linux, build-mac, build-windows] + runs-on: ubuntu-latest + steps: + - name: Build Dispatch Payload + id: get_payload + uses: actions/github-script@v7 + with: + script: | + const isRelease = context.ref.startsWith('refs/tags/v'), + isBeta = isRelease && context.ref.includes('-beta'), + tag = isRelease ? context.ref.replace('refs/tags/', '') : 'master'; + + let component = 'nightly'; + if (isBeta) { + component = 'beta'; + } else if (isRelease) { + component = 'main'; + } + + return { + description: "renterd: The Next-Gen Sia Renter", + tag: tag, + project: "renterd", + workflow_id: context.runId, + component: component + }; + + - name: Dispatch + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.PAT_REPOSITORY_DISPATCH }} + repository: siafoundation/linux + event-type: release-tagged + client-payload: ${{ steps.get_payload.outputs.result }} \ No newline at end of file From 59d6009dc60b0bfae8e5d01ca77731d832fba3e2 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 26 Mar 2024 13:36:46 +0100 Subject: [PATCH 125/201] contractor: remove error check --- autopilot/contractor.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 2be7fe9e9..bd8235511 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -286,9 +286,6 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( // run contract checks updatedSet, toArchive, toStopUsing, toRefresh, toRenew := c.runContractChecks(ctx, contracts, 
isInCurrentSet, checks, cs.BlockHeight) - if err != nil { - return false, fmt.Errorf("failed to run contract checks, err: %v", err) - } // update host checks for hk, check := range checks { From e66f3896fd326d4bb939d70d8571e3aae582d4bd Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 27 Mar 2024 11:29:21 +0100 Subject: [PATCH 126/201] main: log avx2 --- cmd/renterd/main.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index bdb1dc423..3dad91c50 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -32,6 +32,7 @@ import ( "go.sia.tech/renterd/worker" "go.sia.tech/web/renterd" "go.uber.org/zap" + "golang.org/x/sys/cpu" "golang.org/x/term" "gopkg.in/yaml.v3" "gorm.io/gorm/logger" @@ -485,6 +486,9 @@ func main() { defer closeFn(context.Background()) logger.Info("renterd", zap.String("version", build.Version()), zap.String("network", build.NetworkName()), zap.String("commit", build.Commit()), zap.Time("buildDate", build.BuildTime())) + if runtime.GOARCH == "amd64" && !cpu.X86.HasAVX2 { + logger.Warn("renterd is running on a system without AVX2 support, performance may be degraded") + } // configure database logger dbLogCfg := cfg.Log.Database From bcc4591e707b773eada22824c3dd16a30d04dd8f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 27 Mar 2024 11:33:26 +0100 Subject: [PATCH 127/201] autopilot: remove debug logging --- autopilot/contractor.go | 1 - 1 file changed, 1 deletion(-) diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 82ea4e619..49ba304ae 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -1653,7 +1653,6 @@ func (c *contractor) shouldForgiveFailedRefresh(fcid types.FileContractID) bool lastFailure = time.Now() c.firstRefreshFailure[fcid] = lastFailure } - fmt.Println(time.Since(lastFailure)) return time.Since(lastFailure) < failedRefreshForgivenessPeriod } From b9f6762ea653a6a0cb65cb9a3ac736d28617aaaa Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 
26 Mar 2024 13:07:22 +0100 Subject: [PATCH 128/201] bus: take renewals into account in the sectors cache --- bus/bus.go | 13 ++--- bus/uploadingsectors.go | 97 +++++++++++++++++++++--------------- bus/uploadingsectors_test.go | 82 ++++++++++++++++++++++++------ 3 files changed, 130 insertions(+), 62 deletions(-) diff --git a/bus/bus.go b/bus/bus.go index d68e46309..8c7c99649 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -1017,7 +1017,7 @@ func (b *bus) contractsPrunableDataHandlerGET(jc jape.Context) { // adjust the amount of prunable data with the pending uploads, due to // how we record contract spending a contract's size might already // include pending sectors - pending := b.uploadingSectors.pending(fcid) + pending := b.uploadingSectors.Pending(fcid) if pending > size.Prunable { size.Prunable = 0 } else { @@ -1064,7 +1064,7 @@ func (b *bus) contractSizeHandlerGET(jc jape.Context) { // adjust the amount of prunable data with the pending uploads, due to how // we record contract spending a contract's size might already include // pending sectors - pending := b.uploadingSectors.pending(id) + pending := b.uploadingSectors.Pending(id) if pending > size.Prunable { size.Prunable = 0 } else { @@ -1141,6 +1141,7 @@ func (b *bus) contractIDRenewedHandlerPOST(jc jape.Context) { if jc.Check("couldn't store contract", err) == nil { jc.Encode(r) } + b.uploadingSectors.HandleRenewal(req.Contract.ID(), req.RenewedFrom) } func (b *bus) contractIDRootsHandlerGET(jc jape.Context) { @@ -1153,7 +1154,7 @@ func (b *bus) contractIDRootsHandlerGET(jc jape.Context) { if jc.Check("couldn't fetch contract sectors", err) == nil { jc.Encode(api.ContractRootsResponse{ Roots: roots, - Uploading: b.uploadingSectors.sectors(id), + Uploading: b.uploadingSectors.Sectors(id), }) } } @@ -1991,7 +1992,7 @@ func (b *bus) stateHandlerGET(jc jape.Context) { func (b *bus) uploadTrackHandlerPOST(jc jape.Context) { var id api.UploadID if jc.DecodeParam("id", &id) == nil { - jc.Check("failed to track 
upload", b.uploadingSectors.trackUpload(id)) + jc.Check("failed to track upload", b.uploadingSectors.StartUpload(id)) } } @@ -2004,13 +2005,13 @@ func (b *bus) uploadAddSectorHandlerPOST(jc jape.Context) { if jc.Decode(&req) != nil { return } - jc.Check("failed to add sector", b.uploadingSectors.addUploadingSector(id, req.ContractID, req.Root)) + jc.Check("failed to add sector", b.uploadingSectors.AddSector(id, req.ContractID, req.Root)) } func (b *bus) uploadFinishedHandlerDELETE(jc jape.Context) { var id api.UploadID if jc.DecodeParam("id", &id) == nil { - b.uploadingSectors.finishUpload(id) + b.uploadingSectors.FinishUpload(id) } } diff --git a/bus/uploadingsectors.go b/bus/uploadingsectors.go index 6a3917d50..18c64a7c5 100644 --- a/bus/uploadingsectors.go +++ b/bus/uploadingsectors.go @@ -19,12 +19,12 @@ const ( type ( uploadingSectorsCache struct { - mu sync.Mutex - uploads map[api.UploadID]*ongoingUpload + mu sync.Mutex + uploads map[api.UploadID]*ongoingUpload + renewedTo map[types.FileContractID]types.FileContractID } ongoingUpload struct { - mu sync.Mutex started time.Time contractSectors map[types.FileContractID][]types.Hash256 } @@ -32,82 +32,92 @@ type ( func newUploadingSectorsCache() *uploadingSectorsCache { return &uploadingSectorsCache{ - uploads: make(map[api.UploadID]*ongoingUpload), + uploads: make(map[api.UploadID]*ongoingUpload), + renewedTo: make(map[types.FileContractID]types.FileContractID), } } func (ou *ongoingUpload) addSector(fcid types.FileContractID, root types.Hash256) { - ou.mu.Lock() - defer ou.mu.Unlock() ou.contractSectors[fcid] = append(ou.contractSectors[fcid], root) } func (ou *ongoingUpload) sectors(fcid types.FileContractID) (roots []types.Hash256) { - ou.mu.Lock() - defer ou.mu.Unlock() if sectors, exists := ou.contractSectors[fcid]; exists && time.Since(ou.started) < cacheExpiry { roots = append(roots, sectors...) 
} return } -func (usc *uploadingSectorsCache) addUploadingSector(uID api.UploadID, fcid types.FileContractID, root types.Hash256) error { - // fetch ongoing upload +func (usc *uploadingSectorsCache) AddSector(uID api.UploadID, fcid types.FileContractID, root types.Hash256) error { usc.mu.Lock() - ongoing, exists := usc.uploads[uID] - usc.mu.Unlock() + defer usc.mu.Unlock() - // add sector if upload exists - if exists { - ongoing.addSector(fcid, root) - return nil + ongoing, ok := usc.uploads[uID] + if !ok { + return fmt.Errorf("%w; id '%v'", api.ErrUnknownUpload, uID) } - return fmt.Errorf("%w; id '%v'", api.ErrUnknownUpload, uID) + fcid = usc.latestFCID(fcid) + ongoing.addSector(fcid, root) + return nil } -func (usc *uploadingSectorsCache) pending(fcid types.FileContractID) (size uint64) { +func (usc *uploadingSectorsCache) FinishUpload(uID api.UploadID) { usc.mu.Lock() - var uploads []*ongoingUpload - for _, ongoing := range usc.uploads { - uploads = append(uploads, ongoing) + defer usc.mu.Unlock() + delete(usc.uploads, uID) + + // prune expired uploads + for uID, ongoing := range usc.uploads { + if time.Since(ongoing.started) > cacheExpiry { + delete(usc.uploads, uID) + } } - usc.mu.Unlock() - for _, ongoing := range uploads { - size += uint64(len(ongoing.sectors(fcid))) * rhp.SectorSize + // prune renewed to map + for old, new := range usc.renewedTo { + if _, exists := usc.renewedTo[new]; exists { + delete(usc.renewedTo, old) + } } - return } -func (usc *uploadingSectorsCache) sectors(fcid types.FileContractID) (roots []types.Hash256) { +func (usc *uploadingSectorsCache) HandleRenewal(fcid, renewedFrom types.FileContractID) { usc.mu.Lock() - var uploads []*ongoingUpload - for _, ongoing := range usc.uploads { - uploads = append(uploads, ongoing) + defer usc.mu.Unlock() + + for _, upload := range usc.uploads { + if _, exists := upload.contractSectors[renewedFrom]; exists { + upload.contractSectors[fcid] = upload.contractSectors[renewedFrom] + 
upload.contractSectors[renewedFrom] = nil + } } - usc.mu.Unlock() + usc.renewedTo[renewedFrom] = fcid +} - for _, ongoing := range uploads { - roots = append(roots, ongoing.sectors(fcid)...) +func (usc *uploadingSectorsCache) Pending(fcid types.FileContractID) (size uint64) { + usc.mu.Lock() + defer usc.mu.Unlock() + + fcid = usc.latestFCID(fcid) + for _, ongoing := range usc.uploads { + size += uint64(len(ongoing.sectors(fcid))) * rhp.SectorSize } return } -func (usc *uploadingSectorsCache) finishUpload(uID api.UploadID) { +func (usc *uploadingSectorsCache) Sectors(fcid types.FileContractID) (roots []types.Hash256) { usc.mu.Lock() defer usc.mu.Unlock() - delete(usc.uploads, uID) - // prune expired uploads - for uID, ongoing := range usc.uploads { - if time.Since(ongoing.started) > cacheExpiry { - delete(usc.uploads, uID) - } + fcid = usc.latestFCID(fcid) + for _, ongoing := range usc.uploads { + roots = append(roots, ongoing.sectors(fcid)...) } + return } -func (usc *uploadingSectorsCache) trackUpload(uID api.UploadID) error { +func (usc *uploadingSectorsCache) StartUpload(uID api.UploadID) error { usc.mu.Lock() defer usc.mu.Unlock() @@ -122,3 +132,10 @@ func (usc *uploadingSectorsCache) trackUpload(uID api.UploadID) error { } return nil } + +func (usc *uploadingSectorsCache) latestFCID(fcid types.FileContractID) types.FileContractID { + if latest, ok := usc.renewedTo[fcid]; ok { + return latest + } + return fcid +} diff --git a/bus/uploadingsectors_test.go b/bus/uploadingsectors_test.go index 244280c70..b1c9b725a 100644 --- a/bus/uploadingsectors_test.go +++ b/bus/uploadingsectors_test.go @@ -4,6 +4,7 @@ import ( "errors" "testing" + rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" "lukechampine.com/frand" @@ -15,20 +16,24 @@ func TestUploadingSectorsCache(t *testing.T) { uID1 := newTestUploadID() uID2 := newTestUploadID() - c.trackUpload(uID1) - c.trackUpload(uID2) + fcid1 := types.FileContractID{1} + fcid2 := 
types.FileContractID{2} + fcid3 := types.FileContractID{3} - _ = c.addUploadingSector(uID1, types.FileContractID{1}, types.Hash256{1}) - _ = c.addUploadingSector(uID1, types.FileContractID{2}, types.Hash256{2}) - _ = c.addUploadingSector(uID2, types.FileContractID{2}, types.Hash256{3}) + c.StartUpload(uID1) + c.StartUpload(uID2) - if roots1 := c.sectors(types.FileContractID{1}); len(roots1) != 1 || roots1[0] != (types.Hash256{1}) { + _ = c.AddSector(uID1, fcid1, types.Hash256{1}) + _ = c.AddSector(uID1, fcid2, types.Hash256{2}) + _ = c.AddSector(uID2, fcid2, types.Hash256{3}) + + if roots1 := c.Sectors(fcid1); len(roots1) != 1 || roots1[0] != (types.Hash256{1}) { t.Fatal("unexpected cached sectors") } - if roots2 := c.sectors(types.FileContractID{2}); len(roots2) != 2 { + if roots2 := c.Sectors(fcid2); len(roots2) != 2 { t.Fatal("unexpected cached sectors", roots2) } - if roots3 := c.sectors(types.FileContractID{3}); len(roots3) != 0 { + if roots3 := c.Sectors(fcid3); len(roots3) != 0 { t.Fatal("unexpected cached sectors") } @@ -39,28 +44,73 @@ func TestUploadingSectorsCache(t *testing.T) { t.Fatal("unexpected") } - c.finishUpload(uID1) - if roots1 := c.sectors(types.FileContractID{1}); len(roots1) != 0 { + c.FinishUpload(uID1) + if roots1 := c.Sectors(fcid1); len(roots1) != 0 { t.Fatal("unexpected cached sectors") } - if roots2 := c.sectors(types.FileContractID{2}); len(roots2) != 1 || roots2[0] != (types.Hash256{3}) { + if roots2 := c.Sectors(fcid2); len(roots2) != 1 || roots2[0] != (types.Hash256{3}) { t.Fatal("unexpected cached sectors") } - c.finishUpload(uID2) - if roots2 := c.sectors(types.FileContractID{1}); len(roots2) != 0 { + c.FinishUpload(uID2) + if roots2 := c.Sectors(fcid1); len(roots2) != 0 { t.Fatal("unexpected cached sectors") } - if err := c.addUploadingSector(uID1, types.FileContractID{1}, types.Hash256{1}); !errors.Is(err, api.ErrUnknownUpload) { + if err := c.AddSector(uID1, fcid1, types.Hash256{1}); !errors.Is(err, api.ErrUnknownUpload) { 
t.Fatal("unexpected error", err) } - if err := c.trackUpload(uID1); err != nil { + if err := c.StartUpload(uID1); err != nil { t.Fatal("unexpected error", err) } - if err := c.trackUpload(uID1); !errors.Is(err, api.ErrUploadAlreadyExists) { + if err := c.StartUpload(uID1); !errors.Is(err, api.ErrUploadAlreadyExists) { t.Fatal("unexpected error", err) } + + // reset cache + c = newUploadingSectorsCache() + + // track upload that uploads across two contracts + c.StartUpload(uID1) + c.AddSector(uID1, fcid1, types.Hash256{1}) + c.AddSector(uID1, fcid1, types.Hash256{2}) + c.HandleRenewal(fcid2, fcid1) + c.AddSector(uID1, fcid2, types.Hash256{3}) + c.AddSector(uID1, fcid2, types.Hash256{4}) + + // assert pending sizes for both contracts should be 4 sectors + p1 := c.Pending(fcid1) + p2 := c.Pending(fcid2) + if p1 != p2 || p1 != 4*rhpv2.SectorSize { + t.Fatal("unexpected pending size", p1/rhpv2.SectorSize, p2/rhpv2.SectorSize) + } + + // assert sectors for both contracts contain 4 sectors + s1 := c.Sectors(fcid1) + s2 := c.Sectors(fcid2) + if len(s1) != 4 || len(s2) != 4 { + t.Fatal("unexpected sectors", len(s1), len(s2)) + } + + // finish upload + c.FinishUpload(uID1) + s1 = c.Sectors(fcid1) + s2 = c.Sectors(fcid2) + if len(s1) != 0 || len(s2) != 0 { + t.Fatal("unexpected sectors", len(s1), len(s2)) + } + + // renew the contract + c.HandleRenewal(fcid3, fcid2) + + // trigger pruning + c.StartUpload(uID2) + c.FinishUpload(uID2) + + // assert renewedTo gets pruned + if len(c.renewedTo) != 1 { + t.Fatal("unexpected", len(c.renewedTo)) + } } func newTestUploadID() api.UploadID { From 9fc5127239739534abc96067e24a37f3c7a35fe3 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 27 Mar 2024 12:49:13 +0100 Subject: [PATCH 129/201] worker: remove some debug logging again --- worker/rhpv2.go | 38 ++++++++++---------------------------- worker/upload.go | 3 --- worker/uploader.go | 5 ----- 3 files changed, 10 insertions(+), 36 deletions(-) diff --git a/worker/rhpv2.go 
b/worker/rhpv2.go index 749e7e547..7207b96fa 100644 --- a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -16,7 +16,6 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/siad/build" "go.sia.tech/siad/crypto" - "go.uber.org/zap" "lukechampine.com/frand" ) @@ -287,16 +286,6 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types err = w.withContractLock(ctx, fcid, lockingPriorityPruning, func() error { return w.withTransportV2(ctx, hostKey, hostIP, func(t *rhpv2.Transport) error { return w.withRevisionV2(defaultLockTimeout, t, hostKey, fcid, lastKnownRevisionNumber, func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) (err error) { - id := frand.Entropy128() - logger := w.logger. - With("id", hex.EncodeToString(id[:])). - With("hostKey", hostKey). - With("hostVersion", settings.Version). - With("fcid", fcid). - With("revisionNumber", rev.Revision.RevisionNumber). - With("lastKnownRevisionNumber", lastKnownRevisionNumber). - Named("pruneContract") - // perform gouging checks gc, err := GougingCheckerFromContext(ctx, false) if err != nil { @@ -317,12 +306,6 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types if err != nil { return err } - for _, root := range pending { - logger.With("root", root).Debug("pending root") - } - for _, root := range want { - logger.With("root", root).Debug("wanted root") - } keep := make(map[types.Hash256]struct{}) for _, root := range append(want, pending...) 
{ keep[root] = struct{}{} @@ -335,7 +318,6 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types delete(keep, root) // prevent duplicates continue } - logger.With("index", i).With("root", root).Debug("collected root for pruning") indices = append(indices, uint64(i)) } if len(indices) == 0 { @@ -343,7 +325,7 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types } // delete the roots from the contract - deleted, err = w.deleteContractRoots(t, &rev, settings, logger, indices) + deleted, err = w.deleteContractRoots(t, &rev, settings, indices) if deleted < uint64(len(indices)) { remaining = uint64(len(indices)) - deleted } @@ -358,7 +340,15 @@ func (w *worker) PruneContract(ctx context.Context, hostIP string, hostKey types return } -func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings, logger *zap.SugaredLogger, indices []uint64) (deleted uint64, err error) { +func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings, indices []uint64) (deleted uint64, err error) { + id := frand.Entropy128() + logger := w.logger. + With("id", hex.EncodeToString(id[:])). + With("hostKey", rev.HostKey()). + With("hostVersion", settings.Version). + With("fcid", rev.ID()). + With("revisionNumber", rev.Revision.RevisionNumber). 
+ Named("deleteContractRoots") logger.Infow(fmt.Sprintf("deleting %d contract roots (%v)", len(indices), humanReadableSize(len(indices)*rhpv2.SectorSize)), "hk", rev.HostKey(), "fcid", rev.ID()) // return early @@ -526,14 +516,6 @@ func (w *worker) deleteContractRoots(t *rhpv2.Transport, rev *rhpv2.ContractRevi // record spending w.contractSpendingRecorder.Record(rev.Revision, api.ContractSpending{Deletions: cost}) - - for _, action := range actions { - if action.Type == rhpv2.RPCWriteActionSwap { - logger.With("index", action.B).Debug("successfully swapped sector") - } else if action.Type == rhpv2.RPCWriteActionTrim { - logger.With("n", action.A).Debug("successfully trimmed sectors") - } - } return nil }(); err != nil { return diff --git a/worker/upload.go b/worker/upload.go index 414dcc1c2..bc419d703 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -616,11 +616,9 @@ func (mgr *uploadManager) UploadShards(ctx context.Context, s object.Slab, shard } // track the upload in the bus - logger := mgr.logger.With("uploadID", hex.EncodeToString(upload.id[:])) if err := mgr.os.TrackUpload(ctx, upload.id); err != nil { return fmt.Errorf("failed to track upload '%v', err: %w", upload.id, err) } - logger.Debug("tracking upload") // defer a function that finishes the upload defer func() { @@ -628,7 +626,6 @@ func (mgr *uploadManager) UploadShards(ctx context.Context, s object.Slab, shard if err := mgr.os.FinishUpload(ctx, upload.id); err != nil { mgr.logger.Errorf("failed to mark upload %v as finished: %v", upload.id, err) } - logger.Debug("finished upload") cancel() }() diff --git a/worker/uploader.go b/worker/uploader.go index 6a4e0232e..403accbc8 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -2,7 +2,6 @@ package worker import ( "context" - "encoding/hex" "errors" "fmt" "math" @@ -231,13 +230,9 @@ func (u *uploader) execute(req *sectorUploadReq) (time.Duration, error) { } // update the bus - logger := u.logger.With("uploadID", 
hex.EncodeToString(req.uploadID[:])). - With("root", req.sector.root). - With("fcid", fcid) if err := u.os.AddUploadingSector(ctx, req.uploadID, fcid, req.sector.root); err != nil { return 0, fmt.Errorf("failed to add uploading sector to contract %v, err: %v", fcid, err) } - logger.Debug("added uploading sector") // upload the sector start := time.Now() From a6c306d7c11b21894872209863c095a76b5e7717 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 27 Mar 2024 13:41:45 +0100 Subject: [PATCH 130/201] worker: update maxRevisionCost check to match the way hostd computes LatestRevisionCost --- worker/gouging.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/gouging.go b/worker/gouging.go index e8e362040..38d4aa088 100644 --- a/worker/gouging.go +++ b/worker/gouging.go @@ -272,7 +272,7 @@ func checkPriceGougingPT(gs api.GougingSettings, cs api.ConsensusState, txnFee t } // check LatestRevisionCost - expect sane value - maxRevisionCost := gs.MaxDownloadPrice.Div64(1 << 40).Mul64(4096) + maxRevisionCost := gs.MaxRPCPrice.Add(gs.MaxDownloadPrice.Div64(1 << 40).Mul64(2048)) if pt.LatestRevisionCost.Cmp(maxRevisionCost) > 0 { return fmt.Errorf("LatestRevisionCost of %v exceeds maximum cost of %v", pt.LatestRevisionCost, maxRevisionCost) } From aaab735d70dac8cf7dfb6b9d5956ed8c054f6c4f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 27 Mar 2024 14:11:09 +0100 Subject: [PATCH 131/201] e2e: fix TestGouging --- worker/gouging.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/worker/gouging.go b/worker/gouging.go index 38d4aa088..0345385b4 100644 --- a/worker/gouging.go +++ b/worker/gouging.go @@ -272,7 +272,10 @@ func checkPriceGougingPT(gs api.GougingSettings, cs api.ConsensusState, txnFee t } // check LatestRevisionCost - expect sane value - maxRevisionCost := gs.MaxRPCPrice.Add(gs.MaxDownloadPrice.Div64(1 << 40).Mul64(2048)) + maxRevisionCost, overflow := 
gs.MaxRPCPrice.AddWithOverflow(gs.MaxDownloadPrice.Div64(1 << 40).Mul64(2048)) + if overflow { + maxRevisionCost = types.MaxCurrency + } if pt.LatestRevisionCost.Cmp(maxRevisionCost) > 0 { return fmt.Errorf("LatestRevisionCost of %v exceeds maximum cost of %v", pt.LatestRevisionCost, maxRevisionCost) } From 2f42d58ed8cb37fe36f8c20ebbeab0e7b67cde97 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 27 Mar 2024 16:42:38 +0100 Subject: [PATCH 132/201] api: omit unusablereasons if empty --- api/host.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/host.go b/api/host.go index c1e72aa16..030f2f3cf 100644 --- a/api/host.go +++ b/api/host.go @@ -80,7 +80,7 @@ type ( Score float64 `json:"score"` ScoreBreakdown HostScoreBreakdown `json:"scoreBreakdown"` Usable bool `json:"usable"` - UnusableReasons []string `json:"unusableReasons"` + UnusableReasons []string `json:"unusableReasons,omitempty"` } ) From 499f913110b527817facf4044b0d637203af75f0 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 27 Mar 2024 17:17:41 +0100 Subject: [PATCH 133/201] go.mod: upgrade core dependency --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 22515806c..90b704c6a 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/minio/minio-go/v7 v7.0.69 github.com/montanaflynn/stats v0.7.1 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe - go.sia.tech/core v0.2.1 + go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37 go.sia.tech/coreutils v0.0.3 go.sia.tech/gofakes3 v0.0.1 go.sia.tech/hostd v1.0.3 diff --git a/go.sum b/go.sum index 81612096a..16686ff24 100644 --- a/go.sum +++ b/go.sum @@ -241,8 +241,8 @@ gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1 gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod 
h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.sia.tech/core v0.2.1 h1:CqmMd+T5rAhC+Py3NxfvGtvsj/GgwIqQHHVrdts/LqY= -go.sia.tech/core v0.2.1/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= +go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37 h1:jsiab6uAUkaeDL7XEseAxJw7NVhxLNoU2WaB0AHbgG8= +go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= go.sia.tech/gofakes3 v0.0.1 h1:8vtYH/B17NJ4GXLWiONfhwBrrmtJtYiofnO3PfjU298= From ce9f015b6fa2bd33bae644d227c58677d8417994 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 27 Mar 2024 17:28:49 +0100 Subject: [PATCH 134/201] e2e: update hostd dependency --- go.mod | 11 ++++++----- go.sum | 26 ++++++++++++++------------ internal/test/e2e/host.go | 10 +++++++++- 3 files changed, 29 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 90b704c6a..39938cb02 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37 go.sia.tech/coreutils v0.0.3 go.sia.tech/gofakes3 v0.0.1 - go.sia.tech/hostd v1.0.3 + go.sia.tech/hostd v1.0.4-0.20240327150808-8c407121ad92 go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca @@ -32,8 +32,8 @@ require ( require ( github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect - github.com/aws/aws-sdk-go v1.50.1 // indirect - github.com/cloudflare/cloudflare-go v0.86.0 // indirect + github.com/aws/aws-sdk-go v1.51.7 // indirect + github.com/cloudflare/cloudflare-go v0.91.0 // indirect github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-sql-driver/mysql 
v1.7.1 // indirect @@ -51,7 +51,7 @@ require ( github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/klauspost/compress v1.17.6 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect - github.com/mattn/go-sqlite3 v1.14.18 // indirect + github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -60,6 +60,7 @@ require ( github.com/rs/xid v1.5.0 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect + github.com/shopspring/decimal v1.3.1 // indirect gitlab.com/NebulousLabs/bolt v1.4.4 // indirect gitlab.com/NebulousLabs/demotemutex v0.0.0-20151003192217-235395f71c40 // indirect gitlab.com/NebulousLabs/entropy-mnemonics v0.0.0-20181018051301-7532f67e3500 // indirect @@ -75,7 +76,7 @@ require ( gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.21.0 // indirect + golang.org/x/net v0.22.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index 16686ff24..db1951792 100644 --- a/go.sum +++ b/go.sum @@ -9,15 +9,15 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.50.1 h1:AwnLUM7TcH9vMZqA4TcDKmGfLmDW5VXwT5tPH6kXylo= -github.com/aws/aws-sdk-go 
v1.50.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.7 h1:RRjxHhx9RCjw5AhgpmmShq3F4JDlleSkyhYMQ2xUAe8= +github.com/aws/aws-sdk-go v1.51.7/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.86.0 h1:jEKN5VHNYNYtfDL2lUFLTRo+nOVNPFxpXTstVx0rqHI= -github.com/cloudflare/cloudflare-go v0.86.0/go.mod h1:wYW/5UP02TUfBToa/yKbQHV+r6h1NnJ1Je7XjuGM4Jw= +github.com/cloudflare/cloudflare-go v0.91.0 h1:L7IR+86qrZuEMSjGFg4cwRwtHqC8uCPmMUkP7BD4CPw= +github.com/cloudflare/cloudflare-go v0.91.0/go.mod h1:nUqvBUUDRxNzsDSQjbqUNWHEIYAoUlgRmcAzMKlFdKs= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -132,8 +132,8 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI= -github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= 
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= @@ -179,6 +179,8 @@ github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5P github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -195,8 +197,8 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/vbauerster/mpb/v5 v5.0.3/go.mod h1:h3YxU5CSr8rZP4Q3xZPVB3jJLhWPou63lHEdr9ytH4Y= @@ -247,8 +249,8 @@ go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= go.sia.tech/gofakes3 v0.0.1 h1:8vtYH/B17NJ4GXLWiONfhwBrrmtJtYiofnO3PfjU298= go.sia.tech/gofakes3 v0.0.1/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= -go.sia.tech/hostd v1.0.3 h1:BCaFg6DGf33JEH/5DqFj6cnaz3EbiyjpbhfSj/Lo6e8= -go.sia.tech/hostd v1.0.3/go.mod h1:R+01UddrgmAUcdBkEO8VcnYqPX/mod45DC5m/v/crzE= +go.sia.tech/hostd v1.0.4-0.20240327150808-8c407121ad92 h1:raFT28huR0i/njUr13hJElpso/Zk631gKq2Vkg27hYE= +go.sia.tech/hostd v1.0.4-0.20240327150808-8c407121ad92/go.mod h1:s1W4/Okfcs2rGM3sC7xL95HY+I/oJ0Dsix3zTER+hpQ= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= @@ -313,8 +315,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= 
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/internal/test/e2e/host.go b/internal/test/e2e/host.go index 6100adad5..36589b581 100644 --- a/internal/test/e2e/host.go +++ b/internal/test/e2e/host.go @@ -220,7 +220,15 @@ func NewHost(privKey types.PrivateKey, dir string, network *consensus.Network, d return nil, fmt.Errorf("failed to create rhp3 listener: %w", err) } - settings, err := settings.NewConfigManager(dir, privKey, rhp2Listener.Addr().String(), db, cm, tp, wallet, am, log.Named("settings")) + settings, err := settings.NewConfigManager( + settings.WithHostKey(privKey), + settings.WithRHP2Addr(rhp2Listener.Addr().String()), + settings.WithStore(db), + settings.WithChainManager(cm), + settings.WithTransactionPool(tp), + settings.WithWallet(wallet), + settings.WithAlertManager(am), + settings.WithLog(log.Named("settings"))) if err != nil { return nil, fmt.Errorf("failed to create settings manager: %w", err) } From 2dc9f493234e78d2c2aef3a3f07fbfc28013e70c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 27 Mar 2024 17:42:03 +0100 Subject: [PATCH 135/201] e2e: extend TestNewTestCluster --- internal/test/e2e/cluster_test.go | 3 ++ stores/hostdb.go | 61 +------------------------------ stores/metadata.go | 2 +- 3 files changed, 6 insertions(+), 60 deletions(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 77898d4cf..71507a8ff 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -169,6 +169,9 @@ func TestNewTestCluster(t *testing.T) { if reflect.DeepEqual(hi.Host, hostdb.Host{}) { t.Fatal("host wasn't set") } + if hi.Host.Settings.Release == "" { + t.Fatal("release should be set") + } } 
hostInfos, err := cluster.Autopilot.HostInfos(context.Background(), api.HostFilterModeAll, api.UsabilityFilterModeAll, "", nil, 0, -1) tt.OK(err) diff --git a/stores/hostdb.go b/stores/hostdb.go index b91f85601..0b281a0ec 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -159,63 +159,6 @@ type ( } ) -// convert converts hostSettings to rhp.HostSettings -func (s hostSettings) convert() rhpv2.HostSettings { - return rhpv2.HostSettings{ - AcceptingContracts: s.AcceptingContracts, - MaxDownloadBatchSize: s.MaxDownloadBatchSize, - MaxDuration: s.MaxDuration, - MaxReviseBatchSize: s.MaxReviseBatchSize, - NetAddress: s.NetAddress, - RemainingStorage: s.RemainingStorage, - SectorSize: s.SectorSize, - TotalStorage: s.TotalStorage, - Address: s.Address, - WindowSize: s.WindowSize, - Collateral: s.Collateral, - MaxCollateral: s.MaxCollateral, - BaseRPCPrice: s.BaseRPCPrice, - ContractPrice: s.ContractPrice, - DownloadBandwidthPrice: s.DownloadBandwidthPrice, - SectorAccessPrice: s.SectorAccessPrice, - StoragePrice: s.StoragePrice, - UploadBandwidthPrice: s.UploadBandwidthPrice, - EphemeralAccountExpiry: s.EphemeralAccountExpiry, - MaxEphemeralAccountBalance: s.MaxEphemeralAccountBalance, - RevisionNumber: s.RevisionNumber, - Version: s.Version, - SiaMuxPort: s.SiaMuxPort, - } -} - -func convertHostSettings(settings rhpv2.HostSettings) hostSettings { - return hostSettings{ - AcceptingContracts: settings.AcceptingContracts, - MaxDownloadBatchSize: settings.MaxDownloadBatchSize, - MaxDuration: settings.MaxDuration, - MaxReviseBatchSize: settings.MaxReviseBatchSize, - NetAddress: settings.NetAddress, - RemainingStorage: settings.RemainingStorage, - SectorSize: settings.SectorSize, - TotalStorage: settings.TotalStorage, - Address: settings.Address, - WindowSize: settings.WindowSize, - Collateral: settings.Collateral, - MaxCollateral: settings.MaxCollateral, - BaseRPCPrice: settings.BaseRPCPrice, - ContractPrice: settings.ContractPrice, - DownloadBandwidthPrice: 
settings.DownloadBandwidthPrice, - SectorAccessPrice: settings.SectorAccessPrice, - StoragePrice: settings.StoragePrice, - UploadBandwidthPrice: settings.UploadBandwidthPrice, - EphemeralAccountExpiry: settings.EphemeralAccountExpiry, - MaxEphemeralAccountBalance: settings.MaxEphemeralAccountBalance, - RevisionNumber: settings.RevisionNumber, - Version: settings.Version, - SiaMuxPort: settings.SiaMuxPort, - } -} - // convert converts hostSettings to rhp.HostSettings func (pt hostPriceTable) convert() rhpv3.HostPriceTable { return rhpv3.HostPriceTable{ @@ -343,7 +286,7 @@ func (h dbHost) convert(blocked bool) api.Host { }, PublicKey: types.PublicKey(h.PublicKey), Scanned: h.Scanned, - Settings: h.Settings.convert(), + Settings: rhpv2.HostSettings(h.Settings), }, Blocked: blocked, Checks: checks, @@ -886,7 +829,7 @@ func (ss *SQLStore) RecordHostScans(ctx context.Context, scans []hostdb.HostScan // overwrite the NetAddress in the settings with the one we // received through the host announcement scan.Settings.NetAddress = host.NetAddress - host.Settings = convertHostSettings(scan.Settings) + host.Settings = hostSettings(scan.Settings) // scans can only update the price table if the current // pricetable is expired anyway, ensuring scans never diff --git a/stores/metadata.go b/stores/metadata.go index e44cb3a63..9c586789b 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -357,7 +357,7 @@ func (c dbContract) convert() api.ContractMetadata { ID: types.FileContractID(c.FCID), HostIP: c.Host.NetAddress, HostKey: types.PublicKey(c.Host.PublicKey), - SiamuxAddr: c.Host.Settings.convert().SiamuxAddr(), + SiamuxAddr: rhpv2.HostSettings(c.Host.Settings).SiamuxAddr(), RenewedFrom: types.FileContractID(c.RenewedFrom), TotalCost: types.Currency(c.TotalCost), From c3155f8619b7a972f9e6156ed0243f008b300419 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 28 Mar 2024 09:25:49 +0100 Subject: [PATCH 136/201] Fix "transaction spends nonexisting siacoin output" 
when refreshing contracts (#1111) `UnconfirmedParents` would only ever return direct parents for a txn but not grandparents and so on. Leading to hosts not knowing about certain parents yet and txns failing. --- internal/node/transactionpool.go | 41 +++++++++++++++++---------- internal/node/transactionpool_test.go | 40 ++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 15 deletions(-) create mode 100644 internal/node/transactionpool_test.go diff --git a/internal/node/transactionpool.go b/internal/node/transactionpool.go index c5582a757..5713c1d12 100644 --- a/internal/node/transactionpool.go +++ b/internal/node/transactionpool.go @@ -2,6 +2,7 @@ package node import ( "errors" + "slices" "go.sia.tech/core/types" "go.sia.tech/renterd/bus" @@ -41,7 +42,18 @@ func (tp txpool) AcceptTransactionSet(txns []types.Transaction) error { } func (tp txpool) UnconfirmedParents(txn types.Transaction) ([]types.Transaction, error) { - pool := tp.Transactions() + return unconfirmedParents(txn, tp.Transactions()), nil +} + +func (tp txpool) Subscribe(subscriber modules.TransactionPoolSubscriber) { + tp.tp.TransactionPoolSubscribe(subscriber) +} + +func (tp txpool) Close() error { + return tp.tp.Close() +} + +func unconfirmedParents(txn types.Transaction, pool []types.Transaction) []types.Transaction { outputToParent := make(map[types.SiacoinOutputID]*types.Transaction) for i, txn := range pool { for j := range txn.SiacoinOutputs { @@ -49,24 +61,23 @@ func (tp txpool) UnconfirmedParents(txn types.Transaction) ([]types.Transaction, } } var parents []types.Transaction + txnsToCheck := []*types.Transaction{&txn} seen := make(map[types.TransactionID]bool) - for _, sci := range txn.SiacoinInputs { - if parent, ok := outputToParent[sci.ParentID]; ok { - if txid := parent.ID(); !seen[txid] { - seen[txid] = true - parents = append(parents, *parent) + for len(txnsToCheck) > 0 { + nextTxn := txnsToCheck[0] + txnsToCheck = txnsToCheck[1:] + for _, sci := range nextTxn.SiacoinInputs { 
+ if parent, ok := outputToParent[sci.ParentID]; ok { + if txid := parent.ID(); !seen[txid] { + seen[txid] = true + parents = append(parents, *parent) + txnsToCheck = append(txnsToCheck, parent) + } } } } - return parents, nil -} - -func (tp txpool) Subscribe(subscriber modules.TransactionPoolSubscriber) { - tp.tp.TransactionPoolSubscribe(subscriber) -} - -func (tp txpool) Close() error { - return tp.tp.Close() + slices.Reverse(parents) + return parents } func NewTransactionPool(tp modules.TransactionPool) bus.TransactionPool { diff --git a/internal/node/transactionpool_test.go b/internal/node/transactionpool_test.go new file mode 100644 index 000000000..c24e2c190 --- /dev/null +++ b/internal/node/transactionpool_test.go @@ -0,0 +1,40 @@ +package node + +import ( + "reflect" + "testing" + + "go.sia.tech/core/types" +) + +func TestUnconfirmedParents(t *testing.T) { + grandparent := types.Transaction{ + SiacoinOutputs: []types.SiacoinOutput{{}}, + } + parent := types.Transaction{ + SiacoinInputs: []types.SiacoinInput{ + { + ParentID: grandparent.SiacoinOutputID(0), + }, + }, + SiacoinOutputs: []types.SiacoinOutput{{}}, + } + txn := types.Transaction{ + SiacoinInputs: []types.SiacoinInput{ + { + ParentID: parent.SiacoinOutputID(0), + }, + }, + SiacoinOutputs: []types.SiacoinOutput{{}}, + } + pool := []types.Transaction{grandparent, parent} + + parents := unconfirmedParents(txn, pool) + if len(parents) != 2 { + t.Fatalf("expected 2 parents, got %v", len(parents)) + } else if !reflect.DeepEqual(parents[0], grandparent) { + t.Fatalf("expected grandparent") + } else if !reflect.DeepEqual(parents[1], parent) { + t.Fatalf("expected parent") + } +} From a207533abccb36b69f0421f25aaa3534e21876c3 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 28 Mar 2024 09:58:47 +0100 Subject: [PATCH 137/201] main: add --env cli flag and autostart --- cmd/renterd/main.go | 86 ++++++++++++++++++++++++++++---------------- config/config.go | 5 +-- docker/entrypoint.sh | 4 +-- 3 files 
changed, 61 insertions(+), 34 deletions(-) diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 03c27629f..33fe90f67 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -10,6 +10,7 @@ import ( "net" "net/http" "os" + "os/exec" "os/signal" "path/filepath" "runtime" @@ -68,8 +69,9 @@ Usage: var ( cfg = config.Config{ - Directory: ".", - Seed: os.Getenv("RENTERD_SEED"), + Directory: ".", + Seed: os.Getenv("RENTERD_SEED"), + AutoOpenWebUI: true, HTTP: config.HTTP{ Address: build.DefaultAPIAddress, Password: os.Getenv("RENTERD_API_PASSWORD"), @@ -143,15 +145,9 @@ var ( KeypairsV4: nil, }, } - seed types.PrivateKey + disableStdin bool ) -func check(context string, err error) { - if err != nil { - log.Fatalf("%v: %v", context, err) - } -} - func mustLoadAPIPassword() { if cfg.HTTP.Password != "" { return @@ -166,25 +162,6 @@ func mustLoadAPIPassword() { cfg.HTTP.Password = string(pw) } -func getSeed() types.PrivateKey { - if seed == nil { - phrase := cfg.Seed - if phrase == "" { - fmt.Print("Enter seed: ") - pw, err := term.ReadPassword(int(os.Stdin.Fd())) - check("Could not read seed phrase:", err) - fmt.Println() - phrase = string(pw) - } - var rawSeed [32]byte - if err := wallet.SeedFromPhrase(&rawSeed, phrase); err != nil { - panic(err) - } - seed = wallet.KeyFromSeed(&rawSeed, 0) - } - return seed -} - func mustParseWorkers(workers, password string) { if workers == "" { return @@ -271,6 +248,8 @@ func main() { // node flag.StringVar(&cfg.HTTP.Address, "http", cfg.HTTP.Address, "Address for serving the API") flag.StringVar(&cfg.Directory, "dir", cfg.Directory, "Directory for storing node state") + flag.BoolVar(&disableStdin, "env", false, "disable stdin prompts for environment variables (default false)") + flag.BoolVar(&cfg.AutoOpenWebUI, "openui", cfg.AutoOpenWebUI, "automatically open the web UI on startup") // logger flag.StringVar(&cfg.Log.Level, "log.level", cfg.Log.Level, "Global logger level (debug|info|warn|error). 
Defaults to 'info' (overrides with RENTERD_LOG_LEVEL)") @@ -410,6 +389,30 @@ func main() { parseEnvVar("RENTERD_LOG_DATABASE_IGNORE_RECORD_NOT_FOUND_ERROR", &cfg.Log.Database.IgnoreRecordNotFoundError) parseEnvVar("RENTERD_LOG_DATABASE_SLOW_THRESHOLD", &cfg.Log.Database.SlowThreshold) + // check that the API password is set + if cfg.HTTP.Password == "" { + if disableStdin { + stdoutFatalError("API password must be set via environment variable or config file when --env flag is set") + return + } + setAPIPassword() + } + + // check that the seed is set + if cfg.Seed == "" { + if disableStdin { + stdoutFatalError("Seed must be set via environment variable or config file when --env flag is set") + return + } + setSeedPhrase() + } + + var rawSeed [32]byte + if err := wallet.SeedFromPhrase(&rawSeed, cfg.Seed); err != nil { + log.Fatal("failed to load wallet", zap.Error(err)) + } + seed := wallet.KeyFromSeed(&rawSeed, 0) + if cfg.S3.Enabled { var keyPairsV4 string parseEnvVar("RENTERD_S3_KEYPAIRS_V4", &keyPairsV4) @@ -541,7 +544,7 @@ func main() { busAddr, busPassword := cfg.Bus.RemoteAddr, cfg.Bus.RemotePassword if cfg.Bus.RemoteAddr == "" { - b, fn, err := node.NewBus(busCfg, cfg.Directory, getSeed(), logger) + b, fn, err := node.NewBus(busCfg, cfg.Directory, seed, logger) if err != nil { logger.Fatal("failed to create bus, err: " + err.Error()) } @@ -566,7 +569,7 @@ func main() { var workers []autopilot.Worker if len(cfg.Worker.Remotes) == 0 { if cfg.Worker.Enabled { - w, fn, err := node.NewWorker(cfg.Worker, bc, getSeed(), logger) + w, fn, err := node.NewWorker(cfg.Worker, bc, seed, logger) if err != nil { logger.Fatal("failed to create worker: " + err.Error()) } @@ -682,6 +685,16 @@ func main() { } } + if cfg.AutoOpenWebUI { + time.Sleep(time.Millisecond) // give the web server a chance to start + _, port, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + logger.Debug("failed to parse API address", zap.Error(err)) + } else if err := 
openBrowser(fmt.Sprintf("http://127.0.0.1:%s", port)); err != nil { + logger.Debug("failed to open browser", zap.Error(err)) + } + } + signalCh := make(chan os.Signal, 1) signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) select { @@ -714,6 +727,19 @@ func main() { os.Exit(exitCode) } +func openBrowser(url string) error { + switch runtime.GOOS { + case "linux": + return exec.Command("xdg-open", url).Start() + case "windows": + return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start() + case "darwin": + return exec.Command("open", url).Start() + default: + return fmt.Errorf("unsupported platform %q", runtime.GOOS) + } +} + func runCompatMigrateAutopilotJSONToStore(bc *bus.Client, id, dir string) (err error) { // check if the file exists path := filepath.Join(dir, "autopilot.json") diff --git a/config/config.go b/config/config.go index 5f657b0a9..4a058a62d 100644 --- a/config/config.go +++ b/config/config.go @@ -7,8 +7,9 @@ import ( type ( // Config contains the configuration for a renterd node Config struct { - Seed string `yaml:"seed,omitempty"` - Directory string `yaml:"directory,omitempty"` + Seed string `yaml:"seed,omitempty"` + Directory string `yaml:"directory,omitempty"` + AutoOpenWebUI bool `yaml:"autoOpenWebUI,omitempty"` ShutdownTimeout time.Duration `yaml:"shutdownTimeout,omitempty"` diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index bac2ba601..da8b4d8ce 100644 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/sh if [[ "$BUILD_TAGS" == *'testnet'* ]]; then - exec renterd -http=':9880' -s3.address=':7070' "$@" + exec renterd -env -http=':9880' -s3.address=':7070' "$@" else - exec renterd -http=':9980' -s3.address=':8080' "$@" + exec renterd -env -http=':9980' -s3.address=':8080' "$@" fi From 2cf588e4b4ba0b18d4bfe48ba613e7f7786fa485 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 28 Mar 2024 11:52:55 +0100 Subject: [PATCH 138/201] autopilot: address review comments --- api/worker.go | 4 
++-- autopilot/accounts.go | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/api/worker.go b/api/worker.go index 87f736da3..6d0c0e9d2 100644 --- a/api/worker.go +++ b/api/worker.go @@ -49,8 +49,8 @@ type ( // ContractsResponse is the response type for the /rhp/contracts endpoint. ContractsResponse struct { - Contracts []Contract `json:"contracts"` - Errors map[types.PublicKey]string + Contracts []Contract `json:"contracts"` + Errors map[types.PublicKey]string `json:"errors,omitempty"` // deprecated Error string `json:"error,omitempty"` diff --git a/autopilot/accounts.go b/autopilot/accounts.go index ec1e80558..2332a325f 100644 --- a/autopilot/accounts.go +++ b/autopilot/accounts.go @@ -149,6 +149,8 @@ func (a *accounts) refillWorkerAccounts(ctx context.Context, w Worker) { } if _, inSet := inContractSet[contract.ID]; inSet { a.l.Errorw(rerr.err.Error(), rerr.keysAndValues...) + } else { + a.l.Debugw(rerr.err.Error(), rerr.keysAndValues...) } } else { // dismiss alerts on success From a662afab91836910d109adba05384b37f940b1ad Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 28 Mar 2024 14:46:17 +0100 Subject: [PATCH 139/201] autopilot: allow for setting optional MinProtocolVersion --- api/autopilot.go | 5 +++++ autopilot/hostscore.go | 27 ++++++++++++++++++++++----- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/api/autopilot.go b/api/autopilot.go index 23425aacf..bea9a68a6 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -2,8 +2,10 @@ package api import ( "errors" + "fmt" "go.sia.tech/core/types" + "go.sia.tech/siad/build" ) const ( @@ -55,6 +57,7 @@ type ( HostsConfig struct { AllowRedundantIPs bool `json:"allowRedundantIPs"` MaxDowntimeHours uint64 `json:"maxDowntimeHours"` + MinProtocolVersion string `json:"minProtocolVersion"` MinRecentScanFailures uint64 `json:"minRecentScanFailures"` ScoreOverrides map[types.PublicKey]float64 `json:"scoreOverrides"` } @@ -123,6 +126,8 @@ type ( func (c AutopilotConfig) 
Validate() error { if c.Hosts.MaxDowntimeHours > 99*365*24 { return ErrMaxDowntimeHoursTooHigh + } else if c.Hosts.MinProtocolVersion != "" && !build.IsVersion(c.Hosts.MinProtocolVersion) { + return fmt.Errorf("invalid min protocol version '%s'", c.Hosts.MinProtocolVersion) } return nil } diff --git a/autopilot/hostscore.go b/autopilot/hostscore.go index fc98499f1..2791adb3d 100644 --- a/autopilot/hostscore.go +++ b/autopilot/hostscore.go @@ -12,7 +12,15 @@ import ( "go.sia.tech/siad/build" ) -const smallestValidScore = math.SmallestNonzeroFloat64 +const ( + // MinProtocolVersion is the minimum protocol version of a host that we + // accept. + minProtocolVersion = "1.5.9" + + // smallestValidScore is the smallest score that a host can have before + // being ignored. + smallestValidScore = math.SmallestNonzeroFloat64 +) func hostScore(cfg api.AutopilotConfig, h api.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown { // idealDataPerHost is the amount of data that we would have to put on each @@ -37,7 +45,7 @@ func hostScore(cfg api.AutopilotConfig, h api.Host, storedData uint64, expectedR Prices: priceAdjustmentScore(hostPeriodCost, cfg), StorageRemaining: storageRemainingScore(h.Settings, storedData, allocationPerHost), Uptime: uptimeScore(h), - Version: versionScore(h.Settings), + Version: versionScore(h.Settings, cfg.Hosts.MinProtocolVersion), } } @@ -237,13 +245,22 @@ func uptimeScore(h api.Host) float64 { return math.Pow(ratio, 200*math.Min(1-ratio, 0.30)) } -func versionScore(settings rhpv2.HostSettings) float64 { +func versionScore(settings rhpv2.HostSettings, minVersion string) float64 { + if minVersion == "" { + minVersion = minProtocolVersion + } versions := []struct { version string penalty float64 }{ - {"1.6.0", 0.99}, - {"1.5.9", 0.00}, + // latest protocol version + {"1.6.0", 0.10}, + + // user-defined minimum + {minVersion, 0.00}, + + // absolute minimum + {minProtocolVersion, 0.00}, } weight := 1.0 for _, v := range versions 
{ From bd2aeac7b026747592cf719b96d0ae68e5ab578b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 28 Mar 2024 15:12:13 +0100 Subject: [PATCH 140/201] e2e: add TestHostMinVersion --- internal/test/e2e/gouging_test.go | 40 +++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 68dc264eb..27084ba1f 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -112,3 +112,43 @@ func TestGouging(t *testing.T) { return err }) } + +func TestHostMinVersion(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + // create a new test cluster + cluster := newTestCluster(t, testClusterOptions{ + hosts: int(test.AutopilotConfig.Contracts.Amount), + logger: newTestLoggerCustom(zapcore.ErrorLevel), + }) + defer cluster.Shutdown() + tt := cluster.tt + + // check number of contracts + contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{ + ContractSet: test.AutopilotConfig.Contracts.Set, + }) + tt.OK(err) + if len(contracts) != int(test.AutopilotConfig.Contracts.Amount) { + t.Fatalf("expected %v contracts, got %v", test.AutopilotConfig.Contracts.Amount, len(contracts)) + } + + // set min version to a high value + cfg := test.AutopilotConfig + cfg.Hosts.MinProtocolVersion = "99.99.99" + cluster.UpdateAutopilotConfig(context.Background(), cfg) + + // contracts in set should drop to 0 + tt.Retry(100, time.Millisecond, func() error { + contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{ + ContractSet: test.AutopilotConfig.Contracts.Set, + }) + tt.OK(err) + if len(contracts) != int(test.AutopilotConfig.Contracts.Amount) { + return fmt.Errorf("expected 0 contracts, got %v", len(contracts)) + } + return nil + }) +} From 8cc3017b7412fe6f792d221c1c960e5b13bf755e Mon Sep 17 00:00:00 2001 From: Alex Freska Date: Fri, 29 Mar 2024 11:00:58 -0400 Subject: [PATCH 141/201] ci: use shared ui update action 
--- .github/workflows/ui.yml | 64 ++++------------------------------------ 1 file changed, 6 insertions(+), 58 deletions(-) diff --git a/.github/workflows/ui.yml b/.github/workflows/ui.yml index a67ef9259..a5b5b440b 100644 --- a/.github/workflows/ui.yml +++ b/.github/workflows/ui.yml @@ -1,4 +1,4 @@ -name: Update UI +name: Update UI and open PR on: # Run daily @@ -11,61 +11,9 @@ jobs: update-ui: runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.21.0' - - - name: Check for new renterd tag in SiaFoundation/web - id: check-tag - env: - GH_TOKEN: ${{ github.token }} - run: | - # Fetch tags with pagination - TAGS_JSON=$(gh api --paginate repos/SiaFoundation/web/tags) - - # Extract tags that start with "renterd/", sort them in version order, and pick the highest version - LATEST_RENTERD_GO_TAG=$(echo "$TAGS_JSON" | jq -r '.[] | select(.name | startswith("renterd/")).name' | sort -Vr | head -n 1) - LATEST_RENTERD_VERSION=$(echo "$LATEST_RENTERD_GO_TAG" | sed 's/renterd\///') - - echo "Latest renterd tag is $LATEST_RENTERD_GO_TAG" - echo "GO_TAG=$LATEST_RENTERD_GO_TAG" >> $GITHUB_ENV - echo "VERSION=$LATEST_RENTERD_VERSION" >> $GITHUB_ENV - - - name: Fetch release notes for the release - id: release-notes - env: - GH_TOKEN: ${{ github.token }} - if: env.GO_TAG != 'null' - run: | - RELEASE_TAG_FORMATTED=$(echo "$GO_TAG" | sed 's/\/v/@/') - RELEASES_JSON=$(gh api --paginate repos/SiaFoundation/web/releases) - - RELEASE_NOTES=$(echo "$RELEASES_JSON" | jq -r --arg TAG_NAME "$RELEASE_TAG_FORMATTED" '.[] | select(.name == $TAG_NAME).body') - echo "Release notes for $RELEASE_TAG_FORMATTED: $RELEASE_NOTES" - echo "RELEASE_NOTES<> $GITHUB_ENV - echo "$RELEASE_NOTES" >> $GITHUB_ENV - echo "EOF" >> $GITHUB_ENV - - - name: Update go.mod with latest module - if: env.GO_TAG != 'null' - run: | - GO_MODULE_FORMATTED=$(echo "$GO_TAG" | sed 's/\//@/') - echo "Updating go.mod to 
use $GO_MODULE_FORMATTED" - go clean -modcache - go get go.sia.tech/web/$GO_MODULE_FORMATTED - go mod tidy - - - name: Create Pull Request - uses: peter-evans/create-pull-request@v5 - if: env.GO_TAG != 'null' + - name: Update UI and open PR + uses: SiaFoundation/workflows/.github/actions/ui-update@master with: - token: ${{ secrets.GITHUB_TOKEN }} - commit-message: "ui: ${{ env.VERSION }}" - title: "ui: ${{ env.VERSION }}" - body: ${{ env.RELEASE_NOTES }} - branch: "ui/update" - delete-branch: true + moduleName: 'renterd' + goVersion: '1.21' + token: ${{ secrets.GITHUB_TOKEN }} From 8deb67fa2eeec7cbd7929d4c0a5576f0a3209987 Mon Sep 17 00:00:00 2001 From: ChrisSchinnerl Date: Sat, 30 Mar 2024 00:08:03 +0000 Subject: [PATCH 142/201] ui: v0.49.0 --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 39938cb02..9a0fe75f4 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( go.sia.tech/web/renterd v0.49.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.21.0 + golang.org/x/sys v0.18.0 golang.org/x/term v0.18.0 gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.6 @@ -77,7 +78,6 @@ require ( go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.22.0 // indirect - golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.16.1 // indirect From f5f13fe03c38ade179c0dd6123e07d15e0a0be0f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 01:21:25 +0000 Subject: [PATCH 143/201] build(deps): bump gorm.io/gorm from 1.25.8 to 1.25.9 Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.25.8 to 1.25.9. 
- [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.25.8...v1.25.9) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 39938cb02..a46cea13f 100644 --- a/go.mod +++ b/go.mod @@ -21,11 +21,12 @@ require ( go.sia.tech/web/renterd v0.49.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.21.0 + golang.org/x/sys v0.18.0 golang.org/x/term v0.18.0 gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.6 gorm.io/driver/sqlite v1.5.5 - gorm.io/gorm v1.25.8 + gorm.io/gorm v1.25.9 lukechampine.com/frand v1.4.2 moul.io/zapgorm2 v1.3.0 ) @@ -77,7 +78,6 @@ require ( go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.22.0 // indirect - golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.16.1 // indirect diff --git a/go.sum b/go.sum index db1951792..407d36e57 100644 --- a/go.sum +++ b/go.sum @@ -413,8 +413,8 @@ gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo= -gorm.io/gorm v1.25.8/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/gorm v1.25.9 h1:wct0gxZIELDk8+ZqF/MVnHLkA1rvYlBWUMv2EdsK1g8= +gorm.io/gorm v1.25.9/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= lukechampine.com/frand 
v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= From dbccaebf2c23192ca1bd4f31f01791d5e7d6e757 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 28 Mar 2024 13:33:02 +0100 Subject: [PATCH 144/201] workflows: update deprecated github actions --- .github/workflows/publish.yml | 20 ++++++++++---------- .github/workflows/test.yml | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 65e4f8ed4..3bf909519 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -28,7 +28,7 @@ jobs: permissions: packages: write steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: docker/setup-qemu-action@v2 - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v2 @@ -71,8 +71,8 @@ jobs: matrix: network: ["mainnet" , "zen"] steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: 'stable' - name: Setup @@ -112,7 +112,7 @@ jobs: go build -tags="$BUILD_TAGS" -trimpath -o bin/ -a -ldflags '-s -w -linkmode external -extldflags "-static"' ./cmd/renterd cp README.md LICENSE bin/ zip -qj $ZIP_OUTPUT bin/* - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: renterd path: release/ @@ -122,8 +122,8 @@ jobs: matrix: network: ["mainnet" , "zen"] steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: 'stable' - name: Setup Notarization @@ -208,7 +208,7 @@ jobs: /usr/bin/codesign --deep -f -v --timestamp -o runtime,library -s $APPLE_CERT_ID bin/renterd ditto -ck bin $ZIP_OUTPUT xcrun notarytool submit -k ~/private_keys/AuthKey_$APPLE_API_KEY.p8 -d $APPLE_API_KEY -i $APPLE_API_ISSUER --wait --timeout 10m $ZIP_OUTPUT - - uses: actions/upload-artifact@v3 + - 
uses: actions/upload-artifact@v4 with: name: renterd path: release/ @@ -218,8 +218,8 @@ jobs: matrix: network: ["mainnet" , "zen"] steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: 'stable' - name: Set build tag environment variable @@ -249,7 +249,7 @@ jobs: azuresigntool sign -kvu "${{ secrets.AZURE_KEY_VAULT_URI }}" -kvi "${{ secrets.AZURE_CLIENT_ID }}" -kvt "${{ secrets.AZURE_TENANT_ID }}" -kvs "${{ secrets.AZURE_CLIENT_SECRET }}" -kvc ${{ secrets.AZURE_CERT_NAME }} -tr http://timestamp.digicert.com -v bin/renterd.exe cp README.md LICENSE bin/ 7z a $ZIP_OUTPUT bin/* - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: renterd path: release/ diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index cc6c5c1f3..b389ebff2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -18,9 +18,9 @@ jobs: if: matrix.os == 'windows-latest' run: git config --global core.autocrlf false # fixes go lint fmt error - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version: ${{ matrix.go-version }} - name: Lint From ae21af31cb4ea727cd2987f202529a554b9b2d97 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 2 Apr 2024 11:43:42 +0200 Subject: [PATCH 145/201] stores: batch pruning of slabs --- stores/metadata.go | 42 +++++++++++++++++++++++++++++++----------- stores/multipart.go | 10 +++------- stores/sql.go | 10 ++++++++++ 3 files changed, 44 insertions(+), 18 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index b754be50a..2ffdb9525 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2713,14 +2713,35 @@ func archiveContracts(tx *gorm.DB, contracts []dbContract, toArchive map[types.F return nil } -func pruneSlabs(tx *gorm.DB) error { - // delete slabs without any associated slices or 
buffers - return tx.Exec(` -DELETE -FROM slabs -WHERE NOT EXISTS (SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id) -AND slabs.db_buffered_slab_id IS NULL -`).Error +func (s *SQLStore) pruneSlabsLoop() { + for { + select { + case <-s.slabPruneSigChan: + case <-s.shutdownCtx.Done(): + return + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err := s.retryTransaction(ctx, func(tx *gorm.DB) error { + return tx.Exec(` + DELETE + FROM slabs + WHERE NOT EXISTS (SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id) + AND slabs.db_buffered_slab_id IS NULL + `).Error + }) + if err != nil { + s.logger.Errorw("failed to prune slabs", zap.Error(err)) + } + cancel() + } +} + +func (s *SQLStore) pruneSlabs() { + select { + case s.slabPruneSigChan <- struct{}{}: + default: + } } // deleteObject deletes an object from the store and prunes all slabs which are @@ -2749,9 +2770,8 @@ func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, numDeleted := tx.RowsAffected if numDeleted == 0 { return 0, nil // nothing to prune if no object was deleted - } else if err := pruneSlabs(tx); err != nil { - return numDeleted, err } + s.pruneSlabs() return numDeleted, nil } @@ -2784,7 +2804,7 @@ func (s *SQLStore) deleteObjects(ctx context.Context, bucket string, path string // prune slabs if we deleted an object rowsAffected = res.RowsAffected if rowsAffected > 0 { - return pruneSlabs(tx) + s.pruneSlabs() } duration = time.Since(start) return nil diff --git a/stores/multipart.go b/stores/multipart.go index 3da5f7992..50c2121a6 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -305,10 +305,8 @@ func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string } return errors.New("failed to delete multipart upload for unknown reason") } - // Prune the slabs. - if err := pruneSlabs(tx); err != nil { - return fmt.Errorf("failed to prune slabs: %w", err) - } + // Prune the dangling slabs. 
+ s.pruneSlabs() return nil }) } @@ -459,9 +457,7 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str } // Prune the slabs. - if err := pruneSlabs(tx); err != nil { - return fmt.Errorf("failed to prune slabs: %w", err) - } + s.pruneSlabs() return nil }) if err != nil { diff --git a/stores/sql.go b/stores/sql.go index 34a6d78ab..5daf9122d 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -285,6 +285,7 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { if err != nil { return nil, modules.ConsensusChangeID{}, err } + ss.initSlabPruning() return ss, ccid, nil } @@ -305,6 +306,15 @@ func (ss *SQLStore) hasAllowlist() bool { return ss.allowListCnt > 0 } +func (s *SQLStore) initSlabPruning() { + s.wg.Add(1) + go func() { + s.pruneSlabsLoop() + s.wg.Done() + }() + s.pruneSlabs() +} + func (ss *SQLStore) updateHasAllowlist(err *error) { if *err != nil { return From 3f988a997c5df054cdaba4fcc10b290bfa433688 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 2 Apr 2024 11:54:20 +0200 Subject: [PATCH 146/201] stores: register alert for failed slab pruning --- stores/metadata.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/stores/metadata.go b/stores/metadata.go index 2ffdb9525..fcb3bd156 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -13,12 +13,18 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/object" "go.sia.tech/siad/modules" "go.uber.org/zap" "gorm.io/gorm" "gorm.io/gorm/clause" + "lukechampine.com/frand" +) + +var ( + pruneSlabsAlertID = frand.Entropy256() ) const ( @@ -2732,6 +2738,18 @@ func (s *SQLStore) pruneSlabsLoop() { }) if err != nil { s.logger.Errorw("failed to prune slabs", zap.Error(err)) + s.alerts.RegisterAlert(s.shutdownCtx, alerts.Alert{ + ID: pruneSlabsAlertID, + Severity: alerts.SeverityWarning, + Message: "Failed to prune slabs from database", + 
Timestamp: time.Now(), + Data: map[string]interface{}{ + "error": err.Error(), + "hint": "This might happen when your database is under a lot of load due to deleting objects rapidly. This alert will disappear the next time slabs are pruned successfully.", + }, + }) + } else { + s.alerts.DismissAlerts(s.shutdownCtx, pruneSlabsAlertID) } cancel() } From 210f340647d1245266badc9a1886d6b3f49be06a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 2 Apr 2024 12:10:12 +0200 Subject: [PATCH 147/201] stores: fix TestSQLMetadataStore --- stores/metadata_test.go | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index eb082fb54..b92ced4b2 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1058,9 +1058,9 @@ func TestSQLMetadataStore(t *testing.T) { // incremented due to the object and slab being overwritten. two := uint(2) expectedObj.Slabs[0].DBObjectID = &two - expectedObj.Slabs[0].DBSlabID = 3 + expectedObj.Slabs[0].DBSlabID = 1 expectedObj.Slabs[1].DBObjectID = &two - expectedObj.Slabs[1].DBSlabID = 4 + expectedObj.Slabs[1].DBSlabID = 2 if !reflect.DeepEqual(obj, expectedObj) { t.Fatal("object mismatch", cmp.Diff(obj, expectedObj)) } @@ -1082,7 +1082,7 @@ func TestSQLMetadataStore(t *testing.T) { TotalShards: 1, Shards: []dbSector{ { - DBSlabID: 3, + DBSlabID: 1, SlabIndex: 1, Root: obj1.Slabs[0].Shards[0].Root[:], LatestHost: publicKey(obj1.Slabs[0].Shards[0].LatestHost), @@ -1122,7 +1122,7 @@ func TestSQLMetadataStore(t *testing.T) { TotalShards: 1, Shards: []dbSector{ { - DBSlabID: 4, + DBSlabID: 2, SlabIndex: 1, Root: obj1.Slabs[1].Shards[0].Root[:], LatestHost: publicKey(obj1.Slabs[1].Shards[0].LatestHost), @@ -4028,7 +4028,7 @@ func TestRefreshHealth(t *testing.T) { } } -func TestSlabCleanupTrigger(t *testing.T) { +func TestSlabCleanup(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() @@ -4112,11 +4112,14 @@ func 
TestSlabCleanupTrigger(t *testing.T) { t.Fatal(err) } - if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { - t.Fatal(err) - } else if slabCntr != 0 { - t.Fatalf("expected 0 slabs, got %v", slabCntr) - } + ss.Retry(100, 100*time.Millisecond, func() error { + if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { + t.Fatal(err) + } else if slabCntr != 0 { + return fmt.Errorf("expected 0 slabs, got %v", slabCntr) + } + return nil + }) // create another object that references a slab with buffer ek, _ = object.GenerateEncryptionKey().MarshalBinary() From 9f5dc73a9bbafdb6e36a4783db747c55c017682b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 2 Apr 2024 12:30:50 +0200 Subject: [PATCH 148/201] stores: prune slabs blockingly upon startup --- stores/metadata.go | 24 +++++++++++++----------- stores/multipart.go | 4 ++-- stores/sql.go | 13 ++++++++++--- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index fcb3bd156..d8e4962c9 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2728,14 +2728,7 @@ func (s *SQLStore) pruneSlabsLoop() { } ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - err := s.retryTransaction(ctx, func(tx *gorm.DB) error { - return tx.Exec(` - DELETE - FROM slabs - WHERE NOT EXISTS (SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id) - AND slabs.db_buffered_slab_id IS NULL - `).Error - }) + err := s.retryTransaction(ctx, pruneSlabs) if err != nil { s.logger.Errorw("failed to prune slabs", zap.Error(err)) s.alerts.RegisterAlert(s.shutdownCtx, alerts.Alert{ @@ -2755,7 +2748,16 @@ func (s *SQLStore) pruneSlabsLoop() { } } -func (s *SQLStore) pruneSlabs() { +func pruneSlabs(tx *gorm.DB) error { + return tx.Exec(` +DELETE +FROM slabs +WHERE NOT EXISTS (SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id) +AND slabs.db_buffered_slab_id IS NULL +`).Error +} + +func (s *SQLStore) triggerSlabPruning() { select { case 
s.slabPruneSigChan <- struct{}{}: default: @@ -2789,7 +2791,7 @@ func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, if numDeleted == 0 { return 0, nil // nothing to prune if no object was deleted } - s.pruneSlabs() + s.triggerSlabPruning() return numDeleted, nil } @@ -2822,7 +2824,7 @@ func (s *SQLStore) deleteObjects(ctx context.Context, bucket string, path string // prune slabs if we deleted an object rowsAffected = res.RowsAffected if rowsAffected > 0 { - s.pruneSlabs() + s.triggerSlabPruning() } duration = time.Since(start) return nil diff --git a/stores/multipart.go b/stores/multipart.go index 50c2121a6..5fde55d7b 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -306,7 +306,7 @@ func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string return errors.New("failed to delete multipart upload for unknown reason") } // Prune the dangling slabs. - s.pruneSlabs() + s.triggerSlabPruning() return nil }) } @@ -457,7 +457,7 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str } // Prune the slabs. 
- s.pruneSlabs() + s.triggerSlabPruning() return nil }) if err != nil { diff --git a/stores/sql.go b/stores/sql.go index 5daf9122d..b64adddca 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -250,6 +250,8 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { } shutdownCtx, shutdownCtxCancel := context.WithCancel(context.Background()) + slabPruneOngoing := make(chan struct{}) + close(slabPruneOngoing) ss := &SQLStore{ alerts: cfg.Alerts, db: db, @@ -285,7 +287,9 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { if err != nil { return nil, modules.ConsensusChangeID{}, err } - ss.initSlabPruning() + if err := ss.initSlabPruning(); err != nil { + return nil, modules.ConsensusChangeID{}, err + } return ss, ccid, nil } @@ -306,13 +310,16 @@ func (ss *SQLStore) hasAllowlist() bool { return ss.allowListCnt > 0 } -func (s *SQLStore) initSlabPruning() { +func (s *SQLStore) initSlabPruning() error { + // start pruning loop s.wg.Add(1) go func() { s.pruneSlabsLoop() s.wg.Done() }() - s.pruneSlabs() + + // prune once to guarantee consistency on startup + return s.retryTransaction(s.shutdownCtx, pruneSlabs) } func (ss *SQLStore) updateHasAllowlist(err *error) { From 98626ba21fd5aed7b9ce4966f72c8fe566179950 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 2 Apr 2024 13:33:52 +0200 Subject: [PATCH 149/201] stores: fix TestSlabCleanup --- stores/metadata_test.go | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index b92ced4b2..ebb5f0df1 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4097,24 +4097,29 @@ func TestSlabCleanup(t *testing.T) { if err != nil { t.Fatal(err) } + time.Sleep(100 * time.Millisecond) // check slice count var slabCntr int64 - if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { - t.Fatal(err) - } else if slabCntr != 1 { - t.Fatalf("expected 1 slabs, got 
%v", slabCntr) - } + ss.Retry(100, 100*time.Millisecond, func() error { + if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { + return err + } else if slabCntr != 1 { + return fmt.Errorf("expected 1 slabs, got %v", slabCntr) + } + return nil + }) // delete second object err = ss.RemoveObject(context.Background(), api.DefaultBucketName, obj2.ObjectID) if err != nil { t.Fatal(err) } + time.Sleep(100 * time.Millisecond) ss.Retry(100, 100*time.Millisecond, func() error { if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { - t.Fatal(err) + return err } else if slabCntr != 0 { return fmt.Errorf("expected 0 slabs, got %v", slabCntr) } @@ -4159,11 +4164,16 @@ func TestSlabCleanup(t *testing.T) { if err != nil { t.Fatal(err) } - if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { - t.Fatal(err) - } else if slabCntr != 1 { - t.Fatalf("expected 1 slabs, got %v", slabCntr) - } + time.Sleep(100 * time.Millisecond) + + ss.Retry(100, 100*time.Millisecond, func() error { + if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { + return err + } else if slabCntr != 1 { + return fmt.Errorf("expected 1 slabs, got %v", slabCntr) + } + return nil + }) } func TestUpsertSectors(t *testing.T) { From c3a2fe615ee5cf26be1902e1dd8a18a7f9c77ba6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 3 Apr 2024 11:48:21 +0200 Subject: [PATCH 150/201] contractor: address comments --- autopilot/contractor/contractor.go | 144 +++++++++++++++-------------- autopilot/contractor/state.go | 87 +++++++++++++---- 2 files changed, 143 insertions(+), 88 deletions(-) diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 1697e1cfd..a3bfb1375 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -138,7 +138,6 @@ type ( priceTable rhpv3.HostPriceTable usable bool recoverable bool - InSet bool } contractSetAdditions struct { @@ -224,16 +223,25 @@ func 
canSkipContractMaintenance(ctx context.Context, cfg api.ContractsConfig) (s } func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, state *MaintenanceState) (bool, error) { + return c.performContractMaintenance(newMaintenanceCtx(ctx, state), w) +} + +func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, error) { + mCtx := newMaintenanceCtx(ctx, ctx.state) + // check if we can skip maintenance - if reason, skip := canSkipContractMaintenance(ctx, state.ContractsConfig()); skip { + if reason, skip := canSkipContractMaintenance(ctx, ctx.ContractsConfig()); skip { if reason != "" { c.logger.Warn(reason) } + if skip { + return false, nil + } } c.logger.Info("performing contract maintenance") // fetch current contract set - currentSet, err := c.bus.Contracts(ctx, api.ContractsOpts{ContractSet: state.ContractSet()}) + currentSet, err := c.bus.Contracts(ctx, api.ContractsOpts{ContractSet: ctx.ContractSet()}) if err != nil && !strings.Contains(err.Error(), api.ErrContractSetNotFound.Error()) { return false, err } @@ -241,7 +249,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s for _, c := range currentSet { isInCurrentSet[c.ID] = struct{}{} } - c.logger.Infof("contract set '%s' holds %d contracts", state.ContractSet(), len(currentSet)) + c.logger.Infof("contract set '%s' holds %d contracts", ctx.ContractSet(), len(currentSet)) // fetch all contracts from the worker. 
start := time.Now() @@ -302,7 +310,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s } // fetch candidate hosts - candidates, unusableHosts, err := c.candidateHosts(ctx, state, hosts, usedHosts, hostData, smallestValidScore) // avoid 0 score hosts + candidates, unusableHosts, err := c.candidateHosts(mCtx, hosts, usedHosts, hostData, smallestValidScore) // avoid 0 score hosts if err != nil { return false, err } @@ -310,13 +318,13 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s // min score to pass checks var minScore float64 if len(hosts) > 0 { - minScore = c.calculateMinScore(candidates, state.WantedContracts()) + minScore = c.calculateMinScore(candidates, ctx.WantedContracts()) } else { c.logger.Warn("could not calculate min score, no hosts found") } // run host checks - checks, err := c.runHostChecks(ctx, state, hosts, hostData, minScore) + checks, err := c.runHostChecks(mCtx, hosts, hostData, minScore) if err != nil { return false, fmt.Errorf("failed to run host checks, err: %v", err) } @@ -328,11 +336,11 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s } // run contract checks - updatedSet, toArchive, toStopUsing, toRefresh, toRenew := c.runContractChecks(ctx, state, checks, contracts, isInCurrentSet, cs.BlockHeight) + updatedSet, toArchive, toStopUsing, toRefresh, toRenew := c.runContractChecks(mCtx, checks, contracts, isInCurrentSet, cs.BlockHeight) // update host checks for hk, check := range checks { - if err := c.bus.UpdateHostCheck(ctx, state.AP.ID, hk, *check); err != nil { + if err := c.bus.UpdateHostCheck(ctx, ctx.ApID(), hk, *check); err != nil { c.logger.Errorf("failed to update host check for host %v, err: %v", hk, err) } } @@ -346,7 +354,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s } // calculate remaining funds - remaining := c.remainingFunds(contracts, state) + remaining := c.remainingFunds(contracts, 
mCtx.state) // calculate 'limit' amount of contracts we want to renew var limit int @@ -363,7 +371,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s } return toRenew[i].contract.FileSize() > toRenew[j].contract.FileSize() }) - for len(updatedSet)+limit < int(state.WantedContracts()) && limit < len(toRenew) { + for len(updatedSet)+limit < int(ctx.WantedContracts()) && limit < len(toRenew) { // as long as we're missing contracts, increase the renewal limit limit++ } @@ -375,7 +383,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s var renewed []renewal if limit > 0 { var toKeep []api.ContractMetadata - renewed, toKeep = c.runContractRenewals(ctx, w, state, toRenew, &remaining, limit) + renewed, toKeep = c.runContractRenewals(ctx, w, toRenew, &remaining, limit) for _, ri := range renewed { if ri.ci.usable || ri.ci.recoverable { updatedSet = append(updatedSet, ri.to) @@ -386,7 +394,7 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s } // run contract refreshes - refreshed, err := c.runContractRefreshes(ctx, w, state, toRefresh, &remaining) + refreshed, err := c.runContractRefreshes(ctx, w, toRefresh, &remaining) if err != nil { c.logger.Errorf("failed to refresh contracts, err: %v", err) // continue } else { @@ -401,15 +409,15 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s // to avoid forming new contracts as soon as we dip below // 'Contracts.Amount', we define a threshold but only if we have more // contracts than 'Contracts.Amount' already - threshold := state.WantedContracts() - if uint64(len(contracts)) > state.WantedContracts() { + threshold := ctx.WantedContracts() + if uint64(len(contracts)) > ctx.WantedContracts() { threshold = addLeeway(threshold, leewayPctRequiredContracts) } // check if we need to form contracts and add them to the contract set var formed []api.ContractMetadata - if uint64(len(updatedSet)) < threshold 
&& !state.SkipContractFormations { - formed, err = c.runContractFormations(ctx, w, state, candidates, usedHosts, unusableHosts, state.WantedContracts()-uint64(len(updatedSet)), &remaining) + if uint64(len(updatedSet)) < threshold && !ctx.state.SkipContractFormations { + formed, err = c.runContractFormations(ctx, w, candidates, usedHosts, unusableHosts, ctx.WantedContracts()-uint64(len(updatedSet)), &remaining) if err != nil { c.logger.Errorf("failed to form contracts, err: %v", err) // continue } else { @@ -426,15 +434,15 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s c.logger.Errorf("contract %v not found in contractData", contract.ID) } } - if len(updatedSet) > int(state.WantedContracts()) { + if len(updatedSet) > int(ctx.WantedContracts()) { // sort by contract size sort.Slice(updatedSet, func(i, j int) bool { return contractData[updatedSet[i].ID] > contractData[updatedSet[j].ID] }) - for _, contract := range updatedSet[state.WantedContracts():] { + for _, contract := range updatedSet[ctx.WantedContracts():] { toStopUsing[contract.ID] = "truncated" } - updatedSet = updatedSet[:state.WantedContracts()] + updatedSet = updatedSet[:ctx.WantedContracts()] } // convert to set of file contract ids @@ -444,17 +452,17 @@ func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, s } // update contract set - err = c.bus.SetContractSet(ctx, state.ContractSet(), newSet) + err = c.bus.SetContractSet(ctx, ctx.ContractSet(), newSet) if err != nil { return false, err } // return whether the maintenance changed the contract set - return c.computeContractSetChanged(ctx, state, currentSet, updatedSet, formed, refreshed, renewed, toStopUsing, contractData), nil + return c.computeContractSetChanged(mCtx, currentSet, updatedSet, formed, refreshed, renewed, toStopUsing, contractData), nil } -func (c *Contractor) computeContractSetChanged(ctx context.Context, state *MaintenanceState, oldSet, newSet []api.ContractMetadata, formed 
[]api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { - name := state.ContractSet() +func (c *Contractor) computeContractSetChanged(ctx *mCtx, oldSet, newSet []api.ContractMetadata, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { + name := ctx.ContractSet() // build set lookups inOldSet := make(map[types.FileContractID]struct{}) @@ -531,7 +539,7 @@ func (c *Contractor) computeContractSetChanged(ctx context.Context, state *Maint // log a warning if the contract set does not contain enough contracts logFn := c.logger.Infow - if len(newSet) < int(state.RS.TotalShards) { + if len(newSet) < int(ctx.state.RS.TotalShards) { logFn = c.logger.Warnw } @@ -539,7 +547,7 @@ func (c *Contractor) computeContractSetChanged(ctx context.Context, state *Maint var metrics []api.ContractSetChurnMetric for fcid := range setAdditions { metrics = append(metrics, api.ContractSetChurnMetric{ - Name: state.ContractSet(), + Name: ctx.ContractSet(), ContractID: fcid, Direction: api.ChurnDirAdded, Timestamp: now, @@ -547,7 +555,7 @@ func (c *Contractor) computeContractSetChanged(ctx context.Context, state *Maint } for fcid, removal := range setRemovals { metrics = append(metrics, api.ContractSetChurnMetric{ - Name: state.ContractSet(), + Name: ctx.ContractSet(), ContractID: fcid, Direction: api.ChurnDirRemoved, Reason: removal.Removals[0].Reason, @@ -581,7 +589,7 @@ func (c *Contractor) computeContractSetChanged(ctx context.Context, state *Maint return hasChanged } -func (c *Contractor) runContractChecks(ctx context.Context, state *MaintenanceState, hostChecks map[types.PublicKey]*api.HostCheck, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, bh uint64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew 
[]contractInfo) { +func (c *Contractor) runContractChecks(ctx *mCtx, hostChecks map[types.PublicKey]*api.HostCheck, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, bh uint64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo) { select { case <-ctx.Done(): return @@ -595,7 +603,7 @@ func (c *Contractor) runContractChecks(ctx context.Context, state *MaintenanceSt // calculate 'maxKeepLeeway' which defines the amount of contracts we'll be // lenient towards when we fail to either fetch a valid price table or the // contract's revision - maxKeepLeeway := addLeeway(state.WantedContracts(), 1-leewayPctRequiredContracts) + maxKeepLeeway := addLeeway(ctx.WantedContracts(), 1-leewayPctRequiredContracts) remainingKeepLeeway := maxKeepLeeway var notfound int @@ -692,7 +700,7 @@ LOOP: if contract.Revision == nil { if _, found := inCurrentSet[fcid]; !found || remainingKeepLeeway == 0 { toStopUsing[fcid] = errContractNoRevision.Error() - } else if !state.AllowRedundantIPs() && ipFilter.IsRedundantIP(contract.HostIP, contract.HostKey) { + } else if !ctx.AllowRedundantIPs() && ipFilter.IsRedundantIP(contract.HostIP, contract.HostKey) { toStopUsing[fcid] = fmt.Sprintf("%v; %v", api.ErrUsabilityHostRedundantIP, errContractNoRevision) hostChecks[contract.HostKey].Usability.RedundantIP = true } else { @@ -704,7 +712,7 @@ LOOP: // decide whether the contract is still good ci := contractInfo{contract: contract, priceTable: host.PriceTable.HostPriceTable, settings: host.Settings} - usable, recoverable, refresh, renew, reasons := c.isUsableContract(state.AutopilotConfig(), state.RS, ci, bh, ipFilter) + usable, recoverable, refresh, renew, reasons := c.isUsableContract(ctx.AutopilotConfig(), ctx.state.RS, ci, bh, ipFilter) ci.usable = usable ci.recoverable = recoverable if !usable { @@ -734,7 +742,7 @@ LOOP: return toKeep, toArchive, toStopUsing, toRefresh, toRenew } -func (c *Contractor) 
runHostChecks(ctx context.Context, state *MaintenanceState, hosts []api.Host, hostData map[types.PublicKey]uint64, minScore float64) (map[types.PublicKey]*api.HostCheck, error) { +func (c *Contractor) runHostChecks(ctx *mCtx, hosts []api.Host, hostData map[types.PublicKey]uint64, minScore float64) (map[types.PublicKey]*api.HostCheck, error) { // fetch consensus state cs, err := c.bus.ConsensusState(ctx) if err != nil { @@ -742,18 +750,18 @@ func (c *Contractor) runHostChecks(ctx context.Context, state *MaintenanceState, } // create gouging checker - gc := worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) + gc := worker.NewGougingChecker(ctx.state.GS, cs, ctx.state.Fee, ctx.state.Period(), ctx.RenewWindow()) // check all hosts checks := make(map[types.PublicKey]*api.HostCheck) for _, h := range hosts { h.PriceTable.HostBlockHeight = cs.BlockHeight // ignore HostBlockHeight - checks[h.PublicKey] = checkHost(state.ContractsConfig(), state.RS, gc, h, minScore, hostData[h.PublicKey]) + checks[h.PublicKey] = checkHost(ctx.ContractsConfig(), ctx.state.RS, gc, h, minScore, hostData[h.PublicKey]) } return checks, nil } -func (c *Contractor) runContractFormations(ctx context.Context, w Worker, state *MaintenanceState, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostsBreakdown, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { +func (c *Contractor) runContractFormations(ctx *mCtx, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostsBreakdown, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { select { case <-c.shutdownCtx.Done(): return nil, nil @@ -761,12 +769,12 @@ func (c *Contractor) runContractFormations(ctx context.Context, w Worker, state } // convenience variables - shouldFilter := !state.AllowRedundantIPs() + shouldFilter := !ctx.AllowRedundantIPs() c.logger.Infow( "run 
contract formations", "usedHosts", len(usedHosts), - "required", state.WantedContracts(), + "required", ctx.WantedContracts(), "missing", missing, "budget", budget, ) @@ -804,7 +812,7 @@ func (c *Contractor) runContractFormations(ctx context.Context, w Worker, state lastStateUpdate := time.Now() // prepare a gouging checker - gc := state.GougingChecker(cs) + gc := ctx.GougingChecker(cs) // prepare an IP filter that contains all used hosts ipFilter := c.newIPFilter() @@ -817,7 +825,7 @@ func (c *Contractor) runContractFormations(ctx context.Context, w Worker, state } // calculate min/max contract funds - minInitialContractFunds, maxInitialContractFunds := initialContractFundingMinMax(state.AutopilotConfig()) + minInitialContractFunds, maxInitialContractFunds := initialContractFundingMinMax(ctx.AutopilotConfig()) LOOP: for h := 0; missing > 0 && h < len(selected); h++ { @@ -844,7 +852,7 @@ LOOP: c.logger.Errorf("could not fetch consensus state, err: %v", err) } else { cs = css - gc = state.GougingChecker(cs) + gc = ctx.GougingChecker(cs) } } @@ -859,7 +867,7 @@ LOOP: continue } - formedContract, proceed, err := c.formContract(ctx, w, state, host, minInitialContractFunds, maxInitialContractFunds, budget) + formedContract, proceed, err := c.formContract(ctx, w, host, minInitialContractFunds, maxInitialContractFunds, budget) if err == nil { // add contract to contract set formed = append(formed, formedContract) @@ -942,7 +950,7 @@ func (c *Contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont } } -func (c *Contractor) runContractRenewals(ctx context.Context, w Worker, state *MaintenanceState, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { +func (c *Contractor) runContractRenewals(ctx *mCtx, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { c.logger.Infow( "run contracts renewals", "torenew", len(toRenew), @@ -974,7 
+982,7 @@ func (c *Contractor) runContractRenewals(ctx context.Context, w Worker, state *M // renew and add if it succeeds or if its usable contract := toRenew[i].contract.ContractMetadata - renewed, proceed, err := c.renewContract(ctx, w, state, toRenew[i], budget) + renewed, proceed, err := c.renewContract(ctx, w, toRenew[i], budget) if err != nil { c.alerter.RegisterAlert(ctx, newContractRenewalFailedAlert(contract, !proceed, err)) if toRenew[i].usable { @@ -1002,7 +1010,7 @@ func (c *Contractor) runContractRenewals(ctx context.Context, w Worker, state *M return renewals, toKeep } -func (c *Contractor) runContractRefreshes(ctx context.Context, w Worker, state *MaintenanceState, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) { +func (c *Contractor) runContractRefreshes(ctx *mCtx, w Worker, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) { c.logger.Infow( "run contracts refreshes", "torefresh", len(toRefresh), @@ -1025,7 +1033,7 @@ func (c *Contractor) runContractRefreshes(ctx context.Context, w Worker, state * } // refresh and add if it succeeds - renewed, proceed, err := c.refreshContract(ctx, w, state, ci, budget) + renewed, proceed, err := c.refreshContract(ctx, w, ci, budget) if err == nil { refreshed = append(refreshed, renewal{from: ci.contract.ContractMetadata, to: renewed, ci: ci}) } @@ -1076,13 +1084,13 @@ func (c *Contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contract return refreshAmountCapped } -func (c *Contractor) renewFundingEstimate(ctx context.Context, state *MaintenanceState, ci contractInfo, fee types.Currency, renewing bool) (types.Currency, error) { +func (c *Contractor) renewFundingEstimate(ctx *mCtx, ci contractInfo, fee types.Currency, renewing bool) (types.Currency, error) { // estimate the cost of the current data stored dataStored := ci.contract.FileSize() - storageCost := sectorStorageCost(ci.priceTable, state.Period()).Mul64(bytesToSectors(dataStored)) 
+ storageCost := sectorStorageCost(ci.priceTable, ctx.state.Period()).Mul64(bytesToSectors(dataStored)) // fetch the spending of the contract we want to renew. - prevSpending, err := c.contractSpending(ctx, ci.contract, state.Period()) + prevSpending, err := c.contractSpending(ctx, ci.contract, ctx.state.Period()) if err != nil { c.logger.Errorw( fmt.Sprintf("could not retrieve contract spending, err: %v", err), @@ -1097,7 +1105,7 @@ func (c *Contractor) renewFundingEstimate(ctx context.Context, state *Maintenanc // TODO: estimate is not ideal because price can change, better would be to // look at the amount of data stored in the contract from the previous cycle prevUploadDataEstimate := types.NewCurrency64(dataStored) // default to assuming all data was uploaded - sectorUploadCost := sectorUploadCost(ci.priceTable, state.Period()) + sectorUploadCost := sectorUploadCost(ci.priceTable, ctx.Period()) if !sectorUploadCost.IsZero() { prevUploadDataEstimate = prevSpending.Uploads.Div(sectorUploadCost).Mul64(rhpv2.SectorSize) } @@ -1135,7 +1143,7 @@ func (c *Contractor) renewFundingEstimate(ctx context.Context, state *Maintenanc // check for a sane minimum that is equal to the initial contract funding // but without an upper cap. 
- minInitialContractFunds, _ := initialContractFundingMinMax(state.AutopilotConfig()) + minInitialContractFunds, _ := initialContractFundingMinMax(ctx.AutopilotConfig()) minimum := c.initialContractFunding(ci.settings, txnFeeEstimate, minInitialContractFunds, types.ZeroCurrency) cappedEstimatedCost := estimatedCost if cappedEstimatedCost.Cmp(minimum) < 0 { @@ -1212,7 +1220,7 @@ func (c *Contractor) calculateMinScore(candidates []scoredHost, numContracts uin return minScore } -func (c *Contractor) candidateHosts(ctx context.Context, state *MaintenanceState, hosts []api.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostsBreakdown, error) { +func (c *Contractor) candidateHosts(ctx *mCtx, hosts []api.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostsBreakdown, error) { start := time.Now() // fetch consensus state @@ -1222,7 +1230,7 @@ func (c *Contractor) candidateHosts(ctx context.Context, state *MaintenanceState } // create a gouging checker - gc := state.GougingChecker(cs) + gc := ctx.GougingChecker(cs) // select unused hosts that passed a scan var unused []api.Host @@ -1265,7 +1273,7 @@ func (c *Contractor) candidateHosts(ctx context.Context, state *MaintenanceState // NOTE: ignore the pricetable's HostBlockHeight by setting it to our // own blockheight h.PriceTable.HostBlockHeight = cs.BlockHeight - hc := checkHost(state.ContractsConfig(), state.RS, gc, h, minScore, storedData[h.PublicKey]) + hc := checkHost(ctx.ContractsConfig(), ctx.state.RS, gc, h, minScore, storedData[h.PublicKey]) if hc.Usability.IsUsable() { candidates = append(candidates, scoredHost{h, hc.Score.Score()}) continue @@ -1287,7 +1295,7 @@ func (c *Contractor) candidateHosts(ctx context.Context, state *MaintenanceState return candidates, unusableHosts, nil } -func (c *Contractor) renewContract(ctx context.Context, w Worker, state 
*MaintenanceState, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) renewContract(ctx *mCtx, w Worker, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't renew contract without a revision") } @@ -1306,7 +1314,7 @@ func (c *Contractor) renewContract(ctx context.Context, w Worker, state *Mainten } // calculate the renter funds - renterFunds, err := c.renewFundingEstimate(ctx, state, ci, state.Fee, true) + renterFunds, err := c.renewFundingEstimate(ctx, ci, ctx.state.Fee, true) if err != nil { c.logger.Errorw(fmt.Sprintf("could not get renew funding estimate, err: %v", err), "hk", hk, "fcid", fcid) return api.ContractMetadata{}, true, err @@ -1319,9 +1327,9 @@ func (c *Contractor) renewContract(ctx context.Context, w Worker, state *Mainten } // sanity check the endheight is not the same on renewals - endHeight := state.EndHeight() + endHeight := ctx.EndHeight() if endHeight <= rev.EndHeight() { - c.logger.Infow("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", state.Period, "bh", cs.BlockHeight) + c.logger.Infow("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", ctx.state.Period, "bh", cs.BlockHeight) return api.ContractMetadata{}, false, fmt.Errorf("renewal endheight should surpass the current contract endheight, %v <= %v", endHeight, rev.EndHeight()) } @@ -1329,7 +1337,7 @@ func (c *Contractor) renewContract(ctx context.Context, w Worker, state *Mainten expectedNewStorage := renterFundsToExpectedStorage(renterFunds, endHeight-cs.BlockHeight, ci.priceTable) // renew the contract - resp, err := w.RHPRenew(ctx, fcid, endHeight, hk, contract.SiamuxAddr, settings.Address, state.Address, renterFunds, types.ZeroCurrency, expectedNewStorage, settings.WindowSize) + resp, err := 
w.RHPRenew(ctx, fcid, endHeight, hk, contract.SiamuxAddr, settings.Address, ctx.state.Address, renterFunds, types.ZeroCurrency, expectedNewStorage, settings.WindowSize) if err != nil { c.logger.Errorw( "renewal failed", @@ -1367,7 +1375,7 @@ func (c *Contractor) renewContract(ctx context.Context, w Worker, state *Mainten return renewedContract, true, nil } -func (c *Contractor) refreshContract(ctx context.Context, w Worker, state *MaintenanceState, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) refreshContract(ctx *mCtx, w Worker, ci contractInfo, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { if ci.contract.Revision == nil { return api.ContractMetadata{}, true, errors.New("can't refresh contract without a revision") } @@ -1387,8 +1395,8 @@ func (c *Contractor) refreshContract(ctx context.Context, w Worker, state *Maint // calculate the renter funds var renterFunds types.Currency - if isOutOfFunds(state.AutopilotConfig(), ci.priceTable, ci.contract) { - renterFunds = c.refreshFundingEstimate(state.AutopilotConfig(), ci, state.Fee) + if isOutOfFunds(ctx.AutopilotConfig(), ci.priceTable, ci.contract) { + renterFunds = c.refreshFundingEstimate(ctx.AutopilotConfig(), ci, ctx.state.Fee) } else { renterFunds = rev.ValidRenterPayout() // don't increase funds } @@ -1403,10 +1411,10 @@ func (c *Contractor) refreshContract(ctx context.Context, w Worker, state *Maint unallocatedCollateral := contract.RemainingCollateral() // a refresh should always result in a contract that has enough collateral - minNewCollateral := minRemainingCollateral(state.AutopilotConfig(), state.RS, renterFunds, settings, ci.priceTable).Mul64(2) + minNewCollateral := minRemainingCollateral(ctx.AutopilotConfig(), ctx.state.RS, renterFunds, settings, ci.priceTable).Mul64(2) // renew the contract - resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, 
state.Address, renterFunds, minNewCollateral, expectedStorage, settings.WindowSize) + resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, ctx.state.Address, renterFunds, minNewCollateral, expectedStorage, settings.WindowSize) if err != nil { if strings.Contains(err.Error(), "new collateral is too low") { c.logger.Infow("refresh failed: contract wouldn't have enough collateral after refresh", @@ -1446,7 +1454,7 @@ func (c *Contractor) refreshContract(ctx context.Context, w Worker, state *Maint return refreshedContract, true, nil } -func (c *Contractor) formContract(ctx context.Context, w Worker, state *MaintenanceState, host api.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { +func (c *Contractor) formContract(ctx *mCtx, w Worker, host api.Host, minInitialContractFunds, maxInitialContractFunds types.Currency, budget *types.Currency) (cm api.ContractMetadata, proceed bool, err error) { // convenience variables hk := host.PublicKey @@ -1464,7 +1472,7 @@ func (c *Contractor) formContract(ctx context.Context, w Worker, state *Maintena } // check our budget - txnFee := state.Fee.Mul64(estimatedFileContractTransactionSetSize) + txnFee := ctx.state.Fee.Mul64(estimatedFileContractTransactionSetSize) renterFunds := initialContractFunding(scan.Settings, txnFee, minInitialContractFunds, maxInitialContractFunds) if budget.Cmp(renterFunds) < 0 { c.logger.Infow("insufficient budget", "budget", budget, "needed", renterFunds) @@ -1472,12 +1480,12 @@ func (c *Contractor) formContract(ctx context.Context, w Worker, state *Maintena } // calculate the host collateral - endHeight := state.EndHeight() + endHeight := ctx.EndHeight() expectedStorage := renterFundsToExpectedStorage(renterFunds, endHeight-cs.BlockHeight, scan.PriceTable) - hostCollateral := rhpv2.ContractFormationCollateral(state.Period(), expectedStorage, scan.Settings) + 
hostCollateral := rhpv2.ContractFormationCollateral(ctx.Period(), expectedStorage, scan.Settings) // form contract - contract, _, err := w.RHPForm(ctx, endHeight, hk, host.NetAddress, state.Address, renterFunds, hostCollateral) + contract, _, err := w.RHPForm(ctx, endHeight, hk, host.NetAddress, ctx.state.Address, renterFunds, hostCollateral) if err != nil { // TODO: keep track of consecutive failures and break at some point c.logger.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) diff --git a/autopilot/contractor/state.go b/autopilot/contractor/state.go index 2d505d389..2bf549da1 100644 --- a/autopilot/contractor/state.go +++ b/autopilot/contractor/state.go @@ -1,6 +1,9 @@ package contractor import ( + "context" + "time" + "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/worker" @@ -19,44 +22,88 @@ type ( Fee types.Currency SkipContractFormations bool } + + mCtx struct { + ctx context.Context + state *MaintenanceState + } ) -func (state *MaintenanceState) AllowRedundantIPs() bool { - return state.AP.Config.Hosts.AllowRedundantIPs +func newMaintenanceCtx(ctx context.Context, state *MaintenanceState) *mCtx { + return &mCtx{ + ctx: ctx, + state: state, + } } -func (state *MaintenanceState) Allowance() types.Currency { - return state.AP.Config.Contracts.Allowance +func (ctx *mCtx) ApID() string { + return ctx.state.AP.ID } -func (state *MaintenanceState) AutopilotConfig() api.AutopilotConfig { - return state.AP.Config +func (ctx *mCtx) Deadline() (deadline time.Time, ok bool) { + return ctx.ctx.Deadline() } -func (state *MaintenanceState) ContractsConfig() api.ContractsConfig { - return state.AP.Config.Contracts +func (ctx *mCtx) Done() <-chan struct{} { + return ctx.ctx.Done() } -func (state *MaintenanceState) ContractSet() string { - return state.AP.Config.Contracts.Set +func (ctx *mCtx) Err() error { + return ctx.ctx.Err() } -func (state *MaintenanceState) EndHeight() uint64 { - return state.AP.EndHeight() +func (ctx 
*mCtx) Value(key interface{}) interface{} { + return ctx.ctx.Value(key) } -func (state *MaintenanceState) GougingChecker(cs api.ConsensusState) worker.GougingChecker { - return worker.NewGougingChecker(state.GS, cs, state.Fee, state.Period(), state.RenewWindow()) +func (ctx *mCtx) AllowRedundantIPs() bool { + return ctx.state.AP.Config.Hosts.AllowRedundantIPs } -func (state *MaintenanceState) WantedContracts() uint64 { - return state.AP.Config.Contracts.Amount +func (ctx *mCtx) Allowance() types.Currency { + return ctx.state.Allowance() } -func (state *MaintenanceState) Period() uint64 { - return state.AP.Config.Contracts.Period +func (ctx *mCtx) AutopilotConfig() api.AutopilotConfig { + return ctx.state.AP.Config +} + +func (ctx *mCtx) ContractsConfig() api.ContractsConfig { + return ctx.state.ContractsConfig() +} + +func (ctx *mCtx) ContractSet() string { + return ctx.state.AP.Config.Contracts.Set +} + +func (ctx *mCtx) EndHeight() uint64 { + return ctx.state.AP.EndHeight() +} + +func (ctx *mCtx) GougingChecker(cs api.ConsensusState) worker.GougingChecker { + return worker.NewGougingChecker(ctx.state.GS, cs, ctx.state.Fee, ctx.Period(), ctx.RenewWindow()) +} + +func (ctx *mCtx) WantedContracts() uint64 { + return ctx.state.AP.Config.Contracts.Amount +} + +func (ctx *mCtx) Period() uint64 { + return ctx.state.Period() } -func (state *MaintenanceState) RenewWindow() uint64 { - return state.AP.Config.Contracts.RenewWindow +func (ctx *mCtx) RenewWindow() uint64 { + return ctx.state.AP.Config.Contracts.RenewWindow +} + +func (state *MaintenanceState) Allowance() types.Currency { + return state.AP.Config.Contracts.Allowance +} + +func (state *MaintenanceState) ContractsConfig() api.ContractsConfig { + return state.AP.Config.Contracts +} + +func (state *MaintenanceState) Period() uint64 { + return state.AP.Config.Contracts.Period } From 1d7286d4a7730352e9adcfa29bce932d69df0f87 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 3 Apr 2024 14:47:33 +0200 Subject: 
[PATCH 151/201] worker: fix hosts being reused for overdrive before being done uploading --- worker/host_test.go | 8 ++++++-- worker/mocks_test.go | 3 +-- worker/upload.go | 9 +++++---- worker/upload_test.go | 31 +++++++++++++++++++++++++++++++ 4 files changed, 43 insertions(+), 8 deletions(-) diff --git a/worker/host_test.go b/worker/host_test.go index 3d124e9aa..5f7a5432c 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -21,7 +21,8 @@ type ( testHost struct { *hostMock *contractMock - hptFn func() api.HostPriceTable + hptFn func() api.HostPriceTable + uploadDelay time.Duration } testHostManager struct { @@ -91,7 +92,10 @@ func (h *testHost) DownloadSector(ctx context.Context, w io.Writer, root types.H } func (h *testHost) UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error { - h.AddSector(sector) + h.AddSector(sectorRoot, sector) + if h.uploadDelay > 0 { + time.Sleep(h.uploadDelay) + } return nil } diff --git a/worker/mocks_test.go b/worker/mocks_test.go index 0e45b80df..897d96cdb 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -124,8 +124,7 @@ func newContractMock(hk types.PublicKey, fcid types.FileContractID) *contractMoc } } -func (c *contractMock) AddSector(sector *[rhpv2.SectorSize]byte) (root types.Hash256) { - root = rhpv2.SectorRoot(sector) +func (c *contractMock) AddSector(root types.Hash256, sector *[rhpv2.SectorSize]byte) { c.mu.Lock() c.sectors[root] = sector c.mu.Unlock() diff --git a/worker/upload.go b/worker/upload.go index 4e82f533e..7bcaf5c52 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -952,7 +952,7 @@ loop: func (s *slabUpload) canOverdrive(overdriveTimeout time.Duration) bool { // overdrive is not kicking in yet remaining := s.numSectors - s.numUploaded - if remaining >= s.maxOverdrive { + if remaining > s.maxOverdrive { return false } @@ -1055,7 +1055,7 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { } // 
redundant sectors can't complete the upload - if sector.uploaded.Root != (types.Hash256{}) { + if sector.isUploaded() { return false, false } @@ -1069,10 +1069,11 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { // update uploaded sectors s.numUploaded++ - // release all other candidates for this sector + // release the candidate if the upload was redundant for _, candidate := range s.candidates { - if candidate.req != nil && candidate.req != req && candidate.req.sector.index == sector.index { + if candidate.req == req && sector.isUploaded() { candidate.req = nil + break } } diff --git a/worker/upload_test.go b/worker/upload_test.go index 0b6308ffe..0b2488f32 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -637,6 +637,37 @@ func TestUploadRegression(t *testing.T) { } } +func TestUploadSingleSectorSlowHosts(t *testing.T) { + // create test worker + w := newTestWorker(t) + + // add hosts to worker + minShards := 10 + totalShards := 30 + slowHosts := 5 + w.uploadManager.maxOverdrive = uint64(slowHosts) + w.uploadManager.overdriveTimeout = time.Second + hosts := w.AddHosts(totalShards + slowHosts) + + for i := 0; i < slowHosts; i++ { + hosts[i].uploadDelay = time.Hour + } + + // create test data + data := frand.Bytes(rhpv2.SectorSize * minShards) + + // create upload params + params := testParameters(t.Name()) + params.rs.MinShards = minShards + params.rs.TotalShards = totalShards + + // upload data + _, _, err := w.uploadManager.Upload(context.Background(), bytes.NewReader(data), w.Contracts(), params, lockingPriorityUpload) + if err != nil { + t.Fatal(err) + } +} + func testParameters(path string) uploadParameters { return uploadParameters{ bucket: testBucket, From f064397d172fb7d896da898fbf5312005eb06621 Mon Sep 17 00:00:00 2001 From: alexfreska Date: Wed, 3 Apr 2024 14:20:35 +0000 Subject: [PATCH 152/201] ui: v0.50.0 --- go.mod | 8 +++++--- go.sum | 8 ++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git 
a/go.mod b/go.mod index a46cea13f..0010734c3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module go.sia.tech/renterd -go 1.21.6 +go 1.21.7 + +toolchain go1.21.8 require ( github.com/gabriel-vasile/mimetype v1.4.3 @@ -18,7 +20,7 @@ require ( go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca - go.sia.tech/web/renterd v0.49.0 + go.sia.tech/web/renterd v0.50.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.21.0 golang.org/x/sys v0.18.0 @@ -75,7 +77,7 @@ require ( gitlab.com/NebulousLabs/ratelimit v0.0.0-20200811080431-99b8f0768b2e // indirect gitlab.com/NebulousLabs/siamux v0.0.2-0.20220630142132-142a1443a259 // indirect gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect - go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect + go.sia.tech/web v0.0.0-20240403135501-82ff3a2a3e7c // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.22.0 // indirect golang.org/x/text v0.14.0 // indirect diff --git a/go.sum b/go.sum index 407d36e57..701b93649 100644 --- a/go.sum +++ b/go.sum @@ -257,10 +257,10 @@ go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= go.sia.tech/mux v1.2.0/go.mod h1:Yyo6wZelOYTyvrHmJZ6aQfRoer3o4xyKQ4NmQLJrBSo= go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca h1:aZMg2AKevn7jKx+wlusWQfwSM5pNU9aGtRZme29q3O4= go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca/go.mod h1:h/1afFwpxzff6/gG5i1XdAgPK7dEY6FaibhK7N5F86Y= -go.sia.tech/web v0.0.0-20231213145933-3f175a86abff h1:/nE7nhewDRxzEdtSKT4SkiUwtjPSiy7Xz7CHEW3MaGQ= -go.sia.tech/web v0.0.0-20231213145933-3f175a86abff/go.mod h1:RKODSdOmR3VtObPAcGwQqm4qnqntDVFylbvOBbWYYBU= -go.sia.tech/web/renterd v0.49.0 h1:z9iDr3gIJ60zqiydDZ2MUbhANm6GwdvRf4k67+Zrj14= -go.sia.tech/web/renterd v0.49.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= +go.sia.tech/web v0.0.0-20240403135501-82ff3a2a3e7c h1:os2ZFJojHi0ckCNbr8c2GnWGm0ftvHkQUJOfBRGGIfk= 
+go.sia.tech/web v0.0.0-20240403135501-82ff3a2a3e7c/go.mod h1:nGEhGmI8zV/BcC3LOCC5JLVYpidNYJIvLGIqVRWQBCg= +go.sia.tech/web/renterd v0.50.0 h1:Q955SDKAIej3vEr+P9nOjpgxCKaO+noTnOSUF30SGsc= +go.sia.tech/web/renterd v0.50.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= From f1a9c83f6a304851385ef930527282a2fc8f6fbd Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 3 Apr 2024 16:33:36 +0200 Subject: [PATCH 153/201] worker: don't fail upload when failing to fetch packed slab from bus --- worker/upload.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/worker/upload.go b/worker/upload.go index 4e82f533e..00c0a924b 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -194,13 +194,11 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra // fetch packed slab to upload packedSlabs, err := w.bus.PackedSlabsForUpload(ctx, defaultPackedSlabsLockDuration, uint8(up.rs.MinShards), uint8(up.rs.TotalShards), up.contractSet, 1) if err != nil { - return "", fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) - } - - // upload packed slab - if len(packedSlabs) > 0 { + w.logger.With(zap.Error(err)).Error("couldn't fetch packed slabs from bus") + } else if len(packedSlabs) > 0 { + // upload packed slab if err := w.tryUploadPackedSlab(ctx, mem, packedSlabs[0], up.rs, up.contractSet, lockingPriorityBlockedUpload); err != nil { - w.logger.Error(err) + w.logger.With(zap.Error(err)).Error("failed to upload packed slab") } } } @@ -282,7 +280,7 @@ func (w *worker) tryUploadPackedSlab(ctx context.Context, mem Memory, ps api.Pac // fetch contracts contracts, err := w.bus.Contracts(ctx, api.ContractsOpts{ContractSet: contractSet}) if err != nil { - return 
fmt.Errorf("couldn't fetch packed slabs from bus: %v", err) + return fmt.Errorf("couldn't fetch contracts from bus: %v", err) } // fetch upload params From 8e21235045e8c94ea3a3e2d6e5ac6b7c2b9e4087 Mon Sep 17 00:00:00 2001 From: Alex Freska Date: Wed, 3 Apr 2024 10:54:37 -0400 Subject: [PATCH 154/201] ci: remove polling for repo dispatch --- .github/workflows/ui.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ui.yml b/.github/workflows/ui.yml index a5b5b440b..78f5e2b6d 100644 --- a/.github/workflows/ui.yml +++ b/.github/workflows/ui.yml @@ -1,9 +1,8 @@ name: Update UI and open PR on: - # Run daily - schedule: - - cron: '0 0 * * *' + repository_dispatch: + types: [update-ui] # Enable manual trigger workflow_dispatch: From 36b5a8043f68e61a52dcd7c38c0a1564d79006ac Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 4 Apr 2024 08:54:07 +0200 Subject: [PATCH 155/201] go.mod: upgrade core dependency --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 0010734c3..756a9aa3b 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/minio/minio-go/v7 v7.0.69 github.com/montanaflynn/stats v0.7.1 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe - go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37 + go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041 go.sia.tech/coreutils v0.0.3 go.sia.tech/gofakes3 v0.0.1 go.sia.tech/hostd v1.0.4-0.20240327150808-8c407121ad92 diff --git a/go.sum b/go.sum index 701b93649..73169eae2 100644 --- a/go.sum +++ b/go.sum @@ -245,6 +245,8 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37 h1:jsiab6uAUkaeDL7XEseAxJw7NVhxLNoU2WaB0AHbgG8= go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= 
+go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041 h1:3tgQlTmop/OU5dTHnBmAdNIPgae67wRijaknBhmAOCg= +go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= go.sia.tech/gofakes3 v0.0.1 h1:8vtYH/B17NJ4GXLWiONfhwBrrmtJtYiofnO3PfjU298= From 27b848d6e7951b2ab18b94dea9fb3adb45f9e40e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 4 Apr 2024 10:58:19 +0200 Subject: [PATCH 156/201] autopilot: rename smallestvalidScore --- autopilot/autopilot.go | 2 +- autopilot/contractor.go | 6 +++--- autopilot/hostscore.go | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 4e3023a2f..9f3477b0c 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -797,7 +797,7 @@ func (ap *Autopilot) stateHandlerGET(jc jape.Context) { func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { - hc := checkHost(cfg, rs, gc, host, smallestValidScore, 0) + hc := checkHost(cfg, rs, gc, host, minValidScore, 0) if hc.Usability.IsUsable() { usables++ } diff --git a/autopilot/contractor.go b/autopilot/contractor.go index b3dd76ac4..f7033ff30 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -271,7 +271,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // fetch candidate hosts - candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, smallestValidScore) // avoid 0 score hosts + candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, 
minValidScore) // avoid 0 score hosts if err != nil { return false, err } @@ -1229,7 +1229,7 @@ func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uin // return early if there's no hosts if len(candidates) == 0 { c.logger.Warn("min host score is set to the smallest non-zero float because there are no candidate hosts") - return smallestValidScore + return minValidScore } // determine the number of random hosts we fetch per iteration when @@ -1263,7 +1263,7 @@ func (c *contractor) calculateMinScore(candidates []scoredHost, numContracts uin return candidates[i].score > candidates[j].score }) if len(candidates) < int(numContracts) { - return smallestValidScore + return minValidScore } else if cutoff := candidates[numContracts-1].score; minScore > cutoff { minScore = cutoff } diff --git a/autopilot/hostscore.go b/autopilot/hostscore.go index 2791adb3d..a11f96944 100644 --- a/autopilot/hostscore.go +++ b/autopilot/hostscore.go @@ -17,9 +17,9 @@ const ( // accept. minProtocolVersion = "1.5.9" - // smallestValidScore is the smallest score that a host can have before + // minValidScore is the smallest score that a host can have before // being ignored. 
- smallestValidScore = math.SmallestNonzeroFloat64 + minValidScore = math.SmallestNonzeroFloat64 ) func hostScore(cfg api.AutopilotConfig, h api.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown { From 1784b7a102ae12a9f5468eabcc6c6a4095b41fcc Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 4 Apr 2024 11:08:27 +0200 Subject: [PATCH 157/201] e2e: address comments --- internal/test/e2e/gouging_test.go | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 27084ba1f..f5fa2d7fa 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -120,33 +120,23 @@ func TestHostMinVersion(t *testing.T) { // create a new test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: int(test.AutopilotConfig.Contracts.Amount), - logger: newTestLoggerCustom(zapcore.ErrorLevel), + hosts: int(test.AutopilotConfig.Contracts.Amount), }) defer cluster.Shutdown() tt := cluster.tt - // check number of contracts - contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{ - ContractSet: test.AutopilotConfig.Contracts.Set, - }) - tt.OK(err) - if len(contracts) != int(test.AutopilotConfig.Contracts.Amount) { - t.Fatalf("expected %v contracts, got %v", test.AutopilotConfig.Contracts.Amount, len(contracts)) - } - // set min version to a high value cfg := test.AutopilotConfig cfg.Hosts.MinProtocolVersion = "99.99.99" cluster.UpdateAutopilotConfig(context.Background(), cfg) // contracts in set should drop to 0 - tt.Retry(100, time.Millisecond, func() error { + tt.Retry(100, 100*time.Millisecond, func() error { contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{ ContractSet: test.AutopilotConfig.Contracts.Set, }) tt.OK(err) - if len(contracts) != int(test.AutopilotConfig.Contracts.Amount) { + if len(contracts) != 0 { return fmt.Errorf("expected 0 contracts, got %v", 
len(contracts)) } return nil From 56adf02b130529e4dec7067c902baa76297dc23a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 4 Apr 2024 11:21:46 +0200 Subject: [PATCH 158/201] stores: address comments --- stores/metadata.go | 2 +- stores/metadata_test.go | 3 --- stores/sql.go | 10 ++++++++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index d8e4962c9..ad4c7d496 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2727,7 +2727,7 @@ func (s *SQLStore) pruneSlabsLoop() { return } - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second+sumDurations(s.retryTransactionIntervals)) err := s.retryTransaction(ctx, pruneSlabs) if err != nil { s.logger.Errorw("failed to prune slabs", zap.Error(err)) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index ebb5f0df1..bdd955808 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4097,7 +4097,6 @@ func TestSlabCleanup(t *testing.T) { if err != nil { t.Fatal(err) } - time.Sleep(100 * time.Millisecond) // check slice count var slabCntr int64 @@ -4115,7 +4114,6 @@ func TestSlabCleanup(t *testing.T) { if err != nil { t.Fatal(err) } - time.Sleep(100 * time.Millisecond) ss.Retry(100, 100*time.Millisecond, func() error { if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { @@ -4164,7 +4162,6 @@ func TestSlabCleanup(t *testing.T) { if err != nil { t.Fatal(err) } - time.Sleep(100 * time.Millisecond) ss.Retry(100, 100*time.Millisecond, func() error { if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { diff --git a/stores/sql.go b/stores/sql.go index b64adddca..02e250e78 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -250,8 +250,6 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { } shutdownCtx, shutdownCtxCancel := context.WithCancel(context.Background()) - slabPruneOngoing := 
make(chan struct{}) - close(slabPruneOngoing) ss := &SQLStore{ alerts: cfg.Alerts, db: db, @@ -622,3 +620,11 @@ func (s *SQLStore) ResetConsensusSubscription(ctx context.Context) error { s.persistMu.Unlock() return nil } + +func sumDurations(durations []time.Duration) time.Duration { + var sum time.Duration + for _, d := range durations { + sum += d + } + return sum +} From fadd1892acea04fd6a9428dd488985bc9a152cfa Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 4 Apr 2024 14:36:04 +0200 Subject: [PATCH 159/201] worker: move s3 package to worker --- api/object.go | 19 ++- api/worker.go | 58 +++++++-- bus/bus.go | 2 +- cmd/renterd/main.go | 14 +-- internal/node/node.go | 18 ++- internal/test/e2e/cluster.go | 7 +- internal/test/e2e/metadata_test.go | 4 +- worker/client/client.go | 10 +- {s3 => worker/s3}/authentication.go | 0 {s3 => worker/s3}/backend.go | 9 +- {s3 => worker/s3}/s3.go | 6 +- worker/serve.go | 70 ++--------- worker/worker.go | 186 ++++++++++++++++++++-------- worker/worker_test.go | 9 ++ 14 files changed, 247 insertions(+), 165 deletions(-) rename {s3 => worker/s3}/authentication.go (100%) rename {s3 => worker/s3}/backend.go (99%) rename {s3 => worker/s3}/s3.go (97%) diff --git a/api/object.go b/api/object.go index 0382f69a7..b269baff2 100644 --- a/api/object.go +++ b/api/object.go @@ -9,6 +9,7 @@ import ( "net/url" "path/filepath" "strings" + "time" "go.sia.tech/renterd/object" ) @@ -91,12 +92,12 @@ type ( // HeadObjectResponse is the response type for the HEAD /worker/object endpoint. 
HeadObjectResponse struct { - ContentType string `json:"contentType"` - Etag string `json:"eTag"` - LastModified string `json:"lastModified"` - Range *DownloadRange `json:"range,omitempty"` - Size int64 `json:"size"` - Metadata ObjectUserMetadata `json:"metadata"` + ContentType string + Etag string + LastModified time.Time + Range *ContentRange + Size int64 + Metadata ObjectUserMetadata } // ObjectsDeleteRequest is the request type for the /bus/objects/list endpoint. @@ -151,12 +152,6 @@ func ExtractObjectUserMetadataFrom(metadata map[string]string) ObjectUserMetadat return oum } -// LastModified returns the object's ModTime formatted for use in the -// 'Last-Modified' header -func (o ObjectMetadata) LastModified() string { - return o.ModTime.Std().Format(http.TimeFormat) -} - // ContentType returns the object's MimeType for use in the 'Content-Type' // header, if the object's mime type is empty we try and deduce it from the // extension in the object's name. diff --git a/api/worker.go b/api/worker.go index 6d0c0e9d2..68e80b80f 100644 --- a/api/worker.go +++ b/api/worker.go @@ -3,9 +3,12 @@ package api import ( "errors" "fmt" + "math" + "net/http" "strconv" "strings" + "github.com/gotd/contrib/http_range" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" @@ -23,6 +26,10 @@ var ( // ErrHostOnPrivateNetwork is returned by the worker API when a host can't // be scanned since it is on a private network. ErrHostOnPrivateNetwork = errors.New("host is on a private network") + + // ErrMultiRangeNotSupported is returned by the worker API when a requesta + // tries to download multiple ranges at once. + ErrMultiRangeNotSupported = errors.New("multipart ranges are not supported") ) type ( @@ -216,41 +223,74 @@ type ( } ) -type DownloadRange struct { +// ContentRange represents a content range returned via the "Content-Range" +// header. 
+type ContentRange struct { Offset int64 Length int64 Size int64 } -func ParseDownloadRange(contentRange string) (DownloadRange, error) { +// DownloadRange represents a requested range for a download via the "Range" +// header. +type DownloadRange struct { + Offset int64 + Length int64 +} + +func (r *DownloadRange) ContentRange(size int64) *ContentRange { + return &ContentRange{ + Offset: r.Offset, + Length: r.Length, + Size: size, + } +} + +func ParseContentRange(contentRange string) (ContentRange, error) { parts := strings.Split(contentRange, " ") if len(parts) != 2 || parts[0] != "bytes" { - return DownloadRange{}, errors.New("missing 'bytes' prefix in range header") + return ContentRange{}, errors.New("missing 'bytes' prefix in range header") } parts = strings.Split(parts[1], "/") if len(parts) != 2 { - return DownloadRange{}, fmt.Errorf("invalid Content-Range header: %s", contentRange) + return ContentRange{}, fmt.Errorf("invalid Content-Range header: %s", contentRange) } rangeStr := parts[0] rangeParts := strings.Split(rangeStr, "-") if len(rangeParts) != 2 { - return DownloadRange{}, errors.New("invalid Content-Range header") + return ContentRange{}, errors.New("invalid Content-Range header") } start, err := strconv.ParseInt(rangeParts[0], 10, 64) if err != nil { - return DownloadRange{}, err + return ContentRange{}, err } end, err := strconv.ParseInt(rangeParts[1], 10, 64) if err != nil { - return DownloadRange{}, err + return ContentRange{}, err } size, err := strconv.ParseInt(parts[1], 10, 64) if err != nil { - return DownloadRange{}, err + return ContentRange{}, err } - return DownloadRange{ + return ContentRange{ Offset: start, Length: end - start + 1, Size: size, }, nil } + +func ParseDownloadRange(req *http.Request) (int64, int64, error) { + // parse the request range + // we pass math.MaxInt64 since a range header in a request doesn't have a + // size + ranges, err := http_range.ParseRange(req.Header.Get("Range"), math.MaxInt64) + if err != nil { + 
return 0, 0, err + } + + // extract requested offset and length + if len(ranges) > 1 { + return 0, 0, ErrMultiRangeNotSupported + } + return ranges[0].Start, ranges[0].Length, nil +} diff --git a/bus/bus.go b/bus/bus.go index a6b86c0e1..804184e43 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -1290,7 +1290,7 @@ func (b *bus) objectsCopyHandlerPOST(jc jape.Context) { return } - jc.ResponseWriter.Header().Set("Last-Modified", om.LastModified()) + jc.ResponseWriter.Header().Set("Last-Modified", om.ModTime.Std().Format(http.TimeFormat)) jc.ResponseWriter.Header().Set("ETag", api.FormatETag(om.ETag)) jc.Encode(om) } diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 24b309b17..3005b4b34 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -28,9 +28,9 @@ import ( "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/node" "go.sia.tech/renterd/internal/utils" - "go.sia.tech/renterd/s3" "go.sia.tech/renterd/stores" "go.sia.tech/renterd/worker" + "go.sia.tech/renterd/worker/s3" "go.sia.tech/web/renterd" "go.uber.org/zap" "golang.org/x/sys/cpu" @@ -573,7 +573,10 @@ func main() { var workers []autopilot.Worker if len(cfg.Worker.Remotes) == 0 { if cfg.Worker.Enabled { - w, fn, err := node.NewWorker(cfg.Worker, bc, seed, logger) + w, s3Handler, fn, err := node.NewWorker(cfg.Worker, s3.Opts{ + AuthDisabled: cfg.S3.DisableAuth, + HostBucketEnabled: cfg.S3.HostBucketEnabled, + }, bc, seed, logger) if err != nil { logger.Fatal("failed to create worker: " + err.Error()) } @@ -588,13 +591,6 @@ func main() { workers = append(workers, wc) if cfg.S3.Enabled { - s3Handler, err := s3.New(bc, wc, logger.Sugar(), s3.Opts{ - AuthDisabled: cfg.S3.DisableAuth, - HostBucketEnabled: cfg.S3.HostBucketEnabled, - }) - if err != nil { - log.Fatal("failed to create s3 client", err) - } s3Srv = &http.Server{ Addr: cfg.S3.Address, Handler: s3Handler, diff --git a/internal/node/node.go b/internal/node/node.go index 8d8a9816c..d247f199a 100644 --- a/internal/node/node.go +++ 
b/internal/node/node.go @@ -20,6 +20,7 @@ import ( "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" "go.sia.tech/renterd/worker" + "go.sia.tech/renterd/worker/s3" "go.sia.tech/siad/modules" mconsensus "go.sia.tech/siad/modules/consensus" "go.sia.tech/siad/modules/gateway" @@ -31,6 +32,11 @@ import ( "gorm.io/gorm/logger" ) +type Bus interface { + worker.Bus + s3.Bus +} + type BusConfig struct { config.Bus Network *consensus.Network @@ -191,14 +197,18 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht return b.Handler(), shutdownFn, nil } -func NewWorker(cfg config.Worker, b worker.Bus, seed types.PrivateKey, l *zap.Logger) (http.Handler, ShutdownFn, error) { +func NewWorker(cfg config.Worker, s3Opts s3.Opts, b Bus, seed types.PrivateKey, l *zap.Logger) (http.Handler, http.Handler, ShutdownFn, error) { workerKey := blake2b.Sum256(append([]byte("worker"), seed...)) w, err := worker.New(workerKey, cfg.ID, b, cfg.ContractLockTimeout, cfg.BusFlushInterval, cfg.DownloadOverdriveTimeout, cfg.UploadOverdriveTimeout, cfg.DownloadMaxOverdrive, cfg.UploadMaxOverdrive, cfg.DownloadMaxMemory, cfg.UploadMaxMemory, cfg.AllowPrivateIPs, l) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - - return w.Handler(), w.Shutdown, nil + s3Handler, err := s3.New(b, w, l.Named("s3").Sugar(), s3Opts) + if err != nil { + err = errors.Join(err, w.Shutdown(context.Background())) + return nil, nil, nil, fmt.Errorf("failed to create s3 handler: %w", err) + } + return w.Handler(), s3Handler, w.Shutdown, nil } func NewAutopilot(cfg AutopilotConfig, b autopilot.Bus, workers []autopilot.Worker, l *zap.Logger) (http.Handler, RunFn, ShutdownFn, error) { diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 16b3acbfd..0a066cfb5 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -23,8 +23,8 @@ import ( "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/node" 
"go.sia.tech/renterd/internal/test" - "go.sia.tech/renterd/s3" "go.sia.tech/renterd/stores" + "go.sia.tech/renterd/worker/s3" "go.uber.org/zap" "go.uber.org/zap/zapcore" "gorm.io/gorm" @@ -315,7 +315,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { busShutdownFns = append(busShutdownFns, bStopFn) // Create worker. - w, wShutdownFn, err := node.NewWorker(workerCfg, busClient, wk, logger) + w, s3Handler, wShutdownFn, err := node.NewWorker(workerCfg, s3.Opts{}, busClient, wk, logger) tt.OK(err) workerAuth := jape.BasicAuth(workerPassword) @@ -328,9 +328,6 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { workerShutdownFns = append(workerShutdownFns, wShutdownFn) // Create S3 API. - s3Handler, err := s3.New(busClient, workerClient, logger.Sugar(), s3.Opts{}) - tt.OK(err) - s3Server := http.Server{ Handler: s3Handler, } diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go index af924f847..8fa2bd4ae 100644 --- a/internal/test/e2e/metadata_test.go +++ b/internal/test/e2e/metadata_test.go @@ -66,8 +66,8 @@ func TestObjectMetadata(t *testing.T) { } else if !reflect.DeepEqual(hor, &api.HeadObjectResponse{ ContentType: or.Object.ContentType(), Etag: gor.Etag, - LastModified: or.Object.LastModified(), - Range: &api.DownloadRange{Offset: 1, Length: 1, Size: int64(len(data))}, + LastModified: or.Object.ModTime.Std(), + Range: &api.ContentRange{Offset: 1, Length: 1, Size: int64(len(data))}, Size: int64(len(data)), Metadata: gor.Metadata, }) { diff --git a/worker/client/client.go b/worker/client/client.go index fe284469f..aecad676b 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -303,9 +303,9 @@ func parseObjectResponseHeaders(header http.Header) (api.HeadObjectResponse, err } // parse range - var r *api.DownloadRange + var r *api.ContentRange if cr := header.Get("Content-Range"); cr != "" { - dr, err := api.ParseDownloadRange(cr) + dr, err := api.ParseContentRange(cr) if 
err != nil { return api.HeadObjectResponse{}, err } @@ -325,10 +325,14 @@ func parseObjectResponseHeaders(header http.Header) (api.HeadObjectResponse, err } } + modTime, err := time.Parse(http.TimeFormat, header.Get("Last-Modified")) + if err != nil { + return api.HeadObjectResponse{}, fmt.Errorf("failed to parse Last-Modified header: %w", err) + } return api.HeadObjectResponse{ ContentType: header.Get("Content-Type"), Etag: trimEtag(header.Get("ETag")), - LastModified: header.Get("Last-Modified"), + LastModified: modTime, Range: r, Size: size, Metadata: api.ExtractObjectUserMetadataFrom(headers), diff --git a/s3/authentication.go b/worker/s3/authentication.go similarity index 100% rename from s3/authentication.go rename to worker/s3/authentication.go diff --git a/s3/backend.go b/worker/s3/backend.go similarity index 99% rename from s3/backend.go rename to worker/s3/backend.go index bb6e3ff7c..2eff3d713 100644 --- a/s3/backend.go +++ b/worker/s3/backend.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "fmt" "io" + "net/http" "strings" "go.sia.tech/gofakes3" @@ -29,8 +30,8 @@ var ( ) type s3 struct { - b bus - w worker + b Bus + w Worker logger *zap.SugaredLogger } @@ -277,7 +278,7 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range // decorate metadata res.Metadata["Content-Type"] = res.ContentType - res.Metadata["Last-Modified"] = res.LastModified + res.Metadata["Last-Modified"] = res.LastModified.Format(http.TimeFormat) // etag to bytes etag, err := hex.DecodeString(res.Etag) @@ -322,7 +323,7 @@ func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*go // decorate metadata metadata["Content-Type"] = res.ContentType - metadata["Last-Modified"] = res.LastModified + metadata["Last-Modified"] = res.LastModified.Format(http.TimeFormat) // etag to bytes hash, err := hex.DecodeString(res.Etag) diff --git a/s3/s3.go b/worker/s3/s3.go similarity index 97% rename from s3/s3.go rename to worker/s3/s3.go index 
0ac1dbd49..045fdf946 100644 --- a/s3/s3.go +++ b/worker/s3/s3.go @@ -23,7 +23,7 @@ type Opts struct { HostBucketEnabled bool } -type bus interface { +type Bus interface { Bucket(ctx context.Context, bucketName string) (api.Bucket, error) CreateBucket(ctx context.Context, bucketName string, opts api.CreateBucketOptions) error DeleteBucket(ctx context.Context, bucketName string) error @@ -46,7 +46,7 @@ type bus interface { UploadParams(ctx context.Context) (api.UploadParams, error) } -type worker interface { +type Worker interface { GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) @@ -66,7 +66,7 @@ func (l *gofakes3Logger) Print(level gofakes3.LogLevel, v ...interface{}) { } } -func New(b bus, w worker, logger *zap.SugaredLogger, opts Opts) (http.Handler, error) { +func New(b Bus, w Worker, logger *zap.SugaredLogger, opts Opts) (http.Handler, error) { namedLogger := logger.Named("s3") s3Backend := &s3{ b: b, diff --git a/worker/serve.go b/worker/serve.go index 25d0c0412..e4467d960 100644 --- a/worker/serve.go +++ b/worker/serve.go @@ -6,7 +6,6 @@ import ( "io" "net/http" - "github.com/gotd/contrib/http_range" "go.sia.tech/renterd/api" ) @@ -24,14 +23,12 @@ type ( } ) -var errMultiRangeNotSupported = errors.New("multipart ranges are not supported") - -func newContentReader(r io.Reader, obj api.Object, offset int64) io.ReadSeeker { +func newContentReader(r io.Reader, size int64, offset int64) io.ReadSeeker { return &contentReader{ r: r, dataOffset: offset, seekOffset: offset, - size: obj.Size, + size: size, } } @@ -58,67 +55,18 @@ func (cr *contentReader) Read(p []byte) (int, error) { return cr.r.Read(p) } -func serveContent(rw http.ResponseWriter, req 
*http.Request, obj api.Object, downloadFn func(w io.Writer, offset, length int64) error) (int, error) { - // parse offset and length from the request range header - offset, length, err := parseRangeHeader(req, obj) - if err != nil { - return http.StatusRequestedRangeNotSatisfiable, err - } - - // launch the download in a goroutine - pr, pw := io.Pipe() - defer pr.Close() - go func() { - if err := downloadFn(pw, offset, length); err != nil { - pw.CloseWithError(err) - } else { - pw.Close() - } - }() - - // fetch the content type, if not set and we can't infer it from object's - // name we default to application/octet-stream, that is important because we - // have to avoid http.ServeContent to sniff the content type as it would - // require a seek - contentType := obj.ContentType() - if contentType == "" { - contentType = "application/octet-stream" - } - rw.Header().Set("Content-Type", contentType) - - // set the response headers, no need to set Last-Modified header as - // serveContent does that for us - rw.Header().Set("ETag", api.FormatETag(obj.ETag)) +func serveContent(rw http.ResponseWriter, req *http.Request, name string, gor api.GetObjectResponse) { + // set content type and etag + rw.Header().Set("Content-Type", gor.ContentType) + rw.Header().Set("ETag", api.FormatETag(gor.Etag)) // set the user metadata headers - for k, v := range obj.Metadata { + for k, v := range gor.Metadata { rw.Header().Set(fmt.Sprintf("%s%s", api.ObjectMetadataPrefix, k), v) } // create a content reader - rs := newContentReader(pr, obj, offset) + rs := newContentReader(gor.Content, gor.Size, gor.Range.Offset) - http.ServeContent(rw, req, obj.Name, obj.ModTime.Std(), rs) - return http.StatusOK, nil -} - -func parseRangeHeader(req *http.Request, obj api.Object) (int64, int64, error) { - // parse the request range - ranges, err := http_range.ParseRange(req.Header.Get("Range"), obj.Size) - if err != nil { - return 0, 0, err - } - - // extract requested offset and length - offset := 
int64(0) - length := obj.Size - if len(ranges) == 1 { - offset, length = ranges[0].Start, ranges[0].Length - if offset < 0 || length < 0 || offset+length > obj.Size { - return 0, 0, fmt.Errorf("%w: %v %v", http_range.ErrInvalid, offset, length) - } - } else if len(ranges) > 1 { - return 0, 0, errMultiRangeNotSupported - } - return offset, length, nil + http.ServeContent(rw, req, name, gor.LastModified, rs) } diff --git a/worker/worker.go b/worker/worker.go index 43450d933..d843c0dae 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1,6 +1,7 @@ package worker import ( + "bytes" "context" "errors" "fmt" @@ -872,32 +873,45 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) { return } + var off int + if jc.DecodeForm("offset", &off) != nil { + return + } + limit := -1 + if jc.DecodeForm("limit", &limit) != nil { + return + } + // fetch object metadata - res, err := w.bus.Object(jc.Request.Context(), bucket, path, api.GetObjectOptions{ + opts := api.GetObjectOptions{ + Prefix: "", // not relevant for HEAD request + Marker: "", // not relevant for HEAD request + Offset: 0, // not relevant for HEAD request + Limit: 0, // not relevant for HEAD request IgnoreDelim: ignoreDelim, OnlyMetadata: true, + SortBy: "", // not relevant for HEAD request + SortDir: "", // not relevant for HEAD reuqest + } + + gor, err := w.GetObject(jc.Request.Context(), bucket, path, api.DownloadObjectOptions{ + GetObjectOptions: opts, + Range: api.DownloadRange{}, // empty range for HEAD requests }) if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) return - } else if err != nil { - jc.Error(err, http.StatusInternalServerError) + } else if errors.Is(err, http_range.ErrInvalid) { + jc.Error(err, http.StatusBadRequest) return - } else if res.Object == nil { - jc.Error(api.ErrObjectNotFound, http.StatusInternalServerError) // should never happen but checking because we deref. 
later + } else if jc.Check("couldn't get object", err) != nil { return } + defer gor.Content.Close() // serve the content to ensure we're setting the exact same headers as we // would for a GET request - status, err := serveContent(jc.ResponseWriter, jc.Request, *res.Object, func(io.Writer, int64, int64) error { return nil }) - if errors.Is(err, http_range.ErrInvalid) || errors.Is(err, errMultiRangeNotSupported) { - jc.Error(err, http.StatusBadRequest) - } else if errors.Is(err, http_range.ErrNoOverlap) { - jc.Error(err, http.StatusRequestedRangeNotSatisfiable) - } else if err != nil { - jc.Error(err, status) - } + serveContent(jc.ResponseWriter, jc.Request, path, *gor) } func (w *worker) objectsHandlerGET(jc jape.Context) { @@ -949,60 +963,51 @@ func (w *worker) objectsHandlerGET(jc jape.Context) { } path := jc.PathParam("path") - res, err := w.bus.Object(ctx, bucket, path, opts) - if utils.IsErr(err, api.ErrObjectNotFound) { - jc.Error(err, http.StatusNotFound) - return - } else if jc.Check("couldn't get object or entries", err) != nil { - return - } if path == "" || strings.HasSuffix(path, "/") { + // list directory + res, err := w.bus.Object(ctx, bucket, path, opts) + if utils.IsErr(err, api.ErrObjectNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if jc.Check("couldn't get object or entries", err) != nil { + return + } jc.Encode(res.Entries) return } - // return early if the object is empty - if len(res.Object.Slabs) == 0 { + offset, length, err := api.ParseDownloadRange(jc.Request) + if errors.Is(err, http_range.ErrInvalid) || errors.Is(err, api.ErrMultiRangeNotSupported) { + jc.Error(err, http.StatusBadRequest) return - } - - // fetch gouging params - gp, err := w.bus.GougingParams(ctx) - if jc.Check("couldn't fetch gouging parameters from bus", err) != nil { + } else if errors.Is(err, http_range.ErrNoOverlap) { + jc.Error(err, http.StatusRequestedRangeNotSatisfiable) return - } - - // fetch all contracts - contracts, err := 
w.bus.Contracts(ctx, api.ContractsOpts{}) - if err != nil { + } else if err != nil { jc.Error(err, http.StatusInternalServerError) return } - // create a download function - downloadFn := func(wr io.Writer, offset, length int64) (err error) { - ctx = WithGougingChecker(ctx, w.bus, gp) - err = w.downloadManager.DownloadObject(ctx, wr, *res.Object.Object, uint64(offset), uint64(length), contracts) - if err != nil { - w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) && - !errors.Is(err, errDownloadCancelled) && - !errors.Is(err, io.ErrClosedPipe) { - w.registerAlert(newDownloadFailedAlert(bucket, path, prefix, marker, offset, length, int64(len(contracts)), err)) - } - } + gor, err := w.GetObject(ctx, bucket, path, api.DownloadObjectOptions{ + GetObjectOptions: opts, + Range: api.DownloadRange{ + Offset: offset, + Length: length, + }, + }) + if utils.IsErr(err, api.ErrObjectNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if errors.Is(err, http_range.ErrInvalid) { + jc.Error(err, http.StatusBadRequest) + return + } else if jc.Check("couldn't get object", err) != nil { return } + defer gor.Content.Close() // serve the content - status, err := serveContent(jc.ResponseWriter, jc.Request, *res.Object, downloadFn) - if errors.Is(err, http_range.ErrInvalid) || errors.Is(err, errMultiRangeNotSupported) { - jc.Error(err, http.StatusBadRequest) - } else if errors.Is(err, http_range.ErrNoOverlap) { - jc.Error(err, http.StatusRequestedRangeNotSatisfiable) - } else if err != nil { - jc.Error(err, status) - } + serveContent(jc.ResponseWriter, jc.Request, path, *gor) } func (w *worker) objectsHandlerPUT(jc jape.Context) { @@ -1585,3 +1590,80 @@ func isErrHostUnreachable(err error) bool { func isErrDuplicateTransactionSet(err error) bool { return utils.IsErr(err, modules.ErrDuplicateTransactionSet) } + +func (w *worker) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { + // fetch object + 
res, err := w.bus.Object(ctx, bucket, path, opts.GetObjectOptions) + if err != nil { + return nil, fmt.Errorf("couldn't fetch object: %w", err) + } else if res.Object == nil { + return nil, errors.New("object is a directory") + } + obj := *res.Object.Object + + // check size of object against range + if opts.Range.Offset+opts.Range.Length > res.Object.Size { + return nil, http_range.ErrInvalid + } + + // fetch gouging params + gp, err := w.bus.GougingParams(ctx) + if err != nil { + return nil, fmt.Errorf("couldn't fetch gouging parameters from bus: %w", err) + } + + // fetch all contracts + contracts, err := w.bus.Contracts(ctx, api.ContractsOpts{}) + if err != nil { + return nil, fmt.Errorf("couldn't fetch contracts from bus: %w", err) + } + + // prepare the content + var content io.ReadCloser + if opts.Range.Length == 0 || obj.TotalSize() == 0 { + // if the object has no content or the requested range is 0, return an + // empty reader + content = io.NopCloser(bytes.NewReader(nil)) + } else { + // otherwise return a pipe reader + downloadFn := func(wr io.Writer, offset, length int64) error { + ctx = WithGougingChecker(ctx, w.bus, gp) + err = w.downloadManager.DownloadObject(ctx, wr, obj, uint64(offset), uint64(length), contracts) + if err != nil { + w.logger.Error(err) + if !errors.Is(err, ErrShuttingDown) && + !errors.Is(err, errDownloadCancelled) && + !errors.Is(err, io.ErrClosedPipe) { + w.registerAlert(newDownloadFailedAlert(bucket, path, opts.Prefix, opts.Marker, offset, length, int64(len(contracts)), err)) + } + return fmt.Errorf("failed to download object: %w", err) + } + return nil + } + pr, pw := io.Pipe() + go func() { + err := downloadFn(pw, opts.Range.Offset, opts.Range.Length) + pw.CloseWithError(err) + }() + content = pr + } + + return &api.GetObjectResponse{ + Content: content, + HeadObjectResponse: api.HeadObjectResponse{ + ContentType: res.Object.MimeType, + Etag: res.Object.ETag, + LastModified: res.Object.ModTime.Std(), + Range: 
opts.Range.ContentRange(res.Object.Size), + }, + }, nil +} +func (w *worker) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { + panic("not implemented") +} +func (w *worker) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { + panic("not implemented") +} +func (w *worker) UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, path, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) { + panic("not implemented") +} diff --git a/worker/worker_test.go b/worker/worker_test.go index 706fae14e..637168950 100644 --- a/worker/worker_test.go +++ b/worker/worker_test.go @@ -3,8 +3,10 @@ package worker import ( "context" "fmt" + "testing" "time" + "github.com/gotd/contrib/http_range" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" @@ -134,3 +136,10 @@ func newTestSector() (*[rhpv2.SectorSize]byte, types.Hash256) { frand.Read(sector[:]) return §or, rhpv2.SectorRoot(§or) } + +func TestFoo(t *testing.T) { + fmt.Println(http_range.Range{ + Start: 1, + Length: 2, + }.ContentRange(100)) +} From 07cf7f6d117d6c44cdf616a500aba5d9f4128fc2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 4 Apr 2024 15:11:43 +0200 Subject: [PATCH 160/201] e2e: fix TestObjectEntries --- api/worker.go | 7 +++++-- worker/worker.go | 5 +++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/api/worker.go b/api/worker.go index 68e80b80f..ec8be15d3 100644 --- a/api/worker.go +++ b/api/worker.go @@ -289,8 +289,11 @@ func ParseDownloadRange(req *http.Request) (int64, int64, error) { } // extract requested offset and length - if len(ranges) > 1 { + start, length := int64(0), int64(-1) + if len(ranges) == 1 { + start, length = ranges[0].Start, ranges[0].Length + } else if len(ranges) > 1 { return 0, 0, 
ErrMultiRangeNotSupported } - return ranges[0].Start, ranges[0].Length, nil + return start, length, nil } diff --git a/worker/worker.go b/worker/worker.go index d843c0dae..a3922edd7 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1601,6 +1601,11 @@ func (w *worker) GetObject(ctx context.Context, bucket, path string, opts api.Do } obj := *res.Object.Object + // adjust length + if opts.Range.Length == -1 { + opts.Range.Length = res.Object.Size - opts.Range.Offset + } + // check size of object against range if opts.Range.Offset+opts.Range.Length > res.Object.Size { return nil, http_range.ErrInvalid From 7f64f5e7c64a1c15564fa83b5d232423c2181f8b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 4 Apr 2024 16:32:23 +0200 Subject: [PATCH 161/201] worker: implement HeadObject --- internal/test/e2e/cluster_test.go | 5 +- worker/serve.go | 12 ++--- worker/worker.go | 88 ++++++++++++++++++++----------- 3 files changed, 68 insertions(+), 37 deletions(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 754bf273e..c197bc7c9 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -410,8 +410,11 @@ func TestObjectEntries(t *testing.T) { } for _, entry := range got { if !strings.HasSuffix(entry.Name, "/") { - if err := w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, entry.Name, api.DownloadObjectOptions{}); err != nil { + buf := new(bytes.Buffer) + if err := w.DownloadObject(context.Background(), buf, api.DefaultBucketName, entry.Name, api.DownloadObjectOptions{}); err != nil { t.Fatal(err) + } else if buf.Len() != int(entry.Size) { + t.Fatal("unexpected", buf.Len(), entry.Size) } } } diff --git a/worker/serve.go b/worker/serve.go index e4467d960..9c02b9d5e 100644 --- a/worker/serve.go +++ b/worker/serve.go @@ -55,18 +55,18 @@ func (cr *contentReader) Read(p []byte) (int, error) { return cr.r.Read(p) } -func serveContent(rw http.ResponseWriter, req *http.Request, 
name string, gor api.GetObjectResponse) { +func serveContent(rw http.ResponseWriter, req *http.Request, name string, content io.Reader, hor api.HeadObjectResponse) { // set content type and etag - rw.Header().Set("Content-Type", gor.ContentType) - rw.Header().Set("ETag", api.FormatETag(gor.Etag)) + rw.Header().Set("Content-Type", hor.ContentType) + rw.Header().Set("ETag", api.FormatETag(hor.Etag)) // set the user metadata headers - for k, v := range gor.Metadata { + for k, v := range hor.Metadata { rw.Header().Set(fmt.Sprintf("%s%s", api.ObjectMetadataPrefix, k), v) } // create a content reader - rs := newContentReader(gor.Content, gor.Size, gor.Range.Offset) + rs := newContentReader(content, hor.Size, hor.Range.Offset) - http.ServeContent(rw, req, name, gor.LastModified, rs) + http.ServeContent(rw, req, name, hor.LastModified, rs) } diff --git a/worker/worker.go b/worker/worker.go index a3922edd7..300b09d42 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -882,21 +882,25 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) { return } - // fetch object metadata - opts := api.GetObjectOptions{ - Prefix: "", // not relevant for HEAD request - Marker: "", // not relevant for HEAD request - Offset: 0, // not relevant for HEAD request - Limit: 0, // not relevant for HEAD request - IgnoreDelim: ignoreDelim, - OnlyMetadata: true, - SortBy: "", // not relevant for HEAD request - SortDir: "", // not relevant for HEAD reuqest + offset, length, err := api.ParseDownloadRange(jc.Request) + if errors.Is(err, http_range.ErrInvalid) || errors.Is(err, api.ErrMultiRangeNotSupported) { + jc.Error(err, http.StatusBadRequest) + return + } else if errors.Is(err, http_range.ErrNoOverlap) { + jc.Error(err, http.StatusRequestedRangeNotSatisfiable) + return + } else if err != nil { + jc.Error(err, http.StatusInternalServerError) + return } - gor, err := w.GetObject(jc.Request.Context(), bucket, path, api.DownloadObjectOptions{ - GetObjectOptions: opts, - Range: 
api.DownloadRange{}, // empty range for HEAD requests + // fetch object metadata + hor, err := w.HeadObject(jc.Request.Context(), bucket, path, api.HeadObjectOptions{ + IgnoreDelim: ignoreDelim, + Range: api.DownloadRange{ + Offset: offset, + Length: length, + }, }) if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) @@ -907,11 +911,10 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) { } else if jc.Check("couldn't get object", err) != nil { return } - defer gor.Content.Close() // serve the content to ensure we're setting the exact same headers as we // would for a GET request - serveContent(jc.ResponseWriter, jc.Request, path, *gor) + serveContent(jc.ResponseWriter, jc.Request, path, bytes.NewReader(nil), *hor) } func (w *worker) objectsHandlerGET(jc jape.Context) { @@ -1007,7 +1010,7 @@ func (w *worker) objectsHandlerGET(jc jape.Context) { defer gor.Content.Close() // serve the content - serveContent(jc.ResponseWriter, jc.Request, path, *gor) + serveContent(jc.ResponseWriter, jc.Request, path, gor.Content, gor.HeadObjectResponse) } func (w *worker) objectsHandlerPUT(jc jape.Context) { @@ -1591,15 +1594,17 @@ func isErrDuplicateTransactionSet(err error) bool { return utils.IsErr(err, modules.ErrDuplicateTransactionSet) } -func (w *worker) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { +func (w *worker) headObject(ctx context.Context, bucket, path string, onlyMetadata bool, opts api.HeadObjectOptions) (*api.HeadObjectResponse, api.ObjectsResponse, error) { // fetch object - res, err := w.bus.Object(ctx, bucket, path, opts.GetObjectOptions) + res, err := w.bus.Object(ctx, bucket, path, api.GetObjectOptions{ + IgnoreDelim: opts.IgnoreDelim, + OnlyMetadata: onlyMetadata, + }) if err != nil { - return nil, fmt.Errorf("couldn't fetch object: %w", err) + return nil, api.ObjectsResponse{}, fmt.Errorf("couldn't fetch object: %w", err) } else if res.Object == nil { - 
return nil, errors.New("object is a directory") + return nil, api.ObjectsResponse{}, errors.New("object is a directory") } - obj := *res.Object.Object // adjust length if opts.Range.Length == -1 { @@ -1608,9 +1613,34 @@ func (w *worker) GetObject(ctx context.Context, bucket, path string, opts api.Do // check size of object against range if opts.Range.Offset+opts.Range.Length > res.Object.Size { - return nil, http_range.ErrInvalid + return nil, api.ObjectsResponse{}, http_range.ErrInvalid } + return &api.HeadObjectResponse{ + ContentType: res.Object.MimeType, + Etag: res.Object.ETag, + LastModified: res.Object.ModTime.Std(), + Range: opts.Range.ContentRange(res.Object.Size), + Size: res.Object.Size, + Metadata: res.Object.Metadata, + }, res, nil +} + +func (w *worker) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { + // head object + hor, res, err := w.headObject(ctx, bucket, path, false, api.HeadObjectOptions{ + IgnoreDelim: opts.IgnoreDelim, + Range: opts.Range, + }) + if err != nil { + return nil, fmt.Errorf("couldn't fetch object: %w", err) + } + obj := *res.Object.Object + + // adjust range + opts.Range.Offset = hor.Range.Offset + opts.Range.Length = hor.Range.Length + // fetch gouging params gp, err := w.bus.GougingParams(ctx) if err != nil { @@ -1654,18 +1684,16 @@ func (w *worker) GetObject(ctx context.Context, bucket, path string, opts api.Do } return &api.GetObjectResponse{ - Content: content, - HeadObjectResponse: api.HeadObjectResponse{ - ContentType: res.Object.MimeType, - Etag: res.Object.ETag, - LastModified: res.Object.ModTime.Std(), - Range: opts.Range.ContentRange(res.Object.Size), - }, + Content: content, + HeadObjectResponse: *hor, }, nil } + func (w *worker) HeadObject(ctx context.Context, bucket, path string, opts api.HeadObjectOptions) (*api.HeadObjectResponse, error) { - panic("not implemented") + res, _, err := w.headObject(ctx, bucket, path, true, opts) + return res, err 
} + func (w *worker) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { panic("not implemented") } From 331b8359cd82c1f8db05d0db7ec9f1ac6dcd461b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 4 Apr 2024 16:54:46 +0200 Subject: [PATCH 162/201] e2e: fix TestObjectMetadata --- api/object.go | 3 +-- internal/test/e2e/metadata_test.go | 4 +++- worker/client/client.go | 2 +- worker/s3/backend.go | 4 ++-- worker/serve.go | 2 +- worker/worker.go | 2 +- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/api/object.go b/api/object.go index b269baff2..9ab1d6372 100644 --- a/api/object.go +++ b/api/object.go @@ -9,7 +9,6 @@ import ( "net/url" "path/filepath" "strings" - "time" "go.sia.tech/renterd/object" ) @@ -94,7 +93,7 @@ type ( HeadObjectResponse struct { ContentType string Etag string - LastModified time.Time + LastModified TimeRFC3339 Range *ContentRange Size int64 Metadata ObjectUserMetadata diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go index 8fa2bd4ae..cc95413b4 100644 --- a/internal/test/e2e/metadata_test.go +++ b/internal/test/e2e/metadata_test.go @@ -5,6 +5,7 @@ import ( "context" "reflect" "testing" + "time" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/test" @@ -61,12 +62,13 @@ func TestObjectMetadata(t *testing.T) { // perform a HEAD request and assert the headers are all present hor, err := w.HeadObject(context.Background(), api.DefaultBucketName, t.Name(), api.HeadObjectOptions{Range: api.DownloadRange{Offset: 1, Length: 1}}) + hor.LastModified = api.TimeRFC3339(hor.LastModified.Std().Round(time.Second)) if err != nil { t.Fatal(err) } else if !reflect.DeepEqual(hor, &api.HeadObjectResponse{ ContentType: or.Object.ContentType(), Etag: gor.Etag, - LastModified: or.Object.ModTime.Std(), + LastModified: api.TimeRFC3339(or.Object.ModTime.Std().Round(time.Second)), Range: &api.ContentRange{Offset: 1, Length: 1, 
Size: int64(len(data))}, Size: int64(len(data)), Metadata: gor.Metadata, diff --git a/worker/client/client.go b/worker/client/client.go index aecad676b..71fd200ad 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -332,7 +332,7 @@ func parseObjectResponseHeaders(header http.Header) (api.HeadObjectResponse, err return api.HeadObjectResponse{ ContentType: header.Get("Content-Type"), Etag: trimEtag(header.Get("ETag")), - LastModified: modTime, + LastModified: api.TimeRFC3339(modTime), Range: r, Size: size, Metadata: api.ExtractObjectUserMetadataFrom(headers), diff --git a/worker/s3/backend.go b/worker/s3/backend.go index 2eff3d713..d69eaff17 100644 --- a/worker/s3/backend.go +++ b/worker/s3/backend.go @@ -278,7 +278,7 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range // decorate metadata res.Metadata["Content-Type"] = res.ContentType - res.Metadata["Last-Modified"] = res.LastModified.Format(http.TimeFormat) + res.Metadata["Last-Modified"] = res.LastModified.Std().Format(http.TimeFormat) // etag to bytes etag, err := hex.DecodeString(res.Etag) @@ -323,7 +323,7 @@ func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*go // decorate metadata metadata["Content-Type"] = res.ContentType - metadata["Last-Modified"] = res.LastModified.Format(http.TimeFormat) + metadata["Last-Modified"] = res.LastModified.Std().Format(http.TimeFormat) // etag to bytes hash, err := hex.DecodeString(res.Etag) diff --git a/worker/serve.go b/worker/serve.go index 9c02b9d5e..31c347ff7 100644 --- a/worker/serve.go +++ b/worker/serve.go @@ -68,5 +68,5 @@ func serveContent(rw http.ResponseWriter, req *http.Request, name string, conten // create a content reader rs := newContentReader(content, hor.Size, hor.Range.Offset) - http.ServeContent(rw, req, name, hor.LastModified, rs) + http.ServeContent(rw, req, name, hor.LastModified.Std(), rs) } diff --git a/worker/worker.go b/worker/worker.go index 300b09d42..68bb509e7 100644 --- 
a/worker/worker.go +++ b/worker/worker.go @@ -1619,7 +1619,7 @@ func (w *worker) headObject(ctx context.Context, bucket, path string, onlyMetada return &api.HeadObjectResponse{ ContentType: res.Object.MimeType, Etag: res.Object.ETag, - LastModified: res.Object.ModTime.Std(), + LastModified: res.Object.ModTime, Range: opts.Range.ContentRange(res.Object.Size), Size: res.Object.Size, Metadata: res.Object.Metadata, From 5e05f7751720fc30e30b1cefa98cf2580a2bd964 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 10:20:18 +0200 Subject: [PATCH 163/201] worker: implement UploadObject --- worker/upload.go | 3 +- worker/upload_params.go | 29 ++++---- worker/upload_test.go | 15 ++-- worker/worker.go | 151 ++++++++++++++++++++++------------------ 4 files changed, 111 insertions(+), 87 deletions(-) diff --git a/worker/upload.go b/worker/upload.go index 4e82f533e..65322baa9 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -154,8 +154,9 @@ func (w *worker) initUploadManager(maxMemory, maxOverdrive uint64, overdriveTime w.uploadManager = newUploadManager(w.shutdownCtx, w, mm, w.bus, w.bus, w.bus, maxOverdrive, overdriveTimeout, w.contractLockingDuration, logger) } -func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.ContractMetadata, up uploadParameters, opts ...UploadOption) (_ string, err error) { +func (w *worker) upload(ctx context.Context, bucket, path string, r io.Reader, contracts []api.ContractMetadata, opts ...UploadOption) (_ string, err error) { // apply the options + up := defaultParameters(bucket, path) for _, opt := range opts { opt(&up) } diff --git a/worker/upload_params.go b/worker/upload_params.go index d3cca49e4..ae8baa8d0 100644 --- a/worker/upload_params.go +++ b/worker/upload_params.go @@ -38,22 +38,6 @@ func defaultParameters(bucket, path string) uploadParameters { } } -func multipartParameters(bucket, path, uploadID string, partNumber int) uploadParameters { - return uploadParameters{ - bucket: bucket, - path: 
path, - - multipart: true, - uploadID: uploadID, - partNumber: partNumber, - - ec: object.GenerateEncryptionKey(), // random key - encryptionOffset: 0, // from the beginning - - rs: build.DefaultRedundancySettings, - } -} - type UploadOption func(*uploadParameters) func WithBlockHeight(bh uint64) UploadOption { @@ -92,12 +76,25 @@ func WithPacking(packing bool) UploadOption { } } +func WithPartNumber(partNumber int) UploadOption { + return func(up *uploadParameters) { + up.partNumber = partNumber + } +} + func WithRedundancySettings(rs api.RedundancySettings) UploadOption { return func(up *uploadParameters) { up.rs = rs } } +func WithUploadID(uploadID string) UploadOption { + return func(up *uploadParameters) { + up.uploadID = uploadID + up.multipart = true + } +} + func WithObjectUserMetadata(metadata api.ObjectUserMetadata) UploadOption { return func(up *uploadParameters) { up.metadata = metadata diff --git a/worker/upload_test.go b/worker/upload_test.go index 0b6308ffe..4221a5414 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -220,7 +220,7 @@ func TestUploadPackedSlab(t *testing.T) { uploadBytes := func(n int) { t.Helper() params.path = fmt.Sprintf("%s_%d", t.Name(), c) - _, err := w.upload(context.Background(), bytes.NewReader(frand.Bytes(n)), w.Contracts(), params) + _, err := w.upload(context.Background(), params.bucket, params.path, bytes.NewReader(frand.Bytes(n)), w.Contracts(), testOpts()...) 
if err != nil { t.Fatal(err) } @@ -505,7 +505,7 @@ func TestRefreshUploaders(t *testing.T) { // upload data contracts := w.Contracts() - _, err := w.upload(context.Background(), bytes.NewReader(data), contracts, params) + _, err := w.upload(context.Background(), params.bucket, t.Name(), bytes.NewReader(data), contracts) if err != nil { t.Fatal(err) } @@ -607,7 +607,7 @@ func TestUploadRegression(t *testing.T) { // upload data ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - _, err := w.upload(ctx, bytes.NewReader(data), w.Contracts(), params) + _, err := w.upload(ctx, params.bucket, params.path, bytes.NewReader(data), w.Contracts(), testOpts()...) if !errors.Is(err, errUploadInterrupted) { t.Fatal(err) } @@ -616,7 +616,7 @@ func TestUploadRegression(t *testing.T) { unblock() // upload data - _, err = w.upload(context.Background(), bytes.NewReader(data), w.Contracts(), params) + _, err = w.upload(context.Background(), params.bucket, params.path, bytes.NewReader(data), w.Contracts(), testOpts()...) 
if err != nil { t.Fatal(err) } @@ -637,6 +637,13 @@ func TestUploadRegression(t *testing.T) { } } +func testOpts() []UploadOption { + return []UploadOption{ + WithContractSet(testContractSet), + WithRedundancySettings(testRedundancySettings), + } +} + func testParameters(path string) uploadParameters { return uploadParameters{ bucket: testBucket, diff --git a/worker/worker.go b/worker/worker.go index 68bb509e7..46047d091 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1020,18 +1020,10 @@ func (w *worker) objectsHandlerPUT(jc jape.Context) { // grab the path path := jc.PathParam("path") - // fetch the upload parameters - up, err := w.bus.UploadParams(ctx) - if jc.Check("couldn't fetch upload parameters from bus", err) != nil { - return - } - // decode the contract set from the query string var contractset string if jc.DecodeForm("contractset", &contractset) != nil { return - } else if contractset != "" { - up.ContractSet = contractset } // decode the mimetype from the query string @@ -1046,35 +1038,12 @@ func (w *worker) objectsHandlerPUT(jc jape.Context) { return } - // return early if the bucket does not exist - _, err = w.bus.Bucket(ctx, bucket) - if utils.IsErr(err, api.ErrBucketNotFound) { - jc.Error(fmt.Errorf("bucket '%s' not found; %w", bucket, err), http.StatusNotFound) - return - } - - // cancel the upload if no contract set is specified - if up.ContractSet == "" { - jc.Error(api.ErrContractSetNotSpecified, http.StatusBadRequest) - return - } - - // cancel the upload if consensus is not synced - if !up.ConsensusState.Synced { - w.logger.Errorf("upload cancelled, err: %v", api.ErrConsensusNotSynced) - jc.Error(api.ErrConsensusNotSynced, http.StatusServiceUnavailable) - return - } - // allow overriding the redundancy settings - rs := up.RedundancySettings - if jc.DecodeForm("minshards", &rs.MinShards) != nil { + var minShards, totalShards int + if jc.DecodeForm("minshards", &minShards) != nil { return } - if jc.DecodeForm("totalshards", 
&rs.TotalShards) != nil { - return - } - if jc.Check("invalid redundancy settings", rs.Validate()) != nil { + if jc.DecodeForm("totalshards", &totalShards) != nil { return } @@ -1086,40 +1055,28 @@ func (w *worker) objectsHandlerPUT(jc jape.Context) { } } - // build options - opts := []UploadOption{ - WithBlockHeight(up.CurrentHeight), - WithContractSet(up.ContractSet), - WithMimeType(mimeType), - WithPacking(up.UploadPacking), - WithRedundancySettings(up.RedundancySettings), - WithObjectUserMetadata(metadata), - } - - // attach gouging checker to the context - ctx = WithGougingChecker(ctx, w.bus, up.GougingParams) - - // fetch contracts - contracts, err := w.bus.Contracts(ctx, api.ContractsOpts{ContractSet: up.ContractSet}) - if jc.Check("couldn't fetch contracts from bus", err) != nil { - return - } - // upload the object - params := defaultParameters(bucket, path) - eTag, err := w.upload(ctx, jc.Request.Body, contracts, params, opts...) - if err := jc.Check("couldn't upload object", err); err != nil { - if err != nil { - w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) && !errors.Is(err, context.Canceled) { - w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, mimeType, rs.MinShards, rs.TotalShards, len(contracts), up.UploadPacking, false, err)) - } - } + resp, err := w.UploadObject(ctx, jc.Request.Body, bucket, path, api.UploadObjectOptions{ + MinShards: minShards, + TotalShards: totalShards, + ContractSet: contractset, + ContentLength: jc.Request.ContentLength, + MimeType: mimeType, + Metadata: metadata, + }) + if utils.IsErr(err, api.ErrBucketNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if utils.IsErr(err, api.ErrContractSetNotSpecified) { + jc.Error(err, http.StatusBadRequest) + return + } else if utils.IsErr(err, api.ErrConsensusNotSynced) { + jc.Error(err, http.StatusServiceUnavailable) return } // set etag header - jc.ResponseWriter.Header().Set("ETag", 
api.FormatETag(eTag)) + jc.ResponseWriter.Header().Set("ETag", api.FormatETag(resp.ETag)) } func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { @@ -1224,6 +1181,8 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { WithPacking(up.UploadPacking), WithRedundancySettings(up.RedundancySettings), WithCustomKey(upload.Key), + WithPartNumber(partNumber), + WithUploadID(uploadID), } // make sure only one of the following is set @@ -1244,8 +1203,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { } // upload the multipart - params := multipartParameters(bucket, path, uploadID, partNumber) - eTag, err := w.upload(ctx, jc.Request.Body, contracts, params, opts...) + eTag, err := w.upload(ctx, bucket, path, jc.Request.Body, contracts, opts...) if jc.Check("couldn't upload object", err) != nil { if err != nil { w.logger.Error(err) @@ -1695,8 +1653,69 @@ func (w *worker) HeadObject(ctx context.Context, bucket, path string, opts api.H } func (w *worker) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { - panic("not implemented") + // return early if the bucket does not exist + _, err := w.bus.Bucket(ctx, bucket) + if err != nil { + return nil, fmt.Errorf("bucket '%s' not found; %w", bucket, err) + } + + // fetch the upload parameters + up, err := w.bus.UploadParams(ctx) + if err != nil { + return nil, fmt.Errorf("couldn't fetch upload parameters from bus: %w", err) + } else if opts.ContractSet != "" { + up.ContractSet = opts.ContractSet + } else if up.ContractSet == "" { + return nil, api.ErrContractSetNotSpecified + } + + // cancel the upload if consensus is not synced + if !up.ConsensusState.Synced { + return nil, api.ErrConsensusNotSynced + } + + // allow overriding the redundancy settings + if opts.MinShards != 0 { + up.RedundancySettings.MinShards = opts.MinShards + } + if opts.TotalShards != 0 { + up.RedundancySettings.TotalShards = opts.TotalShards + } 
+ err = api.RedundancySettings{MinShards: opts.MinShards, TotalShards: opts.TotalShards}.Validate() + if err != nil { + return nil, fmt.Errorf("invalid redundancy settings: %w", err) + } + + // attach gouging checker to the context + ctx = WithGougingChecker(ctx, w.bus, up.GougingParams) + + // fetch contracts + contracts, err := w.bus.Contracts(ctx, api.ContractsOpts{ContractSet: up.ContractSet}) + if err != nil { + return nil, fmt.Errorf("couldn't fetch contracts from bus: %w", err) + } + + // upload + eTag, err := w.upload(ctx, bucket, path, r, contracts, []UploadOption{ + WithBlockHeight(up.CurrentHeight), + WithContractSet(up.ContractSet), + WithMimeType(opts.MimeType), + WithPacking(up.UploadPacking), + WithRedundancySettings(up.RedundancySettings), + WithObjectUserMetadata(opts.Metadata), + }...) + if err != nil { + w.logger.With(zap.Error(err)).With("path", path).With("bucket", bucket).Error("failed to upload object") + if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) && !errors.Is(err, context.Canceled) { + w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, opts.MimeType, up.RedundancySettings.MinShards, up.RedundancySettings.TotalShards, len(contracts), up.UploadPacking, false, err)) + } + return nil, fmt.Errorf("couldn't upload object: %w", err) + } + return &api.UploadObjectResponse{ + ETag: eTag, + }, nil } + func (w *worker) UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, path, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) { panic("not implemented") } From 1c01b3994d4cb3a30e142bcb9c31932324ea6890 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 10:42:44 +0200 Subject: [PATCH 164/201] worker: fix TestRefreshUploaders and TestUploadPackedSlabs --- worker/upload_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/worker/upload_test.go b/worker/upload_test.go index 
4221a5414..477c1a72c 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -140,6 +140,8 @@ func TestUploadPackedSlab(t *testing.T) { // create upload params params := testParameters(t.Name()) params.packing = true + opts := testOpts() + opts = append(opts, WithPacking(true)) // create test data data := frand.Bytes(128) @@ -220,7 +222,7 @@ func TestUploadPackedSlab(t *testing.T) { uploadBytes := func(n int) { t.Helper() params.path = fmt.Sprintf("%s_%d", t.Name(), c) - _, err := w.upload(context.Background(), params.bucket, params.path, bytes.NewReader(frand.Bytes(n)), w.Contracts(), testOpts()...) + _, err := w.upload(context.Background(), params.bucket, params.path, bytes.NewReader(frand.Bytes(n)), w.Contracts(), opts...) if err != nil { t.Fatal(err) } @@ -502,10 +504,11 @@ func TestRefreshUploaders(t *testing.T) { // create upload params params := testParameters(t.Name()) + opts := testOpts() // upload data contracts := w.Contracts() - _, err := w.upload(context.Background(), params.bucket, t.Name(), bytes.NewReader(data), contracts) + _, err := w.upload(context.Background(), params.bucket, t.Name(), bytes.NewReader(data), contracts, opts...) 
if err != nil { t.Fatal(err) } From 5bfdfec3f223a2bc1d53d56fc4b6c6a6a14d635c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 11:34:14 +0200 Subject: [PATCH 165/201] publish.yml: downgrade upload-artifact to v3 --- .github/workflows/publish.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 3bf909519..978ad7101 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -112,7 +112,7 @@ jobs: go build -tags="$BUILD_TAGS" -trimpath -o bin/ -a -ldflags '-s -w -linkmode external -extldflags "-static"' ./cmd/renterd cp README.md LICENSE bin/ zip -qj $ZIP_OUTPUT bin/* - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v3 with: name: renterd path: release/ @@ -208,7 +208,7 @@ jobs: /usr/bin/codesign --deep -f -v --timestamp -o runtime,library -s $APPLE_CERT_ID bin/renterd ditto -ck bin $ZIP_OUTPUT xcrun notarytool submit -k ~/private_keys/AuthKey_$APPLE_API_KEY.p8 -d $APPLE_API_KEY -i $APPLE_API_ISSUER --wait --timeout 10m $ZIP_OUTPUT - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v3 with: name: renterd path: release/ @@ -249,7 +249,7 @@ jobs: azuresigntool sign -kvu "${{ secrets.AZURE_KEY_VAULT_URI }}" -kvi "${{ secrets.AZURE_CLIENT_ID }}" -kvt "${{ secrets.AZURE_TENANT_ID }}" -kvs "${{ secrets.AZURE_CLIENT_SECRET }}" -kvc ${{ secrets.AZURE_CERT_NAME }} -tr http://timestamp.digicert.com -v bin/renterd.exe cp README.md LICENSE bin/ 7z a $ZIP_OUTPUT bin/* - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v3 with: name: renterd path: release/ From 5cd675a7895339100891b826c55c354ea108f690 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 11:55:28 +0200 Subject: [PATCH 166/201] e2e: fix TestObjectEntries --- worker/worker.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/worker/worker.go b/worker/worker.go index 46047d091..1cc956484 
100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1073,6 +1073,8 @@ func (w *worker) objectsHandlerPUT(jc jape.Context) { } else if utils.IsErr(err, api.ErrConsensusNotSynced) { jc.Error(err, http.StatusServiceUnavailable) return + } else if jc.Check("couldn't upload object", err) != nil { + return } // set etag header @@ -1681,7 +1683,7 @@ func (w *worker) UploadObject(ctx context.Context, r io.Reader, bucket, path str if opts.TotalShards != 0 { up.RedundancySettings.TotalShards = opts.TotalShards } - err = api.RedundancySettings{MinShards: opts.MinShards, TotalShards: opts.TotalShards}.Validate() + err = api.RedundancySettings{MinShards: up.RedundancySettings.MinShards, TotalShards: up.RedundancySettings.TotalShards}.Validate() if err != nil { return nil, fmt.Errorf("invalid redundancy settings: %w", err) } From 055a6e0e04e2f938d7997a49ebf4f7dde58a5477 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 12:13:32 +0200 Subject: [PATCH 167/201] e2e: fix TestObjecdtMetadata NDF --- internal/test/e2e/metadata_test.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go index cc95413b4..3894dbdee 100644 --- a/internal/test/e2e/metadata_test.go +++ b/internal/test/e2e/metadata_test.go @@ -3,6 +3,7 @@ package e2e import ( "bytes" "context" + "net/http" "reflect" "testing" "time" @@ -60,15 +61,22 @@ func TestObjectMetadata(t *testing.T) { t.Fatal("missing etag") } + // HeadObject retrieves the modtime from a http header so it's not as + // accurate as the modtime from the object GET endpoint which returns it in + // the body. 
+ orModtime, err := time.Parse(http.TimeFormat, or.Object.ModTime.Std().Format(http.TimeFormat)) + if err != nil { + t.Fatal(err) + } + // perform a HEAD request and assert the headers are all present hor, err := w.HeadObject(context.Background(), api.DefaultBucketName, t.Name(), api.HeadObjectOptions{Range: api.DownloadRange{Offset: 1, Length: 1}}) - hor.LastModified = api.TimeRFC3339(hor.LastModified.Std().Round(time.Second)) if err != nil { t.Fatal(err) } else if !reflect.DeepEqual(hor, &api.HeadObjectResponse{ ContentType: or.Object.ContentType(), Etag: gor.Etag, - LastModified: api.TimeRFC3339(or.Object.ModTime.Std().Round(time.Second)), + LastModified: api.TimeRFC3339(orModtime), Range: &api.ContentRange{Offset: 1, Length: 1, Size: int64(len(data))}, Size: int64(len(data)), Metadata: gor.Metadata, From 87d659c59a4820f2d2eb8827cd299cc1ef9fdef7 Mon Sep 17 00:00:00 2001 From: PJ Date: Fri, 5 Apr 2024 12:10:17 +0200 Subject: [PATCH 168/201] stores: fix retryTransaction --- stores/sql.go | 25 ++++++++++++++------ stores/sql_test.go | 59 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 7 deletions(-) diff --git a/stores/sql.go b/stores/sql.go index 34a6d78ab..320ba1cf6 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -525,7 +525,7 @@ func (ss *SQLStore) applyUpdates(force bool) error { return nil } -func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) error) error { +func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) error) (err error) { abortRetry := func(err error) bool { if err == nil || errors.Is(err, context.Canceled) || @@ -549,14 +549,25 @@ func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) er } return false } - var err error - for i := 0; i < len(s.retryTransactionIntervals); i++ { + + attempts := len(s.retryTransactionIntervals) + 1 + for i := 1; i <= attempts; i++ { + // execute the transaction err = s.db.WithContext(ctx).Transaction(fc) - if 
abortRetry(err) { - return err + if err == nil || abortRetry(err) { + return } - s.logger.Warn(fmt.Sprintf("transaction attempt %d/%d failed, retry in %v, err: %v", i+1, len(s.retryTransactionIntervals), s.retryTransactionIntervals[i], err)) - time.Sleep(s.retryTransactionIntervals[i]) + + // if this was the last attempt, return the error + if i-1 == len(s.retryTransactionIntervals) { + s.logger.Warn(fmt.Sprintf("transaction attempt %d/%d failed, err: %v", i, attempts, err)) + return + } + + // log the failed attempt and sleep before retrying + interval := s.retryTransactionIntervals[i-1] + s.logger.Warn(fmt.Sprintf("transaction attempt %d/%d failed, retry in %v, err: %v", i, attempts, interval, err)) + time.Sleep(interval) } return fmt.Errorf("retryTransaction failed: %w", err) } diff --git a/stores/sql_test.go b/stores/sql_test.go index 2d29763bb..01ce8fe6b 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -4,13 +4,16 @@ import ( "bytes" "context" "encoding/hex" + "errors" "fmt" "os" "path/filepath" + "reflect" "strings" "testing" "time" + "github.com/google/go-cmp/cmp" "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" @@ -18,6 +21,7 @@ import ( "go.sia.tech/siad/modules" "go.uber.org/zap" "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" "gorm.io/gorm" "gorm.io/gorm/logger" "lukechampine.com/frand" @@ -417,3 +421,58 @@ func TestApplyUpdatesErr(t *testing.T) { t.Fatal("lastSave should not have changed") } } + +func TestRetryTransaction(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // create custom logger to capture logs + observedZapCore, observedLogs := observer.New(zap.InfoLevel) + ss.logger = zap.New(observedZapCore).Sugar() + + // collectLogs returns all logs + collectLogs := func() (logs []string) { + t.Helper() + for _, entry := range observedLogs.All() { + logs = append(logs, entry.Message) + } + return + } + + // disable retries and retry a transaction that 
fails + ss.retryTransactionIntervals = nil + ss.retryTransaction(context.Background(), func(tx *gorm.DB) error { return errors.New("database locked") }) + + // assert transaction is attempted once and not retried + got := collectLogs() + want := []string{"transaction attempt 1/1 failed, err: database locked"} + if !reflect.DeepEqual(got, want) { + t.Fatal("unexpected logs", cmp.Diff(got, want)) + } + + // enable retries and retry the same transaction + ss.retryTransactionIntervals = []time.Duration{ + 5 * time.Millisecond, + 10 * time.Millisecond, + 15 * time.Millisecond, + } + ss.retryTransaction(context.Background(), func(tx *gorm.DB) error { return errors.New("database locked") }) + + // assert transaction is retried 4 times in total + got = collectLogs() + want = append(want, + "transaction attempt 1/4 failed, retry in 5ms, err: database locked", + "transaction attempt 2/4 failed, retry in 10ms, err: database locked", + "transaction attempt 3/4 failed, retry in 15ms, err: database locked", + "transaction attempt 4/4 failed, err: database locked", + ) + if !reflect.DeepEqual(got, want) { + t.Fatal("unexpected logs", cmp.Diff(got, want)) + } + + // retry transaction that aborts, assert no logs were added + ss.retryTransaction(context.Background(), func(tx *gorm.DB) error { return context.Canceled }) + if len(observedLogs.All()) != len(want) { + t.Fatal("expected no logs") + } +} From 26e299966d4c7b4d29e06e423755f5bd85fed1d4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 13:25:43 +0200 Subject: [PATCH 169/201] e2e: fix TestS3Basic --- api/object.go | 8 ++++---- api/worker.go | 12 ++++++------ internal/test/e2e/cluster_test.go | 6 +++--- internal/test/e2e/metadata_test.go | 2 +- worker/s3/backend.go | 2 +- worker/worker.go | 20 ++++++++++---------- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/api/object.go b/api/object.go index 9ab1d6372..41ab7ae62 100644 --- a/api/object.go +++ b/api/object.go @@ -208,12 +208,12 @@ type ( 
HeadObjectOptions struct { IgnoreDelim bool - Range DownloadRange + Range *DownloadRange } DownloadObjectOptions struct { GetObjectOptions - Range DownloadRange + Range *DownloadRange } GetObjectOptions struct { @@ -291,7 +291,7 @@ func (opts DownloadObjectOptions) ApplyValues(values url.Values) { } func (opts DownloadObjectOptions) ApplyHeaders(h http.Header) { - if opts.Range != (DownloadRange{}) { + if opts.Range != nil { if opts.Range.Length == -1 { h.Set("Range", fmt.Sprintf("bytes=%v-", opts.Range.Offset)) } else { @@ -313,7 +313,7 @@ func (opts HeadObjectOptions) Apply(values url.Values) { } func (opts HeadObjectOptions) ApplyHeaders(h http.Header) { - if opts.Range != (DownloadRange{}) { + if opts.Range != nil { if opts.Range.Length == -1 { h.Set("Range", fmt.Sprintf("bytes=%v-", opts.Range.Offset)) } else { diff --git a/api/worker.go b/api/worker.go index ec8be15d3..c0249135f 100644 --- a/api/worker.go +++ b/api/worker.go @@ -279,21 +279,21 @@ func ParseContentRange(contentRange string) (ContentRange, error) { }, nil } -func ParseDownloadRange(req *http.Request) (int64, int64, error) { +func ParseDownloadRange(req *http.Request) (DownloadRange, error) { // parse the request range // we pass math.MaxInt64 since a range header in a request doesn't have a // size ranges, err := http_range.ParseRange(req.Header.Get("Range"), math.MaxInt64) if err != nil { - return 0, 0, err + return DownloadRange{}, err } // extract requested offset and length - start, length := int64(0), int64(-1) + dr := DownloadRange{Offset: 0, Length: -1} if len(ranges) == 1 { - start, length = ranges[0].Start, ranges[0].Length + dr.Offset, dr.Length = ranges[0].Start, ranges[0].Length } else if len(ranges) > 1 { - return 0, 0, ErrMultiRangeNotSupported + return DownloadRange{}, ErrMultiRangeNotSupported } - return start, length, nil + return dr, nil } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index c197bc7c9..55db48c19 100644 --- 
a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -588,7 +588,7 @@ func TestUploadDownloadBasic(t *testing.T) { for i := int64(0); i < 4; i++ { offset := i * 32 var buffer bytes.Buffer - tt.OK(w.DownloadObject(context.Background(), &buffer, api.DefaultBucketName, path, api.DownloadObjectOptions{Range: api.DownloadRange{Offset: offset, Length: 32}})) + tt.OK(w.DownloadObject(context.Background(), &buffer, api.DefaultBucketName, path, api.DownloadObjectOptions{Range: &api.DownloadRange{Offset: offset, Length: 32}})) if !bytes.Equal(data[offset:offset+32], buffer.Bytes()) { fmt.Println(data[offset : offset+32]) fmt.Println(buffer.Bytes()) @@ -1562,7 +1562,7 @@ func TestUploadPacking(t *testing.T) { &buffer, api.DefaultBucketName, path, - api.DownloadObjectOptions{Range: api.DownloadRange{Offset: offset, Length: length}}, + api.DownloadObjectOptions{Range: &api.DownloadRange{Offset: offset, Length: length}}, ); err != nil { t.Fatal(err) } @@ -2131,7 +2131,7 @@ func TestMultipartUploads(t *testing.T) { } // Download a range of the object - gor, err = w.GetObject(context.Background(), api.DefaultBucketName, objPath, api.DownloadObjectOptions{Range: api.DownloadRange{Offset: 0, Length: 1}}) + gor, err = w.GetObject(context.Background(), api.DefaultBucketName, objPath, api.DownloadObjectOptions{Range: &api.DownloadRange{Offset: 0, Length: 1}}) tt.OK(err) if gor.Range == nil || gor.Range.Offset != 0 || gor.Range.Length != 1 { t.Fatal("unexpected range:", gor.Range) diff --git a/internal/test/e2e/metadata_test.go b/internal/test/e2e/metadata_test.go index 3894dbdee..4bb1ea2dd 100644 --- a/internal/test/e2e/metadata_test.go +++ b/internal/test/e2e/metadata_test.go @@ -70,7 +70,7 @@ func TestObjectMetadata(t *testing.T) { } // perform a HEAD request and assert the headers are all present - hor, err := w.HeadObject(context.Background(), api.DefaultBucketName, t.Name(), api.HeadObjectOptions{Range: api.DownloadRange{Offset: 1, Length: 1}}) + hor, err 
:= w.HeadObject(context.Background(), api.DefaultBucketName, t.Name(), api.HeadObjectOptions{Range: &api.DownloadRange{Offset: 1, Length: 1}}) if err != nil { t.Fatal(err) } else if !reflect.DeepEqual(hor, &api.HeadObjectResponse{ diff --git a/worker/s3/backend.go b/worker/s3/backend.go index d69eaff17..a8dd1cb22 100644 --- a/worker/s3/backend.go +++ b/worker/s3/backend.go @@ -250,7 +250,7 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range if rangeRequest.End >= 0 { length = rangeRequest.End - rangeRequest.Start + 1 } - opts.Range = api.DownloadRange{Offset: rangeRequest.Start, Length: length} + opts.Range = &api.DownloadRange{Offset: rangeRequest.Start, Length: length} } res, err := s.w.GetObject(ctx, bucketName, objectName, opts) diff --git a/worker/worker.go b/worker/worker.go index 1cc956484..ed6302f4a 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -882,7 +882,7 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) { return } - offset, length, err := api.ParseDownloadRange(jc.Request) + dr, err := api.ParseDownloadRange(jc.Request) if errors.Is(err, http_range.ErrInvalid) || errors.Is(err, api.ErrMultiRangeNotSupported) { jc.Error(err, http.StatusBadRequest) return @@ -897,10 +897,7 @@ func (w *worker) objectsHandlerHEAD(jc jape.Context) { // fetch object metadata hor, err := w.HeadObject(jc.Request.Context(), bucket, path, api.HeadObjectOptions{ IgnoreDelim: ignoreDelim, - Range: api.DownloadRange{ - Offset: offset, - Length: length, - }, + Range: &dr, }) if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) @@ -979,7 +976,7 @@ func (w *worker) objectsHandlerGET(jc jape.Context) { return } - offset, length, err := api.ParseDownloadRange(jc.Request) + dr, err := api.ParseDownloadRange(jc.Request) if errors.Is(err, http_range.ErrInvalid) || errors.Is(err, api.ErrMultiRangeNotSupported) { jc.Error(err, http.StatusBadRequest) return @@ -993,10 +990,7 @@ func (w *worker) objectsHandlerGET(jc 
jape.Context) { gor, err := w.GetObject(ctx, bucket, path, api.DownloadObjectOptions{ GetObjectOptions: opts, - Range: api.DownloadRange{ - Offset: offset, - Length: length, - }, + Range: &dr, }) if utils.IsErr(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) @@ -1567,6 +1561,9 @@ func (w *worker) headObject(ctx context.Context, bucket, path string, onlyMetada } // adjust length + if opts.Range == nil { + opts.Range = &api.DownloadRange{Offset: 0, Length: -1} + } if opts.Range.Length == -1 { opts.Range.Length = res.Object.Size - opts.Range.Offset } @@ -1598,6 +1595,9 @@ func (w *worker) GetObject(ctx context.Context, bucket, path string, opts api.Do obj := *res.Object.Object // adjust range + if opts.Range == nil { + opts.Range = &api.DownloadRange{} + } opts.Range.Offset = hor.Range.Offset opts.Range.Length = hor.Range.Length From 7ed685f3f41ba7813adc63358dd02a29c8117598 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 13:28:59 +0200 Subject: [PATCH 170/201] worker: address comments --- worker/upload.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/worker/upload.go b/worker/upload.go index 7bcaf5c52..664f31539 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -1056,6 +1056,13 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { // redundant sectors can't complete the upload if sector.isUploaded() { + // release the candidate + for _, candidate := range s.candidates { + if candidate.req == req { + candidate.req = nil + break + } + } return false, false } @@ -1069,14 +1076,6 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { // update uploaded sectors s.numUploaded++ - // release the candidate if the upload was redundant - for _, candidate := range s.candidates { - if candidate.req == req && sector.isUploaded() { - candidate.req = nil - break - } - } - // release memory s.mem.ReleaseSome(rhpv2.SectorSize) From 6f352b1bf9e931648292822eac9616bddc90f53f Mon 
Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 14:16:09 +0200 Subject: [PATCH 171/201] worker: implement UploadMultipartUploadPart --- api/multipart.go | 5 ++ api/object.go | 16 +++- api/setting.go | 10 ++- worker/worker.go | 196 +++++++++++++++++++++++++++-------------------- 4 files changed, 135 insertions(+), 92 deletions(-) diff --git a/api/multipart.go b/api/multipart.go index ee26567b1..ecd19789f 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -7,6 +7,11 @@ import ( ) var ( + // ErrInvalidMultipartEncryptionSettings is returned if the multipart upload + // has an invalid combination of encryption params. e.g. when encryption is + // enabled but not offset is set. + ErrInvalidMultipartEncryptionSettings = errors.New("invalid multipart encryption settings") + // ErrMultipartUploadNotFound is returned if the specified multipart upload // wasn't found. ErrMultipartUploadNotFound = errors.New("multipart upload not found") diff --git a/api/object.go b/api/object.go index 41ab7ae62..91332eec7 100644 --- a/api/object.go +++ b/api/object.go @@ -241,7 +241,6 @@ type ( // UploadObjectOptions is the options type for the worker client. 
UploadObjectOptions struct { - Offset int MinShards int TotalShards int ContractSet string @@ -251,15 +250,15 @@ type ( } UploadMultipartUploadPartOptions struct { + ContractSet string + MinShards int + TotalShards int EncryptionOffset *int ContentLength int64 } ) func (opts UploadObjectOptions) ApplyValues(values url.Values) { - if opts.Offset != 0 { - values.Set("offset", fmt.Sprint(opts.Offset)) - } if opts.MinShards != 0 { values.Set("minshards", fmt.Sprint(opts.MinShards)) } @@ -284,6 +283,15 @@ func (opts UploadMultipartUploadPartOptions) Apply(values url.Values) { if opts.EncryptionOffset != nil { values.Set("offset", fmt.Sprint(*opts.EncryptionOffset)) } + if opts.MinShards != 0 { + values.Set("minshards", fmt.Sprint(opts.MinShards)) + } + if opts.TotalShards != 0 { + values.Set("totalshards", fmt.Sprint(opts.TotalShards)) + } + if opts.ContractSet != "" { + values.Set("contractset", opts.ContractSet) + } } func (opts DownloadObjectOptions) ApplyValues(values url.Values) { diff --git a/api/setting.go b/api/setting.go index 0c0057410..923863e58 100644 --- a/api/setting.go +++ b/api/setting.go @@ -24,6 +24,10 @@ const ( ) var ( + // ErrInvalidRedundancySettings is returned if the redundancy settings are + // not valid + ErrInvalidRedundancySettings = errors.New("invalid redundancy settings") + // ErrSettingNotFound is returned if a requested setting is not present in the // database. ErrSettingNotFound = errors.New("setting not found") @@ -136,13 +140,13 @@ func (rs RedundancySettings) SlabSizeNoRedundancy() uint64 { // valid. 
func (rs RedundancySettings) Validate() error { if rs.MinShards < 1 { - return errors.New("MinShards must be greater than 0") + return fmt.Errorf("%w: MinShards must be greater than 0", ErrInvalidRedundancySettings) } if rs.TotalShards < rs.MinShards { - return errors.New("TotalShards must be at least MinShards") + return fmt.Errorf("%w: TotalShards must be at least MinShards", ErrInvalidRedundancySettings) } if rs.TotalShards > 255 { - return errors.New("TotalShards must be less than 256") + return fmt.Errorf("%w: TotalShards must be less than 256", ErrInvalidRedundancySettings) } return nil } diff --git a/worker/worker.go b/worker/worker.go index ed6302f4a..ad3ae8a5e 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1058,7 +1058,10 @@ func (w *worker) objectsHandlerPUT(jc jape.Context) { MimeType: mimeType, Metadata: metadata, }) - if utils.IsErr(err, api.ErrBucketNotFound) { + if utils.IsErr(err, api.ErrInvalidRedundancySettings) { + jc.Error(err, http.StatusBadRequest) + return + } else if utils.IsErr(err, api.ErrBucketNotFound) { jc.Error(err, http.StatusNotFound) return } else if utils.IsErr(err, api.ErrContractSetNotSpecified) { @@ -1082,34 +1085,10 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { // grab the path path := jc.PathParam("path") - // fetch the upload parameters - up, err := w.bus.UploadParams(ctx) - if jc.Check("couldn't fetch upload parameters from bus", err) != nil { - return - } - - // attach gouging checker to the context - ctx = WithGougingChecker(ctx, w.bus, up.GougingParams) - - // cancel the upload if no contract set is specified - if up.ContractSet == "" { - jc.Error(api.ErrContractSetNotSpecified, http.StatusBadRequest) - return - } - - // cancel the upload if consensus is not synced - if !up.ConsensusState.Synced { - w.logger.Errorf("upload cancelled, err: %v", api.ErrConsensusNotSynced) - jc.Error(api.ErrConsensusNotSynced, http.StatusServiceUnavailable) - return - } - // decode the contract set from the query 
string var contractset string if jc.DecodeForm("contractset", &contractset) != nil { return - } else if contractset != "" { - up.ContractSet = contractset } // decode the bucket from the query string @@ -1118,13 +1097,6 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { return } - // return early if the bucket does not exist - _, err = w.bus.Bucket(ctx, bucket) - if utils.IsErr(err, api.ErrBucketNotFound) { - jc.Error(fmt.Errorf("bucket '%s' not found; %w", bucket, err), http.StatusNotFound) - return - } - // decode the upload id var uploadID string if jc.DecodeForm("uploadid", &uploadID) != nil { @@ -1141,14 +1113,11 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { } // allow overriding the redundancy settings - rs := up.RedundancySettings - if jc.DecodeForm("minshards", &rs.MinShards) != nil { - return - } - if jc.DecodeForm("totalshards", &rs.TotalShards) != nil { + var minShards, totalShards int + if jc.DecodeForm("minshards", &minShards) != nil { return } - if jc.Check("invalid redundancy settings", rs.Validate()) != nil { + if jc.DecodeForm("totalshards", &totalShards) != nil { return } @@ -1156,62 +1125,40 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { var offset int if jc.DecodeForm("offset", &offset) != nil { return - } else if offset < 0 { - jc.Error(errors.New("offset must be positive"), http.StatusBadRequest) - return } - // fetch upload from bus - upload, err := w.bus.MultipartUpload(ctx, uploadID) - if utils.IsErr(err, api.ErrMultipartUploadNotFound) { + // upload the multipart + resp, err := w.UploadMultipartUploadPart(ctx, jc.Request.Body, bucket, path, uploadID, partNumber, api.UploadMultipartUploadPartOptions{ + ContractSet: contractset, + MinShards: minShards, + TotalShards: totalShards, + EncryptionOffset: nil, + ContentLength: jc.Request.ContentLength, + }) + if utils.IsErr(err, api.ErrInvalidRedundancySettings) { + jc.Error(err, http.StatusBadRequest) + return + } else if utils.IsErr(err, 
api.ErrBucketNotFound) { jc.Error(err, http.StatusNotFound) return - } else if jc.Check("failed to fetch multipart upload", err) != nil { + } else if utils.IsErr(err, api.ErrContractSetNotSpecified) { + jc.Error(err, http.StatusBadRequest) return - } - - // built options - opts := []UploadOption{ - WithBlockHeight(up.CurrentHeight), - WithContractSet(up.ContractSet), - WithPacking(up.UploadPacking), - WithRedundancySettings(up.RedundancySettings), - WithCustomKey(upload.Key), - WithPartNumber(partNumber), - WithUploadID(uploadID), - } - - // make sure only one of the following is set - if encryptionEnabled := !upload.Key.IsNoopKey(); encryptionEnabled && jc.Request.FormValue("offset") == "" { - jc.Error(errors.New("if object encryption (pre-erasure coding) wasn't disabled by creating the multipart upload with the no-op key, the offset needs to be set"), http.StatusBadRequest) + } else if utils.IsErr(err, api.ErrConsensusNotSynced) { + jc.Error(err, http.StatusServiceUnavailable) return - } else if encryptionEnabled { - opts = append(opts, WithCustomEncryptionOffset(uint64(offset))) - } - - // attach gouging checker to the context - ctx = WithGougingChecker(ctx, w.bus, up.GougingParams) - - // fetch contracts - contracts, err := w.bus.Contracts(ctx, api.ContractsOpts{ContractSet: up.ContractSet}) - if jc.Check("couldn't fetch contracts from bus", err) != nil { + } else if utils.IsErr(err, api.ErrMultipartUploadNotFound) { + jc.Error(err, http.StatusNotFound) return - } - - // upload the multipart - eTag, err := w.upload(ctx, bucket, path, jc.Request.Body, contracts, opts...) 
- if jc.Check("couldn't upload object", err) != nil { - if err != nil { - w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) { - w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, "", rs.MinShards, rs.TotalShards, len(contracts), up.UploadPacking, true, err)) - } - } + } else if utils.IsErr(err, api.ErrInvalidMultipartEncryptionSettings) { + jc.Error(err, http.StatusBadRequest) + return + } else if jc.Check("couldn't upload multipart part", err) != nil { return } // set etag header - jc.ResponseWriter.Header().Set("ETag", api.FormatETag(eTag)) + jc.ResponseWriter.Header().Set("ETag", api.FormatETag(resp.ETag)) } func (w *worker) objectsHandlerDELETE(jc jape.Context) { @@ -1685,7 +1632,7 @@ func (w *worker) UploadObject(ctx context.Context, r io.Reader, bucket, path str } err = api.RedundancySettings{MinShards: up.RedundancySettings.MinShards, TotalShards: up.RedundancySettings.TotalShards}.Validate() if err != nil { - return nil, fmt.Errorf("invalid redundancy settings: %w", err) + return nil, err } // attach gouging checker to the context @@ -1719,5 +1666,84 @@ func (w *worker) UploadObject(ctx context.Context, r io.Reader, bucket, path str } func (w *worker) UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, path, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) { - panic("not implemented") + // return early if the bucket does not exist + _, err := w.bus.Bucket(ctx, bucket) + if err != nil { + return nil, fmt.Errorf("bucket '%s' not found; %w", bucket, err) + } + + // fetch the upload parameters + up, err := w.bus.UploadParams(ctx) + if err != nil { + return nil, fmt.Errorf("couldn't fetch upload parameters from bus: %w", err) + } else if opts.ContractSet != "" { + up.ContractSet = opts.ContractSet + } else if up.ContractSet == "" { + return nil, api.ErrContractSetNotSpecified + } + + // cancel the upload if 
consensus is not synced + if !up.ConsensusState.Synced { + return nil, api.ErrConsensusNotSynced + } + + // allow overriding the redundancy settings + if opts.MinShards != 0 { + up.RedundancySettings.MinShards = opts.MinShards + } + if opts.TotalShards != 0 { + up.RedundancySettings.TotalShards = opts.TotalShards + } + err = api.RedundancySettings{MinShards: up.RedundancySettings.MinShards, TotalShards: up.RedundancySettings.TotalShards}.Validate() + if err != nil { + return nil, err + } + + // fetch upload from bus + upload, err := w.bus.MultipartUpload(ctx, uploadID) + if err != nil { + return nil, fmt.Errorf("couldn't fetch multipart upload: %w", err) + } + + // attach gouging checker to the context + ctx = WithGougingChecker(ctx, w.bus, up.GougingParams) + + // prepare opts + uploadOpts := []UploadOption{ + WithBlockHeight(up.CurrentHeight), + WithContractSet(up.ContractSet), + WithPacking(up.UploadPacking), + WithRedundancySettings(up.RedundancySettings), + WithCustomKey(upload.Key), + WithPartNumber(partNumber), + WithUploadID(uploadID), + } + + // make sure only one of the following is set + if encryptionEnabled := !upload.Key.IsNoopKey(); encryptionEnabled && opts.EncryptionOffset == nil { + return nil, fmt.Errorf("%w: if object encryption (pre-erasure coding) wasn't disabled by creating the multipart upload with the no-op key, the offset needs to be set", api.ErrInvalidMultipartEncryptionSettings) + } else if opts.EncryptionOffset != nil && *opts.EncryptionOffset < 0 { + return nil, fmt.Errorf("%w: encryption offset must be positive", api.ErrInvalidMultipartEncryptionSettings) + } else if encryptionEnabled { + uploadOpts = append(uploadOpts, WithCustomEncryptionOffset(uint64(*opts.EncryptionOffset))) + } + + // fetch contracts + contracts, err := w.bus.Contracts(ctx, api.ContractsOpts{ContractSet: up.ContractSet}) + if err != nil { + return nil, fmt.Errorf("couldn't fetch contracts from bus: %w", err) + } + + // upload + eTag, err := w.upload(ctx, bucket, 
path, r, contracts, uploadOpts...) + if err != nil { + w.logger.With(zap.Error(err)).With("path", path).With("bucket", bucket).Error("failed to upload object") + if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) && !errors.Is(err, context.Canceled) { + w.registerAlert(newUploadFailedAlert(bucket, path, up.ContractSet, "", up.RedundancySettings.MinShards, up.RedundancySettings.TotalShards, len(contracts), up.UploadPacking, false, err)) + } + return nil, fmt.Errorf("couldn't upload object: %w", err) + } + return &api.UploadMultipartUploadPartResponse{ + ETag: eTag, + }, nil } From 35d583d9e6d2de13ec368cf9ee1e97670f2d3e0f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 14:34:24 +0200 Subject: [PATCH 172/201] e2e: fix TestMultipartUploads --- worker/worker.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/worker/worker.go b/worker/worker.go index ad3ae8a5e..7dbbd233e 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1121,20 +1121,25 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { return } + // prepare options + opts := api.UploadMultipartUploadPartOptions{ + ContractSet: contractset, + MinShards: minShards, + TotalShards: totalShards, + EncryptionOffset: nil, + ContentLength: jc.Request.ContentLength, + } + // get the offset var offset int if jc.DecodeForm("offset", &offset) != nil { return + } else if jc.Request.FormValue("offset") != "" { + opts.EncryptionOffset = &offset } // upload the multipart - resp, err := w.UploadMultipartUploadPart(ctx, jc.Request.Body, bucket, path, uploadID, partNumber, api.UploadMultipartUploadPartOptions{ - ContractSet: contractset, - MinShards: minShards, - TotalShards: totalShards, - EncryptionOffset: nil, - ContentLength: jc.Request.ContentLength, - }) + resp, err := w.UploadMultipartUploadPart(ctx, jc.Request.Body, bucket, path, uploadID, partNumber, opts) if utils.IsErr(err, api.ErrInvalidRedundancySettings) { jc.Error(err, 
http.StatusBadRequest) return From 59727fbde7f3cbb04accc97eafb710297f847ba2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 5 Apr 2024 14:45:27 +0200 Subject: [PATCH 173/201] go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 73169eae2..260117032 100644 --- a/go.sum +++ b/go.sum @@ -243,8 +243,6 @@ gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1 gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37 h1:jsiab6uAUkaeDL7XEseAxJw7NVhxLNoU2WaB0AHbgG8= -go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041 h1:3tgQlTmop/OU5dTHnBmAdNIPgae67wRijaknBhmAOCg= go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= From 3ca1b242f1222df8dc063a2aade3602fa9c2f434 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 01:28:37 +0000 Subject: [PATCH 174/201] build(deps): bump golang.org/x/crypto from 0.21.0 to 0.22.0 Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.21.0 to 0.22.0. - [Commits](https://github.com/golang/crypto/compare/v0.21.0...v0.22.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 14 ++++++-------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 756a9aa3b..7360bf140 100644 --- a/go.mod +++ b/go.mod @@ -22,9 +22,9 @@ require ( go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca go.sia.tech/web/renterd v0.50.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.21.0 - golang.org/x/sys v0.18.0 - golang.org/x/term v0.18.0 + golang.org/x/crypto v0.22.0 + golang.org/x/sys v0.19.0 + golang.org/x/term v0.19.0 gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.5.6 gorm.io/driver/sqlite v1.5.5 diff --git a/go.sum b/go.sum index 73169eae2..e5de1e9af 100644 --- a/go.sum +++ b/go.sum @@ -243,8 +243,6 @@ gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1 gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37 h1:jsiab6uAUkaeDL7XEseAxJw7NVhxLNoU2WaB0AHbgG8= -go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041 h1:3tgQlTmop/OU5dTHnBmAdNIPgae67wRijaknBhmAOCg= go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= @@ -288,8 +286,8 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -347,16 +345,16 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210421210424-b80969c67360/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= 
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From 0b16135aea3492204489154aa0a638460876cdb9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 01:28:54 +0000 Subject: [PATCH 175/201] build(deps): bump go.sia.tech/hostd Bumps [go.sia.tech/hostd](https://github.com/SiaFoundation/hostd) from 1.0.4-0.20240327150808-8c407121ad92 to 1.0.4. - [Release notes](https://github.com/SiaFoundation/hostd/releases) - [Commits](https://github.com/SiaFoundation/hostd/commits/v1.0.4) --- updated-dependencies: - dependency-name: go.sia.tech/hostd dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 756a9aa3b..ab7e7714b 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041 go.sia.tech/coreutils v0.0.3 go.sia.tech/gofakes3 v0.0.1 - go.sia.tech/hostd v1.0.4-0.20240327150808-8c407121ad92 + go.sia.tech/hostd v1.0.4 go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca diff --git a/go.sum b/go.sum index 73169eae2..437d60f7d 100644 --- a/go.sum +++ b/go.sum @@ -243,16 +243,14 @@ gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1 gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37 h1:jsiab6uAUkaeDL7XEseAxJw7NVhxLNoU2WaB0AHbgG8= -go.sia.tech/core v0.2.2-0.20240325122830-e781eaa57d37/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041 h1:3tgQlTmop/OU5dTHnBmAdNIPgae67wRijaknBhmAOCg= go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= go.sia.tech/gofakes3 v0.0.1 h1:8vtYH/B17NJ4GXLWiONfhwBrrmtJtYiofnO3PfjU298= go.sia.tech/gofakes3 v0.0.1/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= -go.sia.tech/hostd v1.0.4-0.20240327150808-8c407121ad92 h1:raFT28huR0i/njUr13hJElpso/Zk631gKq2Vkg27hYE= -go.sia.tech/hostd v1.0.4-0.20240327150808-8c407121ad92/go.mod h1:s1W4/Okfcs2rGM3sC7xL95HY+I/oJ0Dsix3zTER+hpQ= 
+go.sia.tech/hostd v1.0.4 h1:rFzuNJ7sSFQfdrTHKSNYyMX+wlHyei/vZcVbXmrUl6I= +go.sia.tech/hostd v1.0.4/go.mod h1:s1W4/Okfcs2rGM3sC7xL95HY+I/oJ0Dsix3zTER+hpQ= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= From 291c09a71bf9bd9a11959d840f03b351bfa00595 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 06:36:54 +0000 Subject: [PATCH 176/201] build(deps): bump go.sia.tech/core Bumps [go.sia.tech/core](https://github.com/SiaFoundation/core) from 0.2.2-0.20240404003127-f4248250d041 to 0.2.2. - [Commits](https://github.com/SiaFoundation/core/commits/v0.2.2) --- updated-dependencies: - dependency-name: go.sia.tech/core dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ab7e7714b..d2c8edc3b 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/minio/minio-go/v7 v7.0.69 github.com/montanaflynn/stats v0.7.1 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe - go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041 + go.sia.tech/core v0.2.2 go.sia.tech/coreutils v0.0.3 go.sia.tech/gofakes3 v0.0.1 go.sia.tech/hostd v1.0.4 diff --git a/go.sum b/go.sum index 437d60f7d..16d3d2ff9 100644 --- a/go.sum +++ b/go.sum @@ -243,8 +243,8 @@ gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1 gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041 h1:3tgQlTmop/OU5dTHnBmAdNIPgae67wRijaknBhmAOCg= -go.sia.tech/core v0.2.2-0.20240404003127-f4248250d041/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= +go.sia.tech/core v0.2.2 h1:33RJrt08o7KyUOY4tITH6ECmRq1lhtapqc/SncIF/2A= +go.sia.tech/core v0.2.2/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= go.sia.tech/gofakes3 v0.0.1 h1:8vtYH/B17NJ4GXLWiONfhwBrrmtJtYiofnO3PfjU298= From 1491262492298ead8539ec2f948ab56224edfd0b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 15 Apr 2024 10:10:41 +0200 Subject: [PATCH 177/201] worker: move threadedUploadPackedSlabs out of bufferSizeLimitReached block --- internal/test/e2e/cluster.go | 6 +++++- worker/upload.go | 6 +++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git 
a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 16b3acbfd..1a47a4e9a 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -19,6 +19,7 @@ import ( "go.sia.tech/jape" "go.sia.tech/renterd/api" "go.sia.tech/renterd/autopilot" + "go.sia.tech/renterd/build" "go.sia.tech/renterd/bus" "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/node" @@ -421,7 +422,10 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { tt.OK(busClient.UpdateSetting(ctx, api.SettingS3Authentication, api.S3AuthenticationSettings{ V4Keypairs: map[string]string{test.S3AccessKeyID: test.S3SecretAccessKey}, })) - tt.OK(busClient.UpdateSetting(ctx, api.SettingUploadPacking, api.UploadPackingSettings{Enabled: enableUploadPacking})) + tt.OK(busClient.UpdateSetting(ctx, api.SettingUploadPacking, api.UploadPackingSettings{ + Enabled: enableUploadPacking, + SlabBufferMaxSizeSoft: build.DefaultUploadPackingSettings.SlabBufferMaxSizeSoft, + })) // Fund the bus. 
if funding { diff --git a/worker/upload.go b/worker/upload.go index 00c0a924b..144021a3c 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -202,11 +202,11 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra } } } - - // make sure there's a goroutine uploading the remainder of the packed slabs - go w.threadedUploadPackedSlabs(up.rs, up.contractSet, lockingPriorityBackgroundUpload) } + // make sure there's a goroutine uploading any packed slabs + go w.threadedUploadPackedSlabs(up.rs, up.contractSet, lockingPriorityBackgroundUpload) + return eTag, nil } From 8775da01122a863abb9f2715330f591686e97b13 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 15 Apr 2024 10:45:23 +0200 Subject: [PATCH 178/201] worker: respect ctx in UploadSector of testHost --- worker/host_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/worker/host_test.go b/worker/host_test.go index 5f7a5432c..79ebb940e 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -94,7 +94,10 @@ func (h *testHost) DownloadSector(ctx context.Context, w io.Writer, root types.H func (h *testHost) UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error { h.AddSector(sectorRoot, sector) if h.uploadDelay > 0 { - time.Sleep(h.uploadDelay) + select { + case <-time.After(h.uploadDelay): + case <-ctx.Done(): + } } return nil } From 2699f548d8f935d68b15fb4a58032539a30b2b2e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 15 Apr 2024 11:00:13 +0200 Subject: [PATCH 179/201] worker: release all candidates --- worker/host_test.go | 1 + worker/upload.go | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/worker/host_test.go b/worker/host_test.go index 79ebb940e..dcc089154 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -97,6 +97,7 @@ func (h *testHost) UploadSector(ctx context.Context, sectorRoot types.Hash256, s select { case 
<-time.After(h.uploadDelay): case <-ctx.Done(): + return ctx.Err() } } return nil diff --git a/worker/upload.go b/worker/upload.go index deb3290df..eadedbc09 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -1054,13 +1054,6 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { // redundant sectors can't complete the upload if sector.isUploaded() { - // release the candidate - for _, candidate := range s.candidates { - if candidate.req == req { - candidate.req = nil - break - } - } return false, false } @@ -1074,6 +1067,13 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { // update uploaded sectors s.numUploaded++ + // release all other candidates for this sector + for _, candidate := range s.candidates { + if candidate.req != nil && candidate.req != req && candidate.req.sector.index == sector.index { + candidate.req = nil + } + } + // release memory s.mem.ReleaseSome(rhpv2.SectorSize) From 3425ee052b05b1d2bb8aa41d5711e75f37555464 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 15 Apr 2024 11:07:29 +0200 Subject: [PATCH 180/201] worker: remove from s.errs upon success --- worker/upload.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/worker/upload.go b/worker/upload.go index eadedbc09..1f2f8f008 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -1052,6 +1052,10 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { return false, false } + // remove an error for this host if it successfully uploaded another sector. + // This might happen if another host was faster and this one got reused. 
+ delete(s.errs, req.hk) + // redundant sectors can't complete the upload if sector.isUploaded() { return false, false From 5e251a97aa9069aa14e03f3542b46318e0f12513 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 15 Apr 2024 11:17:49 +0200 Subject: [PATCH 181/201] worker: logging for untracked sectors --- worker/upload.go | 30 ++++++++++++++---------------- worker/uploader.go | 2 ++ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/worker/upload.go b/worker/upload.go index 1f2f8f008..7d8f30257 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -1046,18 +1046,23 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { } s.numInflight-- - // failed reqs can't complete the upload - if resp.err != nil { - s.errs[req.hk] = resp.err + // redundant sectors can't complete the upload + if sector.isUploaded() { + // release the candidate + for _, candidate := range s.candidates { + if candidate.req == req { + candidate.req = nil + break + } + } return false, false } - // remove an error for this host if it successfully uploaded another sector. - // This might happen if another host was faster and this one got reused. 
- delete(s.errs, req.hk) - - // redundant sectors can't complete the upload - if sector.isUploaded() { + // failed reqs can't complete the upload, we do this after the isUploaded + // check since any error returned for a redundant sector is probably a + // result of the sector ctx being closed + if resp.err != nil { + s.errs[req.hk] = resp.err return false, false } @@ -1071,13 +1076,6 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { // update uploaded sectors s.numUploaded++ - // release all other candidates for this sector - for _, candidate := range s.candidates { - if candidate.req != nil && candidate.req != req && candidate.req.sector.index == sector.index { - candidate.req = nil - } - } - // release memory s.mem.ReleaseSome(rhpv2.SectorSize) diff --git a/worker/uploader.go b/worker/uploader.go index 403accbc8..80bd2393b 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -139,6 +139,8 @@ outer: canceledOverdrive := req.done() && req.overdrive && err != nil if !canceledOverdrive && !isClosedStream(err) { u.trackSectorUpload(err, elapsed) + } else { + u.logger.Debugw("not tracking sector upload metric", zap.Error(err)) } } } From 3243333c27ba334ad61bf1f71328e2509809c984 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 15 Apr 2024 14:08:38 +0200 Subject: [PATCH 182/201] worker: address comments --- api/worker.go | 7 +++---- worker/worker.go | 4 ++-- worker/worker_test.go | 9 --------- 3 files changed, 5 insertions(+), 15 deletions(-) diff --git a/api/worker.go b/api/worker.go index c0249135f..2908802f7 100644 --- a/api/worker.go +++ b/api/worker.go @@ -27,7 +27,7 @@ var ( // be scanned since it is on a private network. ErrHostOnPrivateNetwork = errors.New("host is on a private network") - // ErrMultiRangeNotSupported is returned by the worker API when a requesta + // ErrMultiRangeNotSupported is returned by the worker API when a request // tries to download multiple ranges at once. 
ErrMultiRangeNotSupported = errors.New("multipart ranges are not supported") ) @@ -280,9 +280,8 @@ func ParseContentRange(contentRange string) (ContentRange, error) { } func ParseDownloadRange(req *http.Request) (DownloadRange, error) { - // parse the request range - // we pass math.MaxInt64 since a range header in a request doesn't have a - // size + // parse the request range we pass math.MaxInt64 since a range header in a + // request doesn't have a size ranges, err := http_range.ParseRange(req.Header.Get("Range"), math.MaxInt64) if err != nil { return DownloadRange{}, err diff --git a/worker/worker.go b/worker/worker.go index 7dbbd233e..a79eeb2f3 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1650,14 +1650,14 @@ func (w *worker) UploadObject(ctx context.Context, r io.Reader, bucket, path str } // upload - eTag, err := w.upload(ctx, bucket, path, r, contracts, []UploadOption{ + eTag, err := w.upload(ctx, bucket, path, r, contracts, WithBlockHeight(up.CurrentHeight), WithContractSet(up.ContractSet), WithMimeType(opts.MimeType), WithPacking(up.UploadPacking), WithRedundancySettings(up.RedundancySettings), WithObjectUserMetadata(opts.Metadata), - }...) 
+ ) if err != nil { w.logger.With(zap.Error(err)).With("path", path).With("bucket", bucket).Error("failed to upload object") if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errUploadInterrupted) && !errors.Is(err, context.Canceled) { diff --git a/worker/worker_test.go b/worker/worker_test.go index 637168950..706fae14e 100644 --- a/worker/worker_test.go +++ b/worker/worker_test.go @@ -3,10 +3,8 @@ package worker import ( "context" "fmt" - "testing" "time" - "github.com/gotd/contrib/http_range" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" @@ -136,10 +134,3 @@ func newTestSector() (*[rhpv2.SectorSize]byte, types.Hash256) { frand.Read(sector[:]) return §or, rhpv2.SectorRoot(§or) } - -func TestFoo(t *testing.T) { - fmt.Println(http_range.Range{ - Start: 1, - Length: 2, - }.ContentRange(100)) -} From 7385035adbe05a01a34598c3e513e4f425ef898f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 15 Apr 2024 14:23:54 +0200 Subject: [PATCH 183/201] worker: remove code duplication --- worker/worker.go | 98 ++++++++++++++++++++---------------------------- 1 file changed, 40 insertions(+), 58 deletions(-) diff --git a/worker/worker.go b/worker/worker.go index a79eeb2f3..cd3d545eb 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1607,35 +1607,8 @@ func (w *worker) HeadObject(ctx context.Context, bucket, path string, opts api.H } func (w *worker) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { - // return early if the bucket does not exist - _, err := w.bus.Bucket(ctx, bucket) - if err != nil { - return nil, fmt.Errorf("bucket '%s' not found; %w", bucket, err) - } - - // fetch the upload parameters - up, err := w.bus.UploadParams(ctx) - if err != nil { - return nil, fmt.Errorf("couldn't fetch upload parameters from bus: %w", err) - } else if opts.ContractSet != "" { - up.ContractSet = opts.ContractSet - } else if up.ContractSet == "" { - 
return nil, api.ErrContractSetNotSpecified - } - - // cancel the upload if consensus is not synced - if !up.ConsensusState.Synced { - return nil, api.ErrConsensusNotSynced - } - - // allow overriding the redundancy settings - if opts.MinShards != 0 { - up.RedundancySettings.MinShards = opts.MinShards - } - if opts.TotalShards != 0 { - up.RedundancySettings.TotalShards = opts.TotalShards - } - err = api.RedundancySettings{MinShards: up.RedundancySettings.MinShards, TotalShards: up.RedundancySettings.TotalShards}.Validate() + // prepare upload params + up, err := w.prepareUploadParams(ctx, bucket, opts.ContractSet, opts.MinShards, opts.TotalShards) if err != nil { return nil, err } @@ -1671,35 +1644,8 @@ func (w *worker) UploadObject(ctx context.Context, r io.Reader, bucket, path str } func (w *worker) UploadMultipartUploadPart(ctx context.Context, r io.Reader, bucket, path, uploadID string, partNumber int, opts api.UploadMultipartUploadPartOptions) (*api.UploadMultipartUploadPartResponse, error) { - // return early if the bucket does not exist - _, err := w.bus.Bucket(ctx, bucket) - if err != nil { - return nil, fmt.Errorf("bucket '%s' not found; %w", bucket, err) - } - - // fetch the upload parameters - up, err := w.bus.UploadParams(ctx) - if err != nil { - return nil, fmt.Errorf("couldn't fetch upload parameters from bus: %w", err) - } else if opts.ContractSet != "" { - up.ContractSet = opts.ContractSet - } else if up.ContractSet == "" { - return nil, api.ErrContractSetNotSpecified - } - - // cancel the upload if consensus is not synced - if !up.ConsensusState.Synced { - return nil, api.ErrConsensusNotSynced - } - - // allow overriding the redundancy settings - if opts.MinShards != 0 { - up.RedundancySettings.MinShards = opts.MinShards - } - if opts.TotalShards != 0 { - up.RedundancySettings.TotalShards = opts.TotalShards - } - err = api.RedundancySettings{MinShards: up.RedundancySettings.MinShards, TotalShards: up.RedundancySettings.TotalShards}.Validate() + // 
prepare upload params + up, err := w.prepareUploadParams(ctx, bucket, opts.ContractSet, opts.MinShards, opts.TotalShards) if err != nil { return nil, err } @@ -1752,3 +1698,39 @@ func (w *worker) UploadMultipartUploadPart(ctx context.Context, r io.Reader, buc ETag: eTag, }, nil } + +func (w *worker) prepareUploadParams(ctx context.Context, bucket string, contractSet string, minShards, totalShards int) (api.UploadParams, error) { + // return early if the bucket does not exist + _, err := w.bus.Bucket(ctx, bucket) + if err != nil { + return api.UploadParams{}, fmt.Errorf("bucket '%s' not found; %w", bucket, err) + } + + // fetch the upload parameters + up, err := w.bus.UploadParams(ctx) + if err != nil { + return api.UploadParams{}, fmt.Errorf("couldn't fetch upload parameters from bus: %w", err) + } else if contractSet != "" { + up.ContractSet = contractSet + } else if up.ContractSet == "" { + return api.UploadParams{}, api.ErrContractSetNotSpecified + } + + // cancel the upload if consensus is not synced + if !up.ConsensusState.Synced { + return api.UploadParams{}, api.ErrConsensusNotSynced + } + + // allow overriding the redundancy settings + if minShards != 0 { + up.RedundancySettings.MinShards = minShards + } + if totalShards != 0 { + up.RedundancySettings.TotalShards = totalShards + } + err = api.RedundancySettings{MinShards: up.RedundancySettings.MinShards, TotalShards: up.RedundancySettings.TotalShards}.Validate() + if err != nil { + return api.UploadParams{}, err + } + return up, nil +} From 8754787106cedbe733bf1c2f581d19f7544bb314 Mon Sep 17 00:00:00 2001 From: PJ Date: Mon, 15 Apr 2024 14:51:17 +0200 Subject: [PATCH 184/201] stores: fix fetchUsedContracts --- stores/metadata.go | 27 ++++++++------ stores/metadata_test.go | 80 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 11 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index ad4c7d496..56202ccf3 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ 
-1469,13 +1469,16 @@ func (s *SQLStore) isKnownContract(fcid types.FileContractID) bool { return found } -func fetchUsedContracts(tx *gorm.DB, usedContracts map[types.PublicKey]map[types.FileContractID]struct{}) (map[types.FileContractID]dbContract, error) { - fcids := make([]fileContractID, 0, len(usedContracts)) - for _, hostFCIDs := range usedContracts { +func fetchUsedContracts(tx *gorm.DB, usedContractsByHost map[types.PublicKey]map[types.FileContractID]struct{}) (map[types.FileContractID]dbContract, error) { + // flatten map to get all used contract ids + fcids := make([]fileContractID, 0, len(usedContractsByHost)) + for _, hostFCIDs := range usedContractsByHost { for fcid := range hostFCIDs { fcids = append(fcids, fileContractID(fcid)) } } + + // fetch all contracts, take into account renewals var contracts []dbContract err := tx.Model(&dbContract{}). Joins("Host"). @@ -1484,17 +1487,19 @@ func fetchUsedContracts(tx *gorm.DB, usedContracts map[types.PublicKey]map[types if err != nil { return nil, err } - fetchedContracts := make(map[types.FileContractID]dbContract, len(contracts)) + + // build map of used contracts + usedContracts := make(map[types.FileContractID]dbContract, len(contracts)) for _, c := range contracts { - // If a contract has been renewed, we add the renewed contract to the - // map using the old contract's id. 
- if _, renewed := usedContracts[types.PublicKey(c.Host.PublicKey)][types.FileContractID(c.RenewedFrom)]; renewed { - fetchedContracts[types.FileContractID(c.RenewedFrom)] = c - } else { - fetchedContracts[types.FileContractID(c.FCID)] = c + if _, used := usedContractsByHost[types.PublicKey(c.Host.PublicKey)][types.FileContractID(c.FCID)]; used { + usedContracts[types.FileContractID(c.FCID)] = c + } + if _, used := usedContractsByHost[types.PublicKey(c.Host.PublicKey)][types.FileContractID(c.RenewedFrom)]; used { + usedContracts[types.FileContractID(c.RenewedFrom)] = c } } - return fetchedContracts, nil + + return usedContracts, nil } func (s *SQLStore) RenameObject(ctx context.Context, bucket, keyOld, keyNew string, force bool) error { diff --git a/stores/metadata_test.go b/stores/metadata_test.go index bdd955808..abda57b95 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4664,3 +4664,83 @@ func TestUpdateObjectParallel(t *testing.T) { close(c) wg.Wait() } + +// TestFetchUsedContracts is a unit test that verifies the functionality of +// fetchUsedContracts +func TestFetchUsedContracts(t *testing.T) { + // create store + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // add test host + hk1 := types.PublicKey{1} + err := ss.addTestHost(hk1) + if err != nil { + t.Fatal(err) + } + + // add test contract + fcid1 := types.FileContractID{1} + _, err = ss.addTestContract(fcid1, hk1) + if err != nil { + t.Fatal(err) + } + + // assert empty map returns no contracts + usedContracts := make(map[types.PublicKey]map[types.FileContractID]struct{}) + contracts, err := fetchUsedContracts(ss.db, usedContracts) + if err != nil { + t.Fatal(err) + } else if len(contracts) != 0 { + t.Fatal("expected 0 contracts", len(contracts)) + } + + // add an entry for fcid1 + usedContracts[hk1] = make(map[types.FileContractID]struct{}) + usedContracts[hk1][types.FileContractID{1}] = struct{}{} + + // assert we get the used contract + contracts, err 
= fetchUsedContracts(ss.db, usedContracts) + if err != nil { + t.Fatal(err) + } else if len(contracts) != 1 { + t.Fatal("expected 1 contract", len(contracts)) + } else if _, ok := contracts[fcid1]; !ok { + t.Fatal("contract not found") + } + + // renew the contract + fcid2 := types.FileContractID{2} + _, err = ss.addTestRenewedContract(fcid2, fcid1, hk1, 1) + if err != nil { + t.Fatal(err) + } + + // assert used contracts contains one entry and it points to the renewal + contracts, err = fetchUsedContracts(ss.db, usedContracts) + if err != nil { + t.Fatal(err) + } else if len(contracts) != 1 { + t.Fatal("expected 1 contract", len(contracts)) + } else if contract, ok := contracts[fcid1]; !ok { + t.Fatal("contract not found") + } else if contract.convert().ID != fcid2 { + t.Fatal("contract should point to the renewed contract") + } + + // add an entry for fcid2 + usedContracts[hk1][types.FileContractID{2}] = struct{}{} + + // assert used contracts now contains an entry for both contracts and both + // point to the renewed contract + contracts, err = fetchUsedContracts(ss.db, usedContracts) + if err != nil { + t.Fatal(err) + } else if len(contracts) != 2 { + t.Fatal("expected 2 contracts", len(contracts)) + } else if !reflect.DeepEqual(contracts[types.FileContractID{1}], contracts[types.FileContractID{2}]) { + t.Fatal("contracts should match") + } else if contracts[types.FileContractID{1}].convert().ID != fcid2 { + t.Fatal("contracts should point to the renewed contract") + } +} From 3f92cc4ddb89cc98b0b9ee3e68bf407d944e0229 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 16 Apr 2024 09:26:22 +0200 Subject: [PATCH 185/201] stores: implement review remarks --- stores/sql.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/stores/sql.go b/stores/sql.go index 320ba1cf6..b3c137ea0 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -525,7 +525,7 @@ func (ss *SQLStore) applyUpdates(force bool) error { return nil } -func (s *SQLStore) 
retryTransaction(ctx context.Context, fc func(tx *gorm.DB) error) (err error) { +func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) error) error { abortRetry := func(err error) bool { if err == nil || errors.Is(err, context.Canceled) || @@ -550,23 +550,24 @@ func (s *SQLStore) retryTransaction(ctx context.Context, fc func(tx *gorm.DB) er return false } + var err error attempts := len(s.retryTransactionIntervals) + 1 - for i := 1; i <= attempts; i++ { + for i := 0; i < attempts; i++ { // execute the transaction err = s.db.WithContext(ctx).Transaction(fc) - if err == nil || abortRetry(err) { - return + if abortRetry(err) { + return err } // if this was the last attempt, return the error - if i-1 == len(s.retryTransactionIntervals) { - s.logger.Warn(fmt.Sprintf("transaction attempt %d/%d failed, err: %v", i, attempts, err)) - return + if i == len(s.retryTransactionIntervals) { + s.logger.Warn(fmt.Sprintf("transaction attempt %d/%d failed, err: %v", i+1, attempts, err)) + return err } // log the failed attempt and sleep before retrying - interval := s.retryTransactionIntervals[i-1] - s.logger.Warn(fmt.Sprintf("transaction attempt %d/%d failed, retry in %v, err: %v", i, attempts, interval, err)) + interval := s.retryTransactionIntervals[i] + s.logger.Warn(fmt.Sprintf("transaction attempt %d/%d failed, retry in %v, err: %v", i+1, attempts, interval, err)) time.Sleep(interval) } return fmt.Errorf("retryTransaction failed: %w", err) From f8a4f19085a8671294fde3e5b59ec580fb27276f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 16 Apr 2024 10:55:28 +0200 Subject: [PATCH 186/201] worker: allow for downloading from hosts which are gouging as long as they are not download gouging --- internal/test/e2e/gouging_test.go | 62 +++++++++++++++++++++++++++++++ worker/host.go | 4 +- 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index f5fa2d7fa..e62022bc4 100644 --- 
a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "io" "testing" "time" @@ -142,3 +143,64 @@ func TestHostMinVersion(t *testing.T) { return nil }) } + +func TestDownloadGouging(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + // create a new test cluster + cluster := newTestCluster(t, testClusterOptions{ + hosts: int(test.AutopilotConfig.Contracts.Amount), + logger: newTestLoggerCustom(zapcore.ErrorLevel), + }) + defer cluster.Shutdown() + + cfg := test.AutopilotConfig.Contracts + b := cluster.Bus + w := cluster.Worker + tt := cluster.tt + + // build a hosts map + hostsMap := make(map[string]*Host) + for _, h := range cluster.hosts { + hostsMap[h.PublicKey().String()] = h + } + + // upload and download some data, asserting we have a working contract set + data := make([]byte, rhpv2.SectorSize/12) + tt.OKAll(frand.Read(data)) + + // upload some data + path := fmt.Sprintf("data_%v", len(data)) + tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{})) + + // update the gouging settings to cause hosts to be gouging but not download gouging + gs := test.GougingSettings + gs.MaxContractPrice = types.NewCurrency64(1) + if err := b.UpdateSetting(context.Background(), api.SettingGouging, gs); err != nil { + t.Fatal(err) + } + + // wait for hosts to drop out of the set + tt.Retry(100, 100*time.Millisecond, func() error { + contracts, err := b.Contracts(context.Background(), api.ContractsOpts{ContractSet: cfg.Set}) + tt.OK(err) + if len(contracts) > 0 { + return fmt.Errorf("still got contracts in set") + } + return nil + }) + + // download the data + tt.OK(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, path, api.DownloadObjectOptions{})) + + // update the gouging settings to cause hosts to be download gouging + gs.MaxDownloadPrice = types.NewCurrency64(1) + if err := 
b.UpdateSetting(context.Background(), api.SettingGouging, gs); err != nil { + t.Fatal(err) + } + + // downloading should fail now + tt.FailAll(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, path, api.DownloadObjectOptions{})) +} diff --git a/worker/host.go b/worker/host.go index 4f4e97496..6faba7b15 100644 --- a/worker/host.go +++ b/worker/host.go @@ -88,8 +88,8 @@ func (h *host) DownloadSector(ctx context.Context, w io.Writer, root types.Hash2 if err != nil { return err } - if breakdown := gc.Check(nil, &hpt); breakdown.Gouging() { - return fmt.Errorf("%w: %v", errPriceTableGouging, breakdown) + if breakdown := gc.Check(nil, &hpt); breakdown.DownloadErr != "" { + return fmt.Errorf("%w: %v", errPriceTableGouging, breakdown.DownloadErr) } // return errBalanceInsufficient if balance insufficient From 76f324b5668ac4fd4991af5fd85b64d813a14743 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 16 Apr 2024 11:03:49 +0200 Subject: [PATCH 187/201] worker: use hardcoded 1H for account fund cost and account balance cost --- worker/host.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/worker/host.go b/worker/host.go index 6faba7b15..f092534a0 100644 --- a/worker/host.go +++ b/worker/host.go @@ -235,10 +235,11 @@ func (h *host) FundAccount(ctx context.Context, balance types.Currency, rev *typ } // check whether we have money left in the contract - if pt.FundAccountCost.Cmp(rev.ValidRenterPayout()) >= 0 { - return fmt.Errorf("insufficient funds to fund account: %v <= %v", rev.ValidRenterPayout(), pt.FundAccountCost) + cost := types.NewCurrency64(1) + if cost.Cmp(rev.ValidRenterPayout()) >= 0 { + return fmt.Errorf("insufficient funds to fund account: %v <= %v", rev.ValidRenterPayout(), cost) } - availableFunds := rev.ValidRenterPayout().Sub(pt.FundAccountCost) + availableFunds := rev.ValidRenterPayout().Sub(cost) // cap the deposit amount by the money that's left in the contract if deposit.Cmp(availableFunds) > 
0 { @@ -246,7 +247,7 @@ func (h *host) FundAccount(ctx context.Context, balance types.Currency, rev *typ } // create the payment - amount := deposit.Add(pt.FundAccountCost) + amount := deposit.Add(cost) payment, err := payByContract(rev, amount, rhpv3.Account{}, h.renterKey) // no account needed for funding if err != nil { return err @@ -254,7 +255,7 @@ func (h *host) FundAccount(ctx context.Context, balance types.Currency, rev *typ // fund the account if err := RPCFundAccount(ctx, t, &payment, h.acc.id, pt.UID); err != nil { - return fmt.Errorf("failed to fund account with %v (excluding cost %v);%w", deposit, pt.FundAccountCost, err) + return fmt.Errorf("failed to fund account with %v (excluding cost %v);%w", deposit, cost, err) } // record the spend @@ -277,7 +278,7 @@ func (h *host) SyncAccount(ctx context.Context, rev *types.FileContractRevision) return h.acc.WithSync(ctx, func() (types.Currency, error) { var balance types.Currency err := h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) error { - payment, err := payByContract(rev, pt.AccountBalanceCost, h.acc.id, h.renterKey) + payment, err := payByContract(rev, types.NewCurrency64(1), h.acc.id, h.renterKey) if err != nil { return err } From ab518e9f1a1959b2ed74b0f96fc3ab00071d0bca Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 16 Apr 2024 11:28:11 +0200 Subject: [PATCH 188/201] e2e: fix TestGouging --- internal/test/e2e/gouging_test.go | 68 +------------------------------ 1 file changed, 2 insertions(+), 66 deletions(-) diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index e62022bc4..657ef6722 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -91,11 +91,8 @@ func TestGouging(t *testing.T) { // again, this is necessary for the host to be considered price gouging time.Sleep(defaultHostSettings.PriceTableValidity) - // download the data - should fail - buffer.Reset() - if err := 
w.DownloadObject(context.Background(), &buffer, api.DefaultBucketName, path, api.DownloadObjectOptions{}); err == nil { - t.Fatal("expected download to fail", err) - } + // download the data - should still work + tt.OKAll(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, path, api.DownloadObjectOptions{})) // try optimising gouging settings resp, err := cluster.Autopilot.EvaluateConfig(context.Background(), test.AutopilotConfig, gs, test.RedundancySettings) @@ -143,64 +140,3 @@ func TestHostMinVersion(t *testing.T) { return nil }) } - -func TestDownloadGouging(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - - // create a new test cluster - cluster := newTestCluster(t, testClusterOptions{ - hosts: int(test.AutopilotConfig.Contracts.Amount), - logger: newTestLoggerCustom(zapcore.ErrorLevel), - }) - defer cluster.Shutdown() - - cfg := test.AutopilotConfig.Contracts - b := cluster.Bus - w := cluster.Worker - tt := cluster.tt - - // build a hosts map - hostsMap := make(map[string]*Host) - for _, h := range cluster.hosts { - hostsMap[h.PublicKey().String()] = h - } - - // upload and download some data, asserting we have a working contract set - data := make([]byte, rhpv2.SectorSize/12) - tt.OKAll(frand.Read(data)) - - // upload some data - path := fmt.Sprintf("data_%v", len(data)) - tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{})) - - // update the gouging settings to cause hosts to be gouging but not download gouging - gs := test.GougingSettings - gs.MaxContractPrice = types.NewCurrency64(1) - if err := b.UpdateSetting(context.Background(), api.SettingGouging, gs); err != nil { - t.Fatal(err) - } - - // wait for hosts to drop out of the set - tt.Retry(100, 100*time.Millisecond, func() error { - contracts, err := b.Contracts(context.Background(), api.ContractsOpts{ContractSet: cfg.Set}) - tt.OK(err) - if len(contracts) > 0 { - return fmt.Errorf("still got 
contracts in set") - } - return nil - }) - - // download the data - tt.OK(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, path, api.DownloadObjectOptions{})) - - // update the gouging settings to cause hosts to be download gouging - gs.MaxDownloadPrice = types.NewCurrency64(1) - if err := b.UpdateSetting(context.Background(), api.SettingGouging, gs); err != nil { - t.Fatal(err) - } - - // downloading should fail now - tt.FailAll(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, path, api.DownloadObjectOptions{})) -} From 90308248f68b8cddf0b01dd31a1b02a0f2899154 Mon Sep 17 00:00:00 2001 From: PJ Date: Tue, 16 Apr 2024 12:44:02 +0200 Subject: [PATCH 189/201] testing: default db logger --- internal/test/e2e/cluster.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 1a47a4e9a..f77fa0922 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -32,6 +32,8 @@ import ( "lukechampine.com/frand" "go.sia.tech/renterd/worker" + gormlogger "gorm.io/gorm/logger" + "moul.io/zapgorm2" ) const ( @@ -240,6 +242,18 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { apSettings = *opts.autopilotSettings } + // default database logger + if busCfg.DBLogger == nil { + busCfg.DBLogger = zapgorm2.Logger{ + ZapLogger: logger.Named("SQL"), + LogLevel: gormlogger.Warn, + SlowThreshold: 100 * time.Millisecond, + SkipCallerLookup: false, + IgnoreRecordNotFoundError: true, + Context: nil, + } + } + // Check if we are testing against an external database. If so, we create a // database with a random name first. 
uri, user, password, _ := stores.DBConfigFromEnv() From 4dd89b0481faff566dd61492df973d26b42a1222 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 16 Apr 2024 14:28:16 +0200 Subject: [PATCH 190/201] api;stores;contractor: add stored data to api.Host type and make use of it in the contractor --- api/host.go | 1 + autopilot/contractor/contractor.go | 14 +++++------ autopilot/contractor/evaluate.go | 4 ++-- autopilot/contractor/hostfilter.go | 4 ++-- autopilot/contractor/hostscore.go | 4 ++-- autopilot/contractor/hostscore_test.go | 18 +++++++------- internal/test/e2e/cluster_test.go | 13 +++++++++- stores/hostdb.go | 33 +++++++++++++++++++------- 8 files changed, 59 insertions(+), 32 deletions(-) diff --git a/api/host.go b/api/host.go index e4d472495..4ad1f87a1 100644 --- a/api/host.go +++ b/api/host.go @@ -158,6 +158,7 @@ type ( Scanned bool `json:"scanned"` Blocked bool `json:"blocked"` Checks map[string]HostCheck `json:"checks"` + StoredData uint64 `json:"storedData"` } HostAddress struct { diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 00bbef98b..627ac92d1 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -284,10 +284,8 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, erro // compile map of stored data per host contractData := make(map[types.FileContractID]uint64) - hostData := make(map[types.PublicKey]uint64) for _, c := range contracts { contractData[c.ID] = c.FileSize() - hostData[c.HostKey] += c.FileSize() } // fetch all hosts @@ -310,7 +308,7 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, erro } // fetch candidate hosts - candidates, unusableHosts, err := c.candidateHosts(mCtx, hosts, usedHosts, hostData, minValidScore) // avoid 0 score hosts + candidates, unusableHosts, err := c.candidateHosts(mCtx, hosts, usedHosts, minValidScore) // avoid 0 score hosts if err != nil { return false, err } @@ -324,7 
+322,7 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, erro } // run host checks - checks, err := c.runHostChecks(mCtx, hosts, hostData, minScore) + checks, err := c.runHostChecks(mCtx, hosts, minScore) if err != nil { return false, fmt.Errorf("failed to run host checks, err: %v", err) } @@ -742,7 +740,7 @@ LOOP: return toKeep, toArchive, toStopUsing, toRefresh, toRenew } -func (c *Contractor) runHostChecks(ctx *mCtx, hosts []api.Host, hostData map[types.PublicKey]uint64, minScore float64) (map[types.PublicKey]*api.HostCheck, error) { +func (c *Contractor) runHostChecks(ctx *mCtx, hosts []api.Host, minScore float64) (map[types.PublicKey]*api.HostCheck, error) { // fetch consensus state cs, err := c.bus.ConsensusState(ctx) if err != nil { @@ -756,7 +754,7 @@ func (c *Contractor) runHostChecks(ctx *mCtx, hosts []api.Host, hostData map[typ checks := make(map[types.PublicKey]*api.HostCheck) for _, h := range hosts { h.PriceTable.HostBlockHeight = cs.BlockHeight // ignore HostBlockHeight - checks[h.PublicKey] = checkHost(ctx.AutopilotConfig(), ctx.state.RS, gc, h, minScore, hostData[h.PublicKey]) + checks[h.PublicKey] = checkHost(ctx.AutopilotConfig(), ctx.state.RS, gc, h, minScore) } return checks, nil } @@ -1220,7 +1218,7 @@ func (c *Contractor) calculateMinScore(candidates []scoredHost, numContracts uin return minScore } -func (c *Contractor) candidateHosts(ctx *mCtx, hosts []api.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostsBreakdown, error) { +func (c *Contractor) candidateHosts(ctx *mCtx, hosts []api.Host, usedHosts map[types.PublicKey]struct{}, minScore float64) ([]scoredHost, unusableHostsBreakdown, error) { start := time.Now() // fetch consensus state @@ -1273,7 +1271,7 @@ func (c *Contractor) candidateHosts(ctx *mCtx, hosts []api.Host, usedHosts map[t // NOTE: ignore the pricetable's HostBlockHeight by setting it to our // own blockheight 
h.PriceTable.HostBlockHeight = cs.BlockHeight - hc := checkHost(ctx.AutopilotConfig(), ctx.state.RS, gc, h, minScore, storedData[h.PublicKey]) + hc := checkHost(ctx.AutopilotConfig(), ctx.state.RS, gc, h, minScore) if hc.Usability.IsUsable() { candidates = append(candidates, scoredHost{h, hc.Score.Score()}) continue diff --git a/autopilot/contractor/evaluate.go b/autopilot/contractor/evaluate.go index cc964b3d4..685cb4b70 100644 --- a/autopilot/contractor/evaluate.go +++ b/autopilot/contractor/evaluate.go @@ -9,7 +9,7 @@ import ( func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (usables uint64) { gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) for _, host := range hosts { - hc := checkHost(cfg, rs, gc, host, minValidScore, 0) + hc := checkHost(cfg, rs, gc, host, minValidScore) if hc.Usability.IsUsable() { usables++ } @@ -25,7 +25,7 @@ func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu resp.Hosts = uint64(len(hosts)) for _, host := range hosts { - hc := checkHost(cfg, rs, gc, host, 0, 0) + hc := checkHost(cfg, rs, gc, host, 0) if hc.Usability.IsUsable() { resp.Usable++ continue diff --git a/autopilot/contractor/hostfilter.go b/autopilot/contractor/hostfilter.go index bfc11b903..dc95b1386 100644 --- a/autopilot/contractor/hostfilter.go +++ b/autopilot/contractor/hostfilter.go @@ -236,7 +236,7 @@ func isUpForRenewal(cfg api.AutopilotConfig, r types.FileContractRevision, block } // checkHost performs a series of checks on the host. 
-func checkHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h api.Host, minScore float64, storedData uint64) *api.HostCheck { +func checkHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h api.Host, minScore float64) *api.HostCheck { if rs.Validate() != nil { panic("invalid redundancy settings were supplied - developer error") } @@ -278,7 +278,7 @@ func checkHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.Gou // not gouging, this because the core package does not have overflow // checks in its cost calculations needed to calculate the period // cost - sb = hostScore(cfg, h, storedData, rs.Redundancy()) + sb = hostScore(cfg, h, rs.Redundancy()) if sb.Score() < minScore { ub.LowScore = true } diff --git a/autopilot/contractor/hostscore.go b/autopilot/contractor/hostscore.go index 3a05a947a..51d8275fc 100644 --- a/autopilot/contractor/hostscore.go +++ b/autopilot/contractor/hostscore.go @@ -22,7 +22,7 @@ const ( minValidScore = math.SmallestNonzeroFloat64 ) -func hostScore(cfg api.AutopilotConfig, h api.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown { +func hostScore(cfg api.AutopilotConfig, h api.Host, expectedRedundancy float64) api.HostScoreBreakdown { cCfg := cfg.Contracts // idealDataPerHost is the amount of data that we would have to put on each // host assuming that our storage requirements were spread evenly across @@ -44,7 +44,7 @@ func hostScore(cfg api.AutopilotConfig, h api.Host, storedData uint64, expectedR Collateral: collateralScore(cCfg, h.PriceTable.HostPriceTable, uint64(allocationPerHost)), Interactions: interactionScore(h), Prices: priceAdjustmentScore(hostPeriodCost, cCfg), - StorageRemaining: storageRemainingScore(h.Settings, storedData, allocationPerHost), + StorageRemaining: storageRemainingScore(h.Settings, h.StoredData, allocationPerHost), Uptime: uptimeScore(h), Version: versionScore(h.Settings, cfg.Hosts.MinProtocolVersion), } 
diff --git a/autopilot/contractor/hostscore_test.go b/autopilot/contractor/hostscore_test.go index 84f964692..ae1b7668e 100644 --- a/autopilot/contractor/hostscore_test.go +++ b/autopilot/contractor/hostscore_test.go @@ -42,13 +42,13 @@ func TestHostScore(t *testing.T) { // assert both hosts score equal redundancy := 3.0 - if hostScore(cfg, h1, 0, redundancy) != hostScore(cfg, h2, 0, redundancy) { + if hostScore(cfg, h1, redundancy) != hostScore(cfg, h2, redundancy) { t.Fatal("unexpected") } // assert age affects the score h1.KnownSince = time.Now().Add(-1 * day) - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() { t.Fatal("unexpected") } @@ -57,21 +57,21 @@ func TestHostScore(t *testing.T) { settings.Collateral = settings.Collateral.Div64(2) settings.MaxCollateral = settings.MaxCollateral.Div64(2) h1 = newHost(settings) // reset - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() { t.Fatal("unexpected") } // assert interactions affect the score h1 = newHost(test.NewHostSettings()) // reset h1.Interactions.SuccessfulInteractions++ - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() { t.Fatal("unexpected") } // assert uptime affects the score h2 = newHost(test.NewHostSettings()) // reset h2.Interactions.SecondToLastScanSuccess = false - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() || ageScore(h1) != ageScore(h2) { + if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() || ageScore(h1) != ageScore(h2) { t.Fatal("unexpected") } @@ -79,28 +79,28 @@ func TestHostScore(t *testing.T) { h2Settings := 
test.NewHostSettings() h2Settings.Version = "1.5.6" // lower h2 = newHost(h2Settings) // reset - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() { t.Fatal("unexpected") } // asseret remaining storage affects the score. h1 = newHost(test.NewHostSettings()) // reset h2.Settings.RemainingStorage = 100 - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() { t.Fatal("unexpected") } // assert MaxCollateral affects the score. h2 = newHost(test.NewHostSettings()) // reset h2.PriceTable.MaxCollateral = types.ZeroCurrency - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() { t.Fatal("unexpected") } // assert price affects the score. h2 = newHost(test.NewHostSettings()) // reset h2.PriceTable.WriteBaseCost = types.Siacoins(1) - if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() { + if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() { t.Fatal("unexpected") } } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 55db48c19..a91858aef 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -612,7 +612,7 @@ func TestUploadDownloadBasic(t *testing.T) { // mine a block to get the revisions mined. cluster.MineBlocks(1) - // check the revision height was updated. + // check the revision height and size were updated. tt.Retry(100, 100*time.Millisecond, func() error { // fetch the contracts. 
contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{}) @@ -623,10 +623,21 @@ func TestUploadDownloadBasic(t *testing.T) { for _, c := range contracts { if c.RevisionHeight == 0 { return errors.New("revision height should be > 0") + } else if c.Size != rhpv2.SectorSize { + return fmt.Errorf("size should be %v, got %v", rhpv2.SectorSize, c.Size) } } return nil }) + + // Check that stored data on hosts was updated + hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{}) + tt.OK(err) + for _, host := range hosts { + if host.StoredData != rhpv2.SectorSize { + t.Fatalf("stored data should be %v, got %v", rhpv2.SectorSize, host.StoredData) + } + } } // TestUploadDownloadExtended is an integration test that verifies objects can diff --git a/stores/hostdb.go b/stores/hostdb.go index fa93f85b9..0aa3ab0b2 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -255,7 +255,7 @@ func (dbAllowlistEntry) TableName() string { return "host_allowlist_entries" } func (dbBlocklistEntry) TableName() string { return "host_blocklist_entries" } // convert converts a host into a api.HostInfo -func (h dbHost) convert(blocked bool) api.Host { +func (h dbHost) convert(blocked bool, storedData uint64) api.Host { var lastScan time.Time if h.LastScan > 0 { lastScan = time.Unix(0, h.LastScan) @@ -283,11 +283,12 @@ func (h dbHost) convert(blocked bool) api.Host { HostPriceTable: h.PriceTable.convert(), Expiry: h.PriceTableExpiry.Time, }, - PublicKey: types.PublicKey(h.PublicKey), - Scanned: h.Scanned, - Settings: rhpv2.HostSettings(h.Settings), - Blocked: blocked, - Checks: checks, + PublicKey: types.PublicKey(h.PublicKey), + Scanned: h.Scanned, + Settings: rhpv2.HostSettings(h.Settings), + Blocked: blocked, + Checks: checks, + StoredData: storedData, } } @@ -571,9 +572,25 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, us Preload("Blocklist") } + // fetch stored data for each host + var storedData []struct { + 
HostID uint + StoredData uint64 + } + err := ss.db.Raw("SELECT host_id, SUM(size) as StoredData FROM contracts GROUP BY host_id"). + Scan(&storedData). + Error + if err != nil { + return nil, fmt.Errorf("failed to fetch stored data: %w", err) + } + storedDataMap := make(map[uint]uint64) + for _, host := range storedData { + storedDataMap[host.HostID] = host.StoredData + } + var hosts []api.Host var fullHosts []dbHost - err := query. + err = query. Offset(offset). Limit(limit). FindInBatches(&fullHosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error { @@ -584,7 +601,7 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, us } else { blocked = filterMode == api.HostFilterModeBlocked } - hosts = append(hosts, fh.convert(blocked)) + hosts = append(hosts, fh.convert(blocked, storedDataMap[fh.ID])) } return nil }). From 5ce8bb1af4ab8c699124a0314ad07c86a8514d63 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 16 Apr 2024 17:20:50 +0200 Subject: [PATCH 191/201] contractor: don't register failed renewal alert if the host ran out of funds --- autopilot/contractor/contractor.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 00bbef98b..524049c36 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -14,6 +14,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + cwallet "go.sia.tech/coreutils/wallet" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/utils" @@ -984,7 +985,16 @@ func (c *Contractor) runContractRenewals(ctx *mCtx, w Worker, toRenew []contract contract := toRenew[i].contract.ContractMetadata renewed, proceed, err := c.renewContract(ctx, w, toRenew[i], budget) if err != nil { - c.alerter.RegisterAlert(ctx, newContractRenewalFailedAlert(contract, !proceed, err)) + // don't register an alert 
for hosts that are out of funds since the + // user can't do anything about it + if !(worker.IsErrHost(err) && utils.IsErr(err, cwallet.ErrNotEnoughFunds)) { + c.alerter.RegisterAlert(ctx, newContractRenewalFailedAlert(contract, !proceed, err)) + } + c.logger.With(zap.Error(err)). + With("fcid", toRenew[i].contract.ID). + With("hostKey", toRenew[i].contract.HostKey). + With("proceed", proceed). + Errorw("failed to renew contract") if toRenew[i].usable { toKeep = append(toKeep, toRenew[i].contract.ContractMetadata) } From cfec73033ad862759ab128890bd863a60f3da9c7 Mon Sep 17 00:00:00 2001 From: ChrisSchinnerl Date: Tue, 16 Apr 2024 18:02:39 +0000 Subject: [PATCH 192/201] ui: v0.51.0 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8e6cd31d0..32d19a871 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca - go.sia.tech/web/renterd v0.50.0 + go.sia.tech/web/renterd v0.51.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.22.0 golang.org/x/sys v0.19.0 diff --git a/go.sum b/go.sum index ae1db77fe..af6431ed2 100644 --- a/go.sum +++ b/go.sum @@ -259,8 +259,8 @@ go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca h1:aZMg2AKevn7jKx+wlusWQf go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca/go.mod h1:h/1afFwpxzff6/gG5i1XdAgPK7dEY6FaibhK7N5F86Y= go.sia.tech/web v0.0.0-20240403135501-82ff3a2a3e7c h1:os2ZFJojHi0ckCNbr8c2GnWGm0ftvHkQUJOfBRGGIfk= go.sia.tech/web v0.0.0-20240403135501-82ff3a2a3e7c/go.mod h1:nGEhGmI8zV/BcC3LOCC5JLVYpidNYJIvLGIqVRWQBCg= -go.sia.tech/web/renterd v0.50.0 h1:Q955SDKAIej3vEr+P9nOjpgxCKaO+noTnOSUF30SGsc= -go.sia.tech/web/renterd v0.50.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= +go.sia.tech/web/renterd v0.51.0 h1:hQfq6vOMll2lseQMaK9tUtc6RscO3zgLOzhCk9myHTk= +go.sia.tech/web/renterd v0.51.0/go.mod 
h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= From 1bea53efff32ba3e80435e5bb2317b07779b9728 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 17 Apr 2024 13:51:38 +0200 Subject: [PATCH 193/201] contractor: address comments --- autopilot/contractor/contractor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index 627ac92d1..a2a8e81e1 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -282,7 +282,7 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, erro usedHosts[contract.HostKey] = struct{}{} } - // compile map of stored data per host + // compile map of stored data per contract contractData := make(map[types.FileContractID]uint64) for _, c := range contracts { contractData[c.ID] = c.FileSize() From f54412af364ef36fc7d3d14868ed5ce97c3071d7 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 17 Apr 2024 14:51:56 +0200 Subject: [PATCH 194/201] bus: remove unused pruning cooldown config --- bus/client/client_test.go | 1 - cmd/renterd/main.go | 1 - internal/node/node.go | 1 - internal/test/e2e/cluster.go | 1 - 4 files changed, 4 deletions(-) diff --git a/bus/client/client_test.go b/bus/client/client_test.go index 9cd1e80e7..795439669 100644 --- a/bus/client/client_test.go +++ b/bus/client/client_test.go @@ -80,7 +80,6 @@ func newTestClient(dir string) (*client.Client, func() error, func(context.Conte }, Miner: node.NewMiner(client), SlabPruningInterval: time.Minute, - SlabPruningCooldown: time.Minute, }, filepath.Join(dir, "bus"), types.GeneratePrivateKey(), zap.New(zapcore.NewNopCore())) if err != nil { return nil, nil, nil, err diff --git a/cmd/renterd/main.go 
b/cmd/renterd/main.go index 3005b4b34..c569b304b 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -436,7 +436,6 @@ func main() { Bus: cfg.Bus, Network: network, SlabPruningInterval: time.Hour, - SlabPruningCooldown: 30 * time.Second, } // Init db dialector if cfg.Database.MySQL.URI != "" { diff --git a/internal/node/node.go b/internal/node/node.go index d247f199a..293363653 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -45,7 +45,6 @@ type BusConfig struct { DBDialector gorm.Dialector DBMetricsDialector gorm.Dialector SlabPruningInterval time.Duration - SlabPruningCooldown time.Duration } type AutopilotConfig struct { diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 1a8c016c6..b102b6148 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -896,7 +896,6 @@ func testBusCfg() node.BusConfig { }, Network: testNetwork(), SlabPruningInterval: time.Second, - SlabPruningCooldown: 10 * time.Millisecond, } } From 7a703da862a67708c8a791f8aa57c02614739456 Mon Sep 17 00:00:00 2001 From: PJ Date: Wed, 17 Apr 2024 15:03:28 +0200 Subject: [PATCH 195/201] testing: use debug level logging in testing --- internal/test/e2e/cluster.go | 2 +- internal/test/e2e/cluster_test.go | 13 +++---------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 1a8c016c6..095c8141d 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -168,7 +168,7 @@ type testClusterOptions struct { // newTestLogger creates a console logger used for testing. 
func newTestLogger() *zap.Logger { - return newTestLoggerCustom(zapcore.ErrorLevel) + return newTestLoggerCustom(zapcore.DebugLevel) } // newTestLoggerCustom creates a console logger used for testing and allows diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index a91858aef..72ac4b95b 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -27,7 +27,6 @@ import ( "go.sia.tech/renterd/object" "go.sia.tech/renterd/wallet" "go.uber.org/zap" - "go.uber.org/zap/zapcore" "lukechampine.com/frand" ) @@ -1453,9 +1452,7 @@ func TestWalletTransactions(t *testing.T) { t.SkipNow() } - cluster := newTestCluster(t, testClusterOptions{ - logger: newTestLoggerCustom(zapcore.DebugLevel), - }) + cluster := newTestCluster(t, clusterOptsDefault) defer cluster.Shutdown() b := cluster.Bus tt := cluster.tt @@ -1709,9 +1706,7 @@ func TestWallet(t *testing.T) { t.SkipNow() } - cluster := newTestCluster(t, testClusterOptions{ - logger: newTestLoggerCustom(zapcore.DebugLevel), - }) + cluster := newTestCluster(t, clusterOptsDefault) defer cluster.Shutdown() b := cluster.Bus tt := cluster.tt @@ -1916,9 +1911,7 @@ func TestAlerts(t *testing.T) { t.SkipNow() } - cluster := newTestCluster(t, testClusterOptions{ - logger: newTestLoggerCustom(zapcore.DebugLevel), - }) + cluster := newTestCluster(t, clusterOptsDefault) defer cluster.Shutdown() b := cluster.Bus tt := cluster.tt From b2d20f4f93095216c7d8d7ec1fbb0f84bd3b14fb Mon Sep 17 00:00:00 2001 From: Alex Freska Date: Wed, 17 Apr 2024 11:00:52 -0400 Subject: [PATCH 196/201] autopilot: add missing json tag to ConfigEvaluationResponse --- api/autopilot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/autopilot.go b/api/autopilot.go index 3938d42b7..9ca917f6e 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -123,7 +123,7 @@ type ( } `json:"gouging"` NotAcceptingContracts uint64 `json:"notAcceptingContracts"` NotScanned uint64 `json:"notScanned"` - } + } 
`json:"unusable"` Recommendation *ConfigRecommendation `json:"recommendation,omitempty"` } ) From ab1dc52a10c72a51eb160e3824a83f04f44c8651 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 18 Apr 2024 10:19:16 +0200 Subject: [PATCH 197/201] all: update ctx.Err() usage to context.Cause(ctx) --- stores/metadata.go | 2 +- worker/download.go | 2 +- worker/host_test.go | 2 +- worker/pricetables.go | 2 +- worker/rhpv2.go | 4 ++-- worker/rhpv3.go | 2 +- worker/upload.go | 2 +- worker/worker.go | 4 ++-- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 56202ccf3..87a63c7bc 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2050,7 +2050,7 @@ UPDATE objects SET health = ( } select { case <-ctx.Done(): - return ctx.Err() + return context.Cause(ctx) case <-time.After(time.Second): } } diff --git a/worker/download.go b/worker/download.go index 83d4bec3e..2e69f7375 100644 --- a/worker/download.go +++ b/worker/download.go @@ -731,7 +731,7 @@ loop: case <-s.mgr.shutdownCtx.Done(): return nil, false, errors.New("download stopped") case <-ctx.Done(): - return nil, false, ctx.Err() + return nil, false, context.Cause(ctx) case <-resps.c: resetOverdrive() } diff --git a/worker/host_test.go b/worker/host_test.go index dcc089154..cb88f1748 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -97,7 +97,7 @@ func (h *testHost) UploadSector(ctx context.Context, sectorRoot types.Hash256, s select { case <-time.After(h.uploadDelay): case <-ctx.Done(): - return ctx.Err() + return context.Cause(ctx) } } return nil diff --git a/worker/pricetables.go b/worker/pricetables.go index 9ca4b1541..c4c693e0a 100644 --- a/worker/pricetables.go +++ b/worker/pricetables.go @@ -137,7 +137,7 @@ func (p *priceTable) fetch(ctx context.Context, rev *types.FileContractRevision) } else if ongoing { select { case <-ctx.Done(): - return api.HostPriceTable{}, fmt.Errorf("%w; %w", errPriceTableUpdateTimedOut, ctx.Err()) + return 
api.HostPriceTable{}, fmt.Errorf("%w; %w", errPriceTableUpdateTimedOut, context.Cause(ctx)) case <-update.done: } return update.hpt, update.err diff --git a/worker/rhpv2.go b/worker/rhpv2.go index 7207b96fa..1a6bd3cfd 100644 --- a/worker/rhpv2.go +++ b/worker/rhpv2.go @@ -645,8 +645,8 @@ func (w *worker) withTransportV2(ctx context.Context, hostKey types.PublicKey, h }() defer func() { close(done) - if ctx.Err() != nil { - err = ctx.Err() + if context.Cause(ctx) != nil { + err = context.Cause(ctx) } }() t, err := rhpv2.NewRenterTransport(conn, hostKey) diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 22b75adc3..dc483c340 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -237,7 +237,7 @@ func dialTransport(ctx context.Context, siamuxAddr string, hostKey types.PublicK case <-ctx.Done(): conn.Close() <-done - return nil, ctx.Err() + return nil, context.Cause(ctx) case <-done: return t, err } diff --git a/worker/upload.go b/worker/upload.go index 718d717ab..4a97099bb 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -873,7 +873,7 @@ loop: case <-u.shutdownCtx.Done(): return nil, 0, 0, ErrShuttingDown case <-ctx.Done(): - return nil, 0, 0, ctx.Err() + return nil, 0, 0, context.Cause(ctx) case resp := <-respChan: // receive the response used, done = slab.receive(resp) diff --git a/worker/worker.go b/worker/worker.go index cd3d545eb..60dfb475d 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -1431,7 +1431,7 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty // scan: second try select { case <-ctx.Done(): - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, ctx.Err() + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, context.Cause(ctx) case <-time.After(time.Second): } settings, pt, duration, err = scan() @@ -1449,7 +1449,7 @@ func (w *worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty // repercussions select { case <-ctx.Done(): - return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, 
ctx.Err() + return rhpv2.HostSettings{}, rhpv3.HostPriceTable{}, 0, context.Cause(ctx) default: } From c0f807596018df1e42d2fbef2f8d1baa04dd28f4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 18 Apr 2024 17:27:22 +0200 Subject: [PATCH 198/201] go.mod: update gofakes3 dependency --- go.mod | 8 +- go.sum | 2163 ++++++++++++++++++++++++++++++++++++++++++++++- worker/s3/s3.go | 2 + 3 files changed, 2156 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 32d19a871..e10febd5a 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe go.sia.tech/core v0.2.2 go.sia.tech/coreutils v0.0.3 - go.sia.tech/gofakes3 v0.0.1 + go.sia.tech/gofakes3 v0.0.2 go.sia.tech/hostd v1.0.4 go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 @@ -35,7 +35,7 @@ require ( require ( github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect - github.com/aws/aws-sdk-go v1.51.7 // indirect + github.com/aws/aws-sdk-go v1.51.21 // indirect github.com/cloudflare/cloudflare-go v0.91.0 // indirect github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -79,10 +79,10 @@ require ( gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect go.sia.tech/web v0.0.0-20240403135501-82ff3a2a3e7c // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.22.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.20.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect nhooyr.io/websocket v1.8.10 // indirect ) diff --git a/go.sum b/go.sum index af6431ed2..eb5afaf45 100644 --- a/go.sum +++ b/go.sum @@ -1,29 +1,1203 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= 
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= +cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= 
+cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accessapproval v1.7.2/go.mod h1:/gShiq9/kK/h8T/eEn1BTzalDvk0mZxJlhfw0p+Xuc0= +cloud.google.com/go/accessapproval v1.7.3/go.mod h1:4l8+pwIxGTNqSf4T3ds8nLO94NQf0W/KnMNuQ9PbnP8= +cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/accesscontextmanager v1.8.2/go.mod h1:E6/SCRM30elQJ2PKtFMs2YhfJpZSNcJyejhuzoId4Zk= +cloud.google.com/go/accesscontextmanager v1.8.3/go.mod h1:4i/JkF2JiFbhLnnpnfoTX5vRXfhf9ukhU1ANOTALTOQ= +cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= 
+cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/aiplatform v1.50.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= +cloud.google.com/go/aiplatform v1.51.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= +cloud.google.com/go/aiplatform v1.51.1/go.mod h1:kY3nIMAVQOK2XDqDPHaOuD9e+FdMA6OOpfBjsvaFSOo= +cloud.google.com/go/aiplatform v1.51.2/go.mod h1:hCqVYB3mY45w99TmetEoe8eCQEwZEp9WHxeZdcv9phw= +cloud.google.com/go/aiplatform v1.52.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/analytics v0.21.4/go.mod h1:zZgNCxLCy8b2rKKVfC1YkC2vTrpfZmeRCySM3aUbskA= +cloud.google.com/go/analytics v0.21.5/go.mod h1:BQtOBHWTlJ96axpPPnw5CvGJ6i3Ve/qX2fTxR8qWyr8= +cloud.google.com/go/analytics v0.21.6/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigateway v1.6.2/go.mod h1:CwMC90nnZElorCW63P2pAYm25AtQrHfuOkbRSHj0bT8= 
+cloud.google.com/go/apigateway v1.6.3/go.mod h1:k68PXWpEs6BVDTtnLQAyG606Q3mz8pshItwPXjgv44Y= +cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeconnect v1.6.2/go.mod h1:s6O0CgXT9RgAxlq3DLXvG8riw8PYYbU/v25jqP3Dy18= +cloud.google.com/go/apigeeconnect v1.6.3/go.mod h1:peG0HFQ0si2bN15M6QSjEW/W7Gy3NYkWGz7pFz13cbo= +cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/apigeeregistry v0.7.2/go.mod h1:9CA2B2+TGsPKtfi3F7/1ncCCsL62NXBRfM6iPoGSM+8= +cloud.google.com/go/apigeeregistry v0.8.1/go.mod h1:MW4ig1N4JZQsXmBSwH4rwpgDonocz7FPBSw6XPGHmYw= +cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod 
h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/appengine v1.8.2/go.mod h1:WMeJV9oZ51pvclqFN2PqHoGnys7rK0rz6s3Mp6yMvDo= +cloud.google.com/go/appengine v1.8.3/go.mod h1:2oUPZ1LVZ5EXi+AF1ihNAF+S8JrzQ3till5m9VQkrsk= +cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/area120 v0.8.2/go.mod h1:a5qfo+x77SRLXnCynFWPUZhnZGeSgvQ+Y0v1kSItkh4= +cloud.google.com/go/area120 v0.8.3/go.mod h1:5zj6pMzVTH+SVHljdSKC35sriR/CVvQZzG/Icdyriw0= +cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod 
h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/artifactregistry v1.14.2/go.mod h1:Xk+QbsKEb0ElmyeMfdHAey41B+qBq3q5R5f5xD4XT3U= +cloud.google.com/go/artifactregistry v1.14.3/go.mod h1:A2/E9GXnsyXl7GUvQ/2CjHA+mVRoWAXC0brg2os+kNI= +cloud.google.com/go/artifactregistry v1.14.4/go.mod h1:SJJcZTMv6ce0LDMUnihCN7WSrI+kBSFV0KIKo8S8aYU= +cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/asset v1.15.0/go.mod h1:tpKafV6mEut3+vN9ScGvCHXHj7FALFVta+okxFECHcg= +cloud.google.com/go/asset v1.15.1/go.mod h1:yX/amTvFWRpp5rcFq6XbCxzKT8RJUam1UoboE179jU4= +cloud.google.com/go/asset v1.15.2/go.mod h1:B6H5tclkXvXz7PD22qCA2TDxSVQfasa3iDlM89O2NXs= +cloud.google.com/go/asset v1.15.3/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod 
h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/assuredworkloads v1.11.2/go.mod h1:O1dfr+oZJMlE6mw0Bp0P1KZSlj5SghMBvTpZqIcUAW4= +cloud.google.com/go/assuredworkloads v1.11.3/go.mod h1:vEjfTKYyRUaIeA0bsGJceFV2JKpVRgyG2op3jfa59Zs= +cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/automl v1.13.2/go.mod h1:gNY/fUmDEN40sP8amAX3MaXkxcqPIn7F1UIIPZpy4Mg= +cloud.google.com/go/automl v1.13.3/go.mod h1:Y8KwvyAZFOsMAPqUCfNu1AyclbC6ivCUF/MTwORymyY= +cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= 
+cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/baremetalsolution v1.2.0/go.mod h1:68wi9AwPYkEWIUT4SvSGS9UJwKzNpshjHsH4lzk8iOw= +cloud.google.com/go/baremetalsolution v1.2.1/go.mod h1:3qKpKIw12RPXStwQXcbhfxVj1dqQGEvcmA+SX/mUR88= +cloud.google.com/go/baremetalsolution v1.2.2/go.mod h1:O5V6Uu1vzVelYahKfwEWRMaS3AbCkeYHy3145s1FkhM= +cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/batch v1.4.1/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= +cloud.google.com/go/batch v1.5.0/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= +cloud.google.com/go/batch v1.5.1/go.mod h1:RpBuIYLkQu8+CWDk3dFD/t/jOCGuUpkpX+Y0n1Xccs8= +cloud.google.com/go/batch v1.6.1/go.mod h1:urdpD13zPe6YOK+6iZs/8/x2VBRofvblLpx0t57vM98= +cloud.google.com/go/batch v1.6.3/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= 
+cloud.google.com/go/beyondcorp v1.0.1/go.mod h1:zl/rWWAFVeV+kx+X2Javly7o1EIQThU4WlkynffL/lk= +cloud.google.com/go/beyondcorp v1.0.2/go.mod h1:m8cpG7caD+5su+1eZr+TSvF6r21NdLJk4f9u4SP2Ntc= +cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.55.0/go.mod h1:9Y5I3PN9kQWuid6183JFhOGOW3GcirA5LpsKCUn+2ec= +cloud.google.com/go/bigquery v1.56.0/go.mod h1:KDcsploXTEY7XT3fDQzMUZlpQLHzE4itubHrnmhUrZA= +cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug= +cloud.google.com/go/billing v1.4.0/go.mod 
h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/billing v1.17.0/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= +cloud.google.com/go/billing v1.17.1/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= +cloud.google.com/go/billing v1.17.2/go.mod h1:u/AdV/3wr3xoRBk5xvUzYMS1IawOAPwQMuHgHMdljDg= +cloud.google.com/go/billing v1.17.3/go.mod h1:z83AkoZ7mZwBGT3yTnt6rSGI1OOsHSIi6a5M3mJ8NaU= +cloud.google.com/go/billing v1.17.4/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/binaryauthorization v1.7.0/go.mod h1:Zn+S6QqTMn6odcMU1zDZCJxPjU2tZPV1oDl45lWY154= +cloud.google.com/go/binaryauthorization v1.7.1/go.mod h1:GTAyfRWYgcbsP3NJogpV3yeunbUIjx2T9xVeYovtURE= +cloud.google.com/go/binaryauthorization v1.7.2/go.mod h1:kFK5fQtxEp97m92ziy+hbu+uKocka1qRRL8MVJIgjv0= 
+cloud.google.com/go/binaryauthorization v1.7.3/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/certificatemanager v1.7.2/go.mod h1:15SYTDQMd00kdoW0+XY5d9e+JbOPjp24AvF48D8BbcQ= +cloud.google.com/go/certificatemanager v1.7.3/go.mod h1:T/sZYuC30PTag0TLo28VedIRIj1KPGcOQzjWAptHa00= +cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/channel v1.17.0/go.mod h1:RpbhJsGi/lXWAUM1eF4IbQGbsfVlg2o8Iiy2/YLfVT0= +cloud.google.com/go/channel v1.17.1/go.mod h1:xqfzcOZAcP4b/hUDH0GkGg1Sd5to6di1HOJn/pi5uBQ= +cloud.google.com/go/channel v1.17.2/go.mod h1:aT2LhnftnyfQceFql5I/mP8mIbiiJS4lWqgXA815zMk= +cloud.google.com/go/channel v1.17.3/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod 
h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.14.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.14.1/go.mod h1:K7wGc/3zfvmYWOWwYTgF/d/UVJhS4pu+HAy7PL7mCsU= +cloud.google.com/go/cloudbuild v1.14.2/go.mod h1:Bn6RO0mBYk8Vlrt+8NLrru7WXlQ9/RDWz2uo5KG1/sg= +cloud.google.com/go/cloudbuild v1.14.3/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/clouddms v1.7.0/go.mod h1:MW1dC6SOtI/tPNCciTsXtsGNEM0i0OccykPvv3hiYeM= +cloud.google.com/go/clouddms v1.7.1/go.mod h1:o4SR8U95+P7gZ/TX+YbJxehOCsM+fe6/brlrFquiszk= +cloud.google.com/go/clouddms v1.7.2/go.mod h1:Rk32TmWmHo64XqDvW7jgkFQet1tUKNVzs7oajtJT3jU= +cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod 
h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/cloudtasks v1.12.2/go.mod h1:A7nYkjNlW2gUoROg1kvJrQGhJP/38UaWwsnuBDOBVUk= +cloud.google.com/go/cloudtasks v1.12.3/go.mod h1:GPVXhIOSGEaR+3xT4Fp72ScI+HjHffSS4B8+BaBB5Ys= +cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= 
+cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= +cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/contactcenterinsights v1.11.0/go.mod h1:hutBdImE4XNZ1NV4vbPJKSFOnQruhC5Lj9bZqWMTKiU= +cloud.google.com/go/contactcenterinsights v1.11.1/go.mod h1:FeNP3Kg8iteKM80lMwSk3zZZKVxr+PGnAId6soKuXwE= +cloud.google.com/go/contactcenterinsights v1.11.2/go.mod h1:A9PIR5ov5cRcd28KlDbmmXE8Aay+Gccer2h4wzkYFso= +cloud.google.com/go/contactcenterinsights v1.11.3/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= 
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.26.0/go.mod h1:YJCmRet6+6jnYYRS000T6k0D0xUXQgBSaJ7VwI8FBj4= +cloud.google.com/go/container v1.26.1/go.mod h1:5smONjPRUxeEpDG7bMKWfDL4sauswqEtnBK1/KKpR04= +cloud.google.com/go/container v1.26.2/go.mod h1:YlO84xCt5xupVbLaMY4s3XNE79MUJ+49VmkInr6HvF4= +cloud.google.com/go/container v1.27.1/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/containeranalysis v0.11.0/go.mod h1:4n2e99ZwpGxpNcz+YsFT1dfOHPQFGcAC8FN2M2/ne/U= +cloud.google.com/go/containeranalysis v0.11.1/go.mod h1:rYlUOM7nem1OJMKwE1SadufX0JP3wnXj844EtZAwWLY= +cloud.google.com/go/containeranalysis v0.11.2/go.mod h1:xibioGBC1MD2j4reTyV1xY1/MvKaz+fyM9ENWhmIeP8= +cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod 
h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E= +cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.17.1/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= +cloud.google.com/go/datacatalog v1.18.0/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= +cloud.google.com/go/datacatalog v1.18.1/go.mod h1:TzAWaz+ON1tkNr4MOcak8EBHX7wIRX/gZKM+yTVsv+A= +cloud.google.com/go/datacatalog v1.18.2/go.mod h1:SPVgWW2WEMuWHA+fHodYjmxPiMqcOiWfhc9OD5msigk= +cloud.google.com/go/datacatalog v1.18.3/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataflow v0.9.2/go.mod h1:vBfdBZ/ejlTaYIGB3zB4T08UshH70vbtZeMD+urnUSo= +cloud.google.com/go/dataflow v0.9.3/go.mod h1:HI4kMVjcHGTs3jTHW/kv3501YW+eloiJSLxkJa/vqFE= +cloud.google.com/go/dataflow v0.9.4/go.mod 
h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/dataform v0.8.2/go.mod h1:X9RIqDs6NbGPLR80tnYoPNiO1w0wenKTb8PxxlhTMKM= +cloud.google.com/go/dataform v0.8.3/go.mod h1:8nI/tvv5Fso0drO3pEjtowz58lodx8MVkdV2q0aPlqg= +cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datafusion v1.7.2/go.mod h1:62K2NEC6DRlpNmI43WHMWf9Vg/YvN6QVi8EVwifElI0= +cloud.google.com/go/datafusion v1.7.3/go.mod h1:eoLt1uFXKGBq48jy9LZ+Is8EAVLnmn50lNncLzwYokE= +cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/datalabeling v0.8.2/go.mod 
h1:cyDvGHuJWu9U/cLDA7d8sb9a0tWLEletStu2sTmg3BE= +cloud.google.com/go/datalabeling v0.8.3/go.mod h1:tvPhpGyS/V7lqjmb3V0TaDdGvhzgR1JoW7G2bpi2UTI= +cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.10.1/go.mod h1:1MzmBv8FvjYfc7vDdxhnLFNskikkB+3vl475/XdCDhs= +cloud.google.com/go/dataplex v1.10.2/go.mod h1:xdC8URdTrCrZMW6keY779ZT1cTOfV8KEPNsw+LTRT1Y= +cloud.google.com/go/dataplex v1.11.1/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataproc/v2 v2.2.0/go.mod h1:lZR7AQtwZPvmINx5J87DSOOpTfof9LVZju6/Qo4lmcY= +cloud.google.com/go/dataproc/v2 v2.2.1/go.mod h1:QdAJLaBjh+l4PVlVZcmrmhGccosY/omC1qwfQ61Zv/o= +cloud.google.com/go/dataproc/v2 v2.2.2/go.mod h1:aocQywVmQVF4i8CL740rNI/ZRpsaaC1Wh2++BJ7HEJ4= +cloud.google.com/go/dataproc/v2 v2.2.3/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= +cloud.google.com/go/dataqna v0.5.0/go.mod 
h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/dataqna v0.8.2/go.mod h1:KNEqgx8TTmUipnQsScOoDpq/VlXVptUqVMZnt30WAPs= +cloud.google.com/go/dataqna v0.8.3/go.mod h1:wXNBW2uvc9e7Gl5k8adyAMnLush1KVV6lZUhB+rqNu4= +cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.14.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= +cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod 
h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/datastream v1.10.1/go.mod h1:7ngSYwnw95YFyTd5tOGBxHlOZiL+OtpjheqU7t2/s/c= +cloud.google.com/go/datastream v1.10.2/go.mod h1:W42TFgKAs/om6x/CdXX5E4oiAsKlH+e8MTGy81zdYt0= +cloud.google.com/go/datastream v1.10.3/go.mod h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/deploy v1.13.1/go.mod h1:8jeadyLkH9qu9xgO3hVWw8jVr29N1mnW42gRJT8GY6g= +cloud.google.com/go/deploy v1.14.1/go.mod h1:N8S0b+aIHSEeSr5ORVoC0+/mOPUysVt8ae4QkZYolAw= +cloud.google.com/go/deploy v1.14.2/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod 
h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.43.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= +cloud.google.com/go/dialogflow v1.44.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= +cloud.google.com/go/dialogflow v1.44.1/go.mod h1:n/h+/N2ouKOO+rbe/ZnI186xImpqvCVj2DdsWS/0EAk= +cloud.google.com/go/dialogflow v1.44.2/go.mod h1:QzFYndeJhpVPElnFkUXxdlptx0wPnBWLCBT9BvtC3/c= +cloud.google.com/go/dialogflow v1.44.3/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/dlp v1.10.2/go.mod h1:ZbdKIhcnyhILgccwVDzkwqybthh7+MplGC3kZVZsIOQ= +cloud.google.com/go/dlp v1.10.3/go.mod h1:iUaTc/ln8I+QT6Ai5vmuwfw8fqTk2kaz0FvCwhLCom0= +cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= 
+cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.1/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= +cloud.google.com/go/documentai v1.23.0/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= +cloud.google.com/go/documentai v1.23.2/go.mod h1:Q/wcRT+qnuXOpjAkvOV4A+IeQl04q2/ReT7SSbytLSo= +cloud.google.com/go/documentai v1.23.4/go.mod h1:4MYAaEMnADPN1LPN5xboDR5QVB6AgsaxgFdJhitlE2Y= +cloud.google.com/go/documentai v1.23.5/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/domains v0.9.2/go.mod h1:3YvXGYzZG1Temjbk7EyGCuGGiXHJwVNmwIf+E/cUp5I= +cloud.google.com/go/domains v0.9.3/go.mod h1:29k66YNDLDY9LCFKpGFeh6Nj9r62ZKm5EsUJxAl84KU= +cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= +cloud.google.com/go/edgecontainer v1.1.2/go.mod h1:wQRjIzqxEs9e9wrtle4hQPSR1Y51kqN75dgF7UllZZ4= +cloud.google.com/go/edgecontainer v1.1.3/go.mod h1:Ll2DtIABzEfaxaVSbwj3QHFaOOovlDFiWVDu349jSsA= 
+cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/essentialcontacts v1.6.3/go.mod h1:yiPCD7f2TkP82oJEFXFTou8Jl8L6LBRPeBEkTaO0Ggo= +cloud.google.com/go/essentialcontacts v1.6.4/go.mod h1:iju5Vy3d9tJUg0PYMd1nHhjV7xoCXaOAVabrwLaPBEM= +cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/eventarc v1.13.1/go.mod h1:EqBxmGHFrruIara4FUQ3RHlgfCn7yo1HYsu2Hpt/C3Y= +cloud.google.com/go/eventarc v1.13.2/go.mod h1:X9A80ShVu19fb4e5sc/OLV7mpFUKZMwfJFeeWhcIObM= +cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod 
h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/filestore v1.7.2/go.mod h1:TYOlyJs25f/omgj+vY7/tIG/E7BX369triSPzE4LdgE= +cloud.google.com/go/filestore v1.7.3/go.mod h1:Qp8WaEERR3cSkxToxFPHh/b8AACkSut+4qlCjAmKTV0= +cloud.google.com/go/filestore v1.7.4/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/firestore v1.13.0/go.mod h1:QojqqOh8IntInDUSTAh0c8ZsPYAr68Ma8c5DWOy8xb8= +cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/functions v1.15.2/go.mod h1:CHAjtcR6OU4XF2HuiVeriEdELNcnvRZSk1Q8RMqy4lE= +cloud.google.com/go/functions v1.15.3/go.mod h1:r/AMHwBheapkkySEhiZYLDBwVJCdlRwsm4ieJu35/Ug= +cloud.google.com/go/functions v1.15.4/go.mod 
h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkebackup v1.3.1/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkebackup v1.3.2/go.mod h1:OMZbXzEJloyXMC7gqdSB+EOEQ1AKcpGYvO3s1ec5ixk= +cloud.google.com/go/gkebackup v1.3.3/go.mod h1:eMk7/wVV5P22KBakhQnJxWSVftL1p4VBFLpv0kIft7I= +cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkeconnect v0.8.2/go.mod h1:6nAVhwchBJYgQCXD2pHBFQNiJNyAd/wyxljpaa6ZPrY= +cloud.google.com/go/gkeconnect v0.8.3/go.mod h1:i9GDTrfzBSUZGCe98qSu1B8YB8qfapT57PenIb820Jo= +cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw= 
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkehub v0.14.2/go.mod h1:iyjYH23XzAxSdhrbmfoQdePnlMj2EWcvnR+tHdBQsCY= +cloud.google.com/go/gkehub v0.14.3/go.mod h1:jAl6WafkHHW18qgq7kqcrXYzN08hXeK/Va3utN8VKg8= +cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gkemulticloud v1.0.1/go.mod h1:AcrGoin6VLKT/fwZEYuqvVominLriQBCKmbjtnbMjG8= +cloud.google.com/go/gkemulticloud v1.0.2/go.mod h1:+ee5VXxKb3H1l4LZAcgWB/rvI16VTNTrInWxDjAGsGo= +cloud.google.com/go/gkemulticloud v1.0.3/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= 
+cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/gsuiteaddons v1.6.2/go.mod h1:K65m9XSgs8hTF3X9nNTPi8IQueljSdYo9F+Mi+s4MyU= +cloud.google.com/go/gsuiteaddons v1.6.3/go.mod h1:sCFJkZoMrLZT3JTb8uJqgKPNshH2tfXeCwTFRebTq48= +cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= +cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod 
h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/iap v1.9.0/go.mod h1:01OFxd1R+NFrg78S+hoPV5PxEzv22HXaNqUUlmNHFuY= +cloud.google.com/go/iap v1.9.1/go.mod h1:SIAkY7cGMLohLSdBR25BuIxO+I4fXJiL06IBL7cy/5Q= +cloud.google.com/go/iap v1.9.2/go.mod h1:GwDTOs047PPSnwRD0Us5FKf4WDRcVvHg1q9WVkKBhdI= +cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/ids v1.4.2/go.mod h1:3vw8DX6YddRu9BncxuzMyWn0g8+ooUjI2gslJ7FH3vk= +cloud.google.com/go/ids v1.4.3/go.mod h1:9CXPqI3GedjmkjbMWCUhMZ2P2N7TUMzAkVXYEH2orYU= +cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/iot v1.7.2/go.mod h1:q+0P5zr1wRFpw7/MOgDXrG/HVA+l+cSwdObffkrpnSg= +cloud.google.com/go/iot v1.7.3/go.mod h1:t8itFchkol4VgNbHnIq9lXoOOtHNR3uAACQMYbN9N4I= +cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk= +cloud.google.com/go/kms 
v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM= +cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= +cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= +cloud.google.com/go/kms v1.15.4/go.mod h1:L3Sdj6QTHK8dfwK5D1JLsAyELsNMnd3tAIwGS4ltKpc= +cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/language v1.11.0/go.mod h1:uDx+pFDdAKTY8ehpWbiXyQdz8tDSYLJbQcXsCkjYyvQ= +cloud.google.com/go/language v1.11.1/go.mod h1:Xyid9MG9WOX3utvDbpX7j3tXDmmDooMyMDqgUVpH17U= +cloud.google.com/go/language v1.12.1/go.mod 
h1:zQhalE2QlQIxbKIZt54IASBzmZpN/aDASea5zl1l+J4= +cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= +cloud.google.com/go/lifesciences v0.9.2/go.mod h1:QHEOO4tDzcSAzeJg7s2qwnLM2ji8IRpQl4p6m5Z9yTA= +cloud.google.com/go/lifesciences v0.9.3/go.mod h1:gNGBOJV80IWZdkd+xz4GQj4mbqaz737SCLHn2aRhQKM= +cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= +cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= +cloud.google.com/go/longrunning v0.5.3/go.mod h1:y/0ga59EYu58J6SHmmQOvekvND2qODbu8ywBBW7EK7Y= +cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= +cloud.google.com/go/managedidentities v1.3.0/go.mod 
h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/managedidentities v1.6.2/go.mod h1:5c2VG66eCa0WIq6IylRk3TBW83l161zkFvCj28X7jn8= +cloud.google.com/go/managedidentities v1.6.3/go.mod h1:tewiat9WLyFN0Fi7q1fDD5+0N4VUoL0SCX0OTCthZq4= +cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/maps v1.4.1/go.mod h1:BxSa0BnW1g2U2gNdbq5zikLlHUuHW0GFWh7sgML2kIY= +cloud.google.com/go/maps v1.5.1/go.mod h1:NPMZw1LJwQZYCfz4y+EIw+SI+24A4bpdFJqdKVr0lt4= +cloud.google.com/go/maps v1.6.1/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/mediatranslation v0.8.2/go.mod h1:c9pUaDRLkgHRx3irYE5ZC8tfXGrMYwNZdmDqKMSfFp8= +cloud.google.com/go/mediatranslation v0.8.3/go.mod h1:F9OnXTy336rteOEywtY7FOqCk+J43o2RF638hkOQl4Y= +cloud.google.com/go/mediatranslation 
v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/memcache v1.10.2/go.mod h1:f9ZzJHLBrmd4BkguIAa/l/Vle6uTHzHokdnzSWOdQ6A= +cloud.google.com/go/memcache v1.10.3/go.mod h1:6z89A41MT2DVAW0P4iIRdu5cmRTsbsFn4cyiIx8gbwo= +cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/metastore v1.13.0/go.mod h1:URDhpG6XLeh5K+Glq0NOt74OfrPKTwS62gEPZzb5SOk= +cloud.google.com/go/metastore v1.13.1/go.mod h1:IbF62JLxuZmhItCppcIfzBBfUFq0DIB9HPDoLgWrVOU= +cloud.google.com/go/metastore v1.13.2/go.mod h1:KS59dD+unBji/kFebVp8XU/quNSyo8b6N6tPGspKszA= +cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE= +cloud.google.com/go/monitoring v1.7.0/go.mod 
h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= +cloud.google.com/go/monitoring v1.16.1/go.mod h1:6HsxddR+3y9j+o/cMJH6q/KJ/CBTvM/38L/1m7bTRJ4= +cloud.google.com/go/monitoring v1.16.2/go.mod h1:B44KGwi4ZCF8Rk/5n+FWeispDXoKSk9oss2QNlXJBgc= +cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkconnectivity v1.13.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= +cloud.google.com/go/networkconnectivity v1.14.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= +cloud.google.com/go/networkconnectivity v1.14.1/go.mod h1:LyGPXR742uQcDxZ/wv4EI0Vu5N6NKJ77ZYVnDe69Zug= +cloud.google.com/go/networkconnectivity v1.14.2/go.mod h1:5UFlwIisZylSkGG1AdwK/WZUaoz12PKu6wODwIbFzJo= +cloud.google.com/go/networkconnectivity 
v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networkmanagement v1.9.0/go.mod h1:UTUaEU9YwbCAhhz3jEOHr+2/K/MrBk2XxOLS89LQzFw= +cloud.google.com/go/networkmanagement v1.9.1/go.mod h1:CCSYgrQQvW73EJawO2QamemYcOb57LvrDdDU51F0mcI= +cloud.google.com/go/networkmanagement v1.9.2/go.mod h1:iDGvGzAoYRghhp4j2Cji7sF899GnfGQcQRQwgVOWnDw= +cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/networksecurity v0.9.2/go.mod h1:jG0SeAttWzPMUILEHDUvFYdQTl8L/E/KC8iZDj85lEI= +cloud.google.com/go/networksecurity v0.9.3/go.mod h1:l+C0ynM6P+KV9YjOnx+kk5IZqMSLccdBqW6GUoF4p/0= +cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod 
h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/notebooks v1.10.0/go.mod h1:SOPYMZnttHxqot0SGSFSkRrwE29eqnKPBJFqgWmiK2k= +cloud.google.com/go/notebooks v1.10.1/go.mod h1:5PdJc2SgAybE76kFQCWrTfJolCOUQXF97e+gteUUA6A= +cloud.google.com/go/notebooks v1.11.1/go.mod h1:V2Zkv8wX9kDCGRJqYoI+bQAaoVeE5kSiz4yYHd2yJwQ= +cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/optimization v1.5.0/go.mod h1:evo1OvTxeBRBu6ydPlrIRizKY/LJKo/drDMMRKqGEUU= +cloud.google.com/go/optimization v1.5.1/go.mod h1:NC0gnUD5MWVAF7XLdoYVPmYYVth93Q6BUzqAq3ZwtV8= +cloud.google.com/go/optimization v1.6.1/go.mod h1:hH2RYPTTM9e9zOiTaYPTiGPcGdNZVnBSBxjIAJzUkqo= +cloud.google.com/go/optimization v1.6.2/go.mod h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orchestration v1.8.2/go.mod h1:T1cP+6WyTmh6LSZzeUhvGf0uZVmJyTx7t8z7Vg87+A0= +cloud.google.com/go/orchestration 
v1.8.3/go.mod h1:xhgWAYqlbYjlz2ftbFghdyqENYW+JXuhBx9KsjMoGHs= +cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/orgpolicy v1.11.2/go.mod h1:biRDpNwfyytYnmCRWZWxrKF22Nkz9eNVj9zyaBdpm1o= +cloud.google.com/go/orgpolicy v1.11.3/go.mod h1:oKAtJ/gkMjum5icv2aujkP4CxROxPXsBbYGCDbPO8MM= +cloud.google.com/go/orgpolicy v1.11.4/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/osconfig v1.12.2/go.mod h1:eh9GPaMZpI6mEJEuhEjUJmaxvQ3gav+fFEJon1Y8Iw0= +cloud.google.com/go/osconfig v1.12.3/go.mod h1:L/fPS8LL6bEYUi1au832WtMnPeQNT94Zo3FwwV1/xGM= +cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod 
h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/oslogin v1.11.0/go.mod h1:8GMTJs4X2nOAUVJiPGqIWVcDaF0eniEto3xlOxaboXE= +cloud.google.com/go/oslogin v1.11.1/go.mod h1:OhD2icArCVNUxKqtK0mcSmKL7lgr0LVlQz+v9s1ujTg= +cloud.google.com/go/oslogin v1.12.1/go.mod h1:VfwTeFJGbnakxAY236eN8fsnglLiVXndlbcNomY4iZU= +cloud.google.com/go/oslogin v1.12.2/go.mod h1:CQ3V8Jvw4Qo4WRhNPF0o+HAM4DiLuE27Ul9CX9g2QdY= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/phishingprotection v0.8.2/go.mod h1:LhJ91uyVHEYKSKcMGhOa14zMMWfbEdxG032oT6ECbC8= +cloud.google.com/go/phishingprotection v0.8.3/go.mod h1:3B01yO7T2Ra/TMojifn8EoGd4G9jts/6cIO0DgDY9J8= +cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/policytroubleshooter v1.7.1/go.mod 
h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/policytroubleshooter v1.9.0/go.mod h1:+E2Lga7TycpeSTj2FsH4oXxTnrbHJGRlKhVZBLGgU64= +cloud.google.com/go/policytroubleshooter v1.9.1/go.mod h1:MYI8i0bCrL8cW+VHN1PoiBTyNZTstCg2WUw2eVC4c4U= +cloud.google.com/go/policytroubleshooter v1.10.1/go.mod h1:5C0rhT3TDZVxAu8813bwmTvd57Phbl8mr9F4ipOsxEs= +cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/privatecatalog v0.9.2/go.mod h1:RMA4ATa8IXfzvjrhhK8J6H4wwcztab+oZph3c6WmtFc= +cloud.google.com/go/privatecatalog v0.9.3/go.mod h1:K5pn2GrVmOPjXz3T26mzwXLcKivfIJ9R5N79AFCF9UE= +cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= 
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.0/go.mod h1:QuE8EdU9dEnesG8/kG3XuJyNsjEqMlMzg3v3scCJ46c= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.1/go.mod h1:JZYZJOeZjgSSTGP4uz7NlQ4/d1w5hGmksVgM0lbEij0= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.2/go.mod h1:kpaDBOpkwD4G0GVMzG1W6Doy1tFFC97XAV3xy+Rd/pw= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.3/go.mod 
h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommendationengine v0.8.2/go.mod h1:QIybYHPK58qir9CV2ix/re/M//Ty10OxjnnhWdaKS1Y= +cloud.google.com/go/recommendationengine v0.8.3/go.mod h1:m3b0RZV02BnODE9FeSvGv1qibFo8g0OnmB/RMwYy4V8= +cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/recommender v1.11.0/go.mod h1:kPiRQhPyTJ9kyXPCG6u/dlPLbYfFlkwHNRwdzPVAoII= +cloud.google.com/go/recommender v1.11.1/go.mod h1:sGwFFAyI57v2Hc5LbIj+lTwXipGu9NW015rkaEM5B18= +cloud.google.com/go/recommender v1.11.2/go.mod h1:AeoJuzOvFR/emIcXdVFkspVXVTYpliRCmKNYDnyBv6Y= +cloud.google.com/go/recommender v1.11.3/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= 
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/redis v1.13.2/go.mod h1:0Hg7pCMXS9uz02q+LoEVl5dNHUkIQv+C/3L76fandSA= +cloud.google.com/go/redis v1.13.3/go.mod h1:vbUpCKUAZSYzFcWKmICnYgRAhTFg9r+djWqFxDYXi4U= +cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcemanager v1.9.2/go.mod h1:OujkBg1UZg5lX2yIyMo5Vz9O5hf7XQOSV7WxqxxMtQE= +cloud.google.com/go/resourcemanager v1.9.3/go.mod h1:IqrY+g0ZgLsihcfcmqSe+RKp1hzjXwG904B92AwBz6U= +cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/resourcesettings v1.6.2/go.mod h1:mJIEDd9MobzunWMeniaMp6tzg4I2GvD3TTmPkc8vBXk= +cloud.google.com/go/resourcesettings v1.6.3/go.mod 
h1:pno5D+7oDYkMWZ5BpPsb4SO0ewg3IXcmmrUZaMJrFic= +cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/retail v1.14.2/go.mod h1:W7rrNRChAEChX336QF7bnMxbsjugcOCPU44i5kbLiL8= +cloud.google.com/go/retail v1.14.3/go.mod h1:Omz2akDHeSlfCq8ArPKiBxlnRpKEBjUH386JYFLUvXo= +cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/run v1.3.0/go.mod h1:S/osX/4jIPZGg+ssuqh6GNgg7syixKe3YnprwehzHKU= +cloud.google.com/go/run v1.3.1/go.mod h1:cymddtZOzdwLIAsmS6s+Asl4JoXIDm/K1cpZTxV4Q5s= +cloud.google.com/go/run v1.3.2/go.mod h1:SIhmqArbjdU/D9M6JoHaAqnAMKLFtXaVdNeq04NjnVE= +cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod 
h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/scheduler v1.10.2/go.mod h1:O3jX6HRH5eKCA3FutMw375XHZJudNIKVonSCHv7ropY= +cloud.google.com/go/scheduler v1.10.3/go.mod h1:8ANskEM33+sIbpJ+R4xRfw/jzOG+ZFE8WVLy7/yGvbc= +cloud.google.com/go/scheduler v1.10.4/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/secretmanager v1.11.2/go.mod h1:MQm4t3deoSub7+WNwiC4/tRYgDBHJgJPvswqQVB1Vss= +cloud.google.com/go/secretmanager v1.11.3/go.mod h1:0bA2o6FabmShrEy328i67aV+65XoUFFSmVeLBn/51jI= +cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod 
h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/security v1.15.2/go.mod h1:2GVE/v1oixIRHDaClVbHuPcZwAqFM28mXuAKCfMgYIg= +cloud.google.com/go/security v1.15.3/go.mod h1:gQ/7Q2JYUZZgOzqKtw9McShH+MjNvtDpL40J1cT+vBs= +cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/securitycenter v1.23.1/go.mod h1:w2HV3Mv/yKhbXKwOCu2i8bCuLtNP1IMHuiYQn4HJq5s= +cloud.google.com/go/securitycenter v1.24.1/go.mod h1:3h9IdjjHhVMXdQnmqzVnM7b0wMn/1O/U20eWVpMpZjI= +cloud.google.com/go/securitycenter v1.24.2/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= 
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/servicedirectory v1.11.1/go.mod h1:tJywXimEWzNzw9FvtNjsQxxJ3/41jseeILgwU/QLrGI= +cloud.google.com/go/servicedirectory v1.11.2/go.mod h1:KD9hCLhncWRV5jJphwIpugKwM5bn1x0GyVVD4NO8mGg= +cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= 
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/shell v1.7.2/go.mod h1:KqRPKwBV0UyLickMn0+BY1qIyE98kKyI216sH/TuHmc= +cloud.google.com/go/shell v1.7.3/go.mod h1:cTTEz/JdaBsQAeTQ3B6HHldZudFoYBOqjteev07FbIc= +cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/spanner v1.49.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= +cloud.google.com/go/spanner v1.50.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= +cloud.google.com/go/spanner v1.51.0/go.mod h1:c5KNo5LQ1X5tJwma9rSQZsXNBDNvj4/n8BVc3LNahq0= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/speech v1.19.1/go.mod h1:WcuaWz/3hOlzPFOVo9DUsblMIHwxP589y6ZMtaG+iAA= 
+cloud.google.com/go/speech v1.19.2/go.mod h1:2OYFfj+Ch5LWjsaSINuCZsre/789zlcCI3SY4oAi2oI= +cloud.google.com/go/speech v1.20.1/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/storagetransfer v1.10.1/go.mod h1:rS7Sy0BtPviWYTTJVWCSV4QrbBitgPeuK4/FKa4IdLs= 
+cloud.google.com/go/storagetransfer v1.10.2/go.mod h1:meIhYQup5rg9juQJdyppnA/WLQCOguxtk1pr3/vBWzA= +cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/talent v1.6.3/go.mod h1:xoDO97Qd4AK43rGjJvyBHMskiEf3KulgYzcH6YWOVoo= +cloud.google.com/go/talent v1.6.4/go.mod h1:QsWvi5eKeh6gG2DlBkpMaFYZYrYUnIpo34f6/V5QykY= +cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/texttospeech v1.7.2/go.mod h1:VYPT6aTOEl3herQjFHYErTlSZJ4vB00Q2ZTmuVgluD4= +cloud.google.com/go/texttospeech v1.7.3/go.mod h1:Av/zpkcgWfXlDLRYob17lqMstGZ3GqlvJXqKMp2u8so= +cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/tpu v1.6.1/go.mod 
h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/tpu v1.6.2/go.mod h1:NXh3NDwt71TsPZdtGWgAG5ThDfGd32X1mJ2cMaRlVgU= +cloud.google.com/go/tpu v1.6.3/go.mod h1:lxiueqfVMlSToZY1151IaZqp89ELPSrk+3HIQ5HRkbY= +cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/trace v1.10.2/go.mod h1:NPXemMi6MToRFcSxRl2uDnu/qAlAQ3oULUphcHGh1vA= +cloud.google.com/go/trace v1.10.3/go.mod h1:Ke1bgfc73RV3wUFml+uQp7EsDw4dGaETLxB7Iq/r4CY= +cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.9.0/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.9.1/go.mod h1:TWIgDZknq2+JD4iRcojgeDtqGEp154HN/uL6hMvylS8= +cloud.google.com/go/translate v1.9.2/go.mod h1:E3Tc6rUTsQkVrXW6avbUhKJSr7ZE3j7zNmqzXKHqRrY= +cloud.google.com/go/translate v1.9.3/go.mod 
h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/video v1.20.0/go.mod h1:U3G3FTnsvAGqglq9LxgqzOiBc/Nt8zis8S+850N2DUM= +cloud.google.com/go/video v1.20.1/go.mod h1:3gJS+iDprnj8SY6pe0SwLeC5BUW80NjhwX7INWEuWGU= +cloud.google.com/go/video v1.20.2/go.mod h1:lrixr5JeKNThsgfM9gqtwb6Okuqzfo4VrY2xynaViTA= +cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/videointelligence v1.11.2/go.mod h1:ocfIGYtIVmIcWk1DsSGOoDiXca4vaZQII1C85qtoplc= +cloud.google.com/go/videointelligence v1.11.3/go.mod h1:tf0NUaGTjU1iS2KEkGWvO5hRHeCkFK3nPo0/cOZhZAo= +cloud.google.com/go/videointelligence v1.11.4/go.mod 
h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vision/v2 v2.7.3/go.mod h1:V0IcLCY7W+hpMKXK1JYE0LV5llEqVmj+UJChjvA1WsM= +cloud.google.com/go/vision/v2 v2.7.4/go.mod h1:ynDKnsDN/0RtqkKxQZ2iatv3Dm9O+HfRb5djl7l4Vvw= +cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmmigration v1.7.2/go.mod h1:iA2hVj22sm2LLYXGPT1pB63mXHhrH1m/ruux9TwWLd8= +cloud.google.com/go/vmmigration v1.7.3/go.mod h1:ZCQC7cENwmSWlwyTrZcWivchn78YnFniEQYRWQ65tBo= +cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod 
h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vmwareengine v1.0.1/go.mod h1:aT3Xsm5sNx0QShk1Jc1B8OddrxAScYLwzVoaiXfdzzk= +cloud.google.com/go/vmwareengine v1.0.2/go.mod h1:xMSNjIk8/itYrz1JA8nV3Ajg4L4n3N+ugP8JKzk3OaA= +cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/vpcaccess v1.7.2/go.mod h1:mmg/MnRHv+3e8FJUjeSibVFvQF1cCy2MsFaFqxeY1HU= +cloud.google.com/go/vpcaccess v1.7.3/go.mod h1:YX4skyfW3NC8vI3Fk+EegJnlYFatA+dXK4o236EUCUc= +cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/webrisk v1.9.2/go.mod h1:pY9kfDgAqxUpDBOrG4w8deLfhvJmejKB0qd/5uQIPBc= +cloud.google.com/go/webrisk v1.9.3/go.mod 
h1:RUYXe9X/wBDXhVilss7EDLW9ZNa06aowPuinUOPCXH8= +cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/websecurityscanner v1.6.2/go.mod h1:7YgjuU5tun7Eg2kpKgGnDuEOXWIrh8x8lWrJT4zfmas= +cloud.google.com/go/websecurityscanner v1.6.3/go.mod h1:x9XANObUFR+83Cya3g/B9M/yoHVqzxPnFtgF8yYGAXw= +cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= +cloud.google.com/go/workflows v1.12.0/go.mod h1:PYhSk2b6DhZ508tj8HXKaBh+OFe+xdl0dHF/tJdzPQM= +cloud.google.com/go/workflows v1.12.1/go.mod h1:5A95OhD/edtOhQd/O741NSfIMezNTbCwLM1P1tBRGHM= +cloud.google.com/go/workflows v1.12.2/go.mod h1:+OmBIgNqYJPVggnMo9nqmizW0qEXHhmnAzK/CnBqsHc= +cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod 
h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY= github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= 
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.51.7 h1:RRjxHhx9RCjw5AhgpmmShq3F4JDlleSkyhYMQ2xUAe8= -github.com/aws/aws-sdk-go v1.51.7/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.21 h1:UrT6JC9R9PkYYXDZBV0qDKTualMr+bfK2eboTknMgbs= +github.com/aws/aws-sdk-go v1.51.21/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod 
h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.91.0 h1:L7IR+86qrZuEMSjGFg4cwRwtHqC8uCPmMUkP7BD4CPw= github.com/cloudflare/cloudflare-go v0.91.0/go.mod h1:nUqvBUUDRxNzsDSQjbqUNWHEIYAoUlgRmcAzMKlFdKs= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -31,42 +1205,185 @@ github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf h1:K5VXW9LjmJv/xh github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf/go.mod h1:bXVurdTuvOiJu7NHALemFe0JMvC2UmwYHW+7fcZaZ2M= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize 
v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod 
h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gormigrate/gormigrate/v2 v2.1.2 
h1:F/d1hpHbRAvKezziV2CC5KUE82cVe9zTgHSBoOOZ4CY= github.com/go-gormigrate/gormigrate/v2 v2.1.2/go.mod h1:9nHVX6z3FCMCQPA7PThGcA55t22yKQfK/Dnsf5i7hUo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod 
h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod 
h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod 
h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.0/go.mod 
h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= +github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod 
h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= @@ -76,6 +1393,9 @@ github.com/gotd/contrib v0.19.0/go.mod h1:LzPxzRF0FvtpBt/WyODWQnPpk0tm/G9z6RHUoP github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod 
h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -85,9 +1405,14 @@ github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXc github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -102,24 +1427,35 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress 
v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid v1.2.2/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/reedsolomon v1.9.3/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4= github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -127,14 +1463,24 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd 
v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.69 h1:l8AnsQFyY1xiwa/DaQskY4NXSLA2yrGsW5iD9nRPVS0= @@ -153,27 +1499,47 @@ github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= 
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 
h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0= @@ -186,7 +1552,10 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= 
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -194,9 +1563,19 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -204,8 +1583,15 @@ github.com/ugorji/go v1.1.4/go.mod 
h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/vbauerster/mpb/v5 v5.0.3/go.mod h1:h3YxU5CSr8rZP4Q3xZPVB3jJLhWPou63lHEdr9ytH4Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= gitlab.com/NebulousLabs/bolt v1.4.4 h1:3UhpR2qtHs87dJBE3CIzhw48GYSoUUNByJmic0cbu1w= gitlab.com/NebulousLabs/bolt v1.4.4/go.mod h1:ZL02cwhpLNif6aruxvUMqu/Bdy0/lFY21jMFfNAA+O8= gitlab.com/NebulousLabs/demotemutex v0.0.0-20151003192217-235395f71c40 h1:IbucNi8u1a1ErgVFVgg8pERhSyzYe5l+o8krDMnNjWA= @@ -242,13 +1628,25 @@ gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 h1:owERlK gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1:vIutAvl7lmJqLVYTCBY5WDdJomP+V74At8LCeEYoH8w= gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt 
v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.sia.tech/core v0.2.2 h1:33RJrt08o7KyUOY4tITH6ECmRq1lhtapqc/SncIF/2A= go.sia.tech/core v0.2.2/go.mod h1:Zk7HaybEPgkPC1p6e6tTQr8PIeZClTgNcLNGYDLQJeE= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= -go.sia.tech/gofakes3 v0.0.1 h1:8vtYH/B17NJ4GXLWiONfhwBrrmtJtYiofnO3PfjU298= -go.sia.tech/gofakes3 v0.0.1/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= +go.sia.tech/gofakes3 v0.0.2 h1:oWnsYjHvSyf4ddtEH6XO76xeXC10N2cRqduI6B0d/EU= +go.sia.tech/gofakes3 v0.0.2/go.mod h1:+NLzpsL6M0WJvdRGL3q7SWo9O1DdBaBrPGm++Ue9WHo= go.sia.tech/hostd v1.0.4 h1:rFzuNJ7sSFQfdrTHKSNYyMX+wlHyei/vZcVbXmrUl6I= go.sia.tech/hostd v1.0.4/go.mod h1:s1W4/Okfcs2rGM3sC7xL95HY+I/oJ0Dsix3zTER+hpQ= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= @@ -277,122 +1675,798 @@ 
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= 
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image 
v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net 
v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net 
v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod 
h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210421210424-b80969c67360/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod 
h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod 
h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api 
v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod 
h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= +google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= +google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= +google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.128.0/go.mod 
h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= +google.golang.org/api v0.139.0/go.mod h1:CVagp6Eekz9CjGZ718Z+sloknzkDJE7Vc1Ckj9+viBk= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= +google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= 
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod 
h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= 
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto 
v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= 
+google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod 
h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= +google.golang.org/genproto 
v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= +google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= +google.golang.org/genproto/googleapis/api 
v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920183334-c177e329c48b/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc 
v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= @@ -400,6 +2474,7 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -416,9 +2491,71 @@ gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.9 h1:wct0gxZIELDk8+ZqF/MVnHLkA1rvYlBWUMv2EdsK1g8= gorm.io/gorm v1.25.9/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 
v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= +modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= +modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= 
+modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= moul.io/zapgorm2 v1.3.0 h1:+CzUTMIcnafd0d/BvBce8T4uPn6DQnpIrz64cyixlkk= moul.io/zapgorm2 v1.3.0/go.mod h1:nPVy6U9goFKHR4s+zfSo1xVFaoU7Qgd5DoCdOfzoCqs= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/worker/s3/s3.go b/worker/s3/s3.go index 045fdf946..ee93edcf2 100644 --- a/worker/s3/s3.go +++ b/worker/s3/s3.go @@ -61,6 +61,8 @@ func (l *gofakes3Logger) Print(level gofakes3.LogLevel, v ...interface{}) { l.l.Warn(fmt.Sprint(v...)) case gofakes3.LogInfo: l.l.Info(fmt.Sprint(v...)) + case gofakes3.LogDebug: + l.l.Debug(fmt.Sprint(v...)) default: panic("unknown level") } From e121e3df1c922b7403bfbc991821e7afc8a74bc8 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 19 Apr 2024 10:54:56 +0200 Subject: [PATCH 199/201] autopilot: pass period instead of currentPeriod to gouging checker --- autopilot/autopilot.go | 6 +----- autopilot/contractor/evaluate.go | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 166c61d28..66b5792c2 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -195,10 +195,6 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { if jc.Check("failed to get recommended fee", err) != nil { return } - cfg, err := ap.Config(ctx) - if jc.Check("failed to get autopilot config", err) != nil { - return - } // fetch hosts hosts, err := ap.bus.SearchHosts(ctx, api.SearchHostOptions{Limit: -1, FilterMode: api.HostFilterModeAllowed}) @@ -207,7 +203,7 @@ func (ap *Autopilot) configHandlerPOST(jc jape.Context) { } // evaluate the config - jc.Encode(contractor.EvaluateConfig(reqCfg, cs, fee, cfg.CurrentPeriod, rs, gs, hosts)) + jc.Encode(contractor.EvaluateConfig(reqCfg, cs, fee, rs, gs, hosts)) } func (ap *Autopilot) Run() error { diff --git a/autopilot/contractor/evaluate.go b/autopilot/contractor/evaluate.go index 685cb4b70..642387ab5 100644 --- 
a/autopilot/contractor/evaluate.go +++ b/autopilot/contractor/evaluate.go @@ -6,8 +6,8 @@ import ( "go.sia.tech/renterd/worker" ) -func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (usables uint64) { - gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) +func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, period uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (usables uint64) { + gc := worker.NewGougingChecker(gs, cs, fee, period, cfg.Contracts.RenewWindow) for _, host := range hosts { hc := checkHost(cfg, rs, gc, host, minValidScore) if hc.Usability.IsUsable() { @@ -20,8 +20,9 @@ func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types. // EvaluateConfig evaluates the given configuration and if the gouging settings // are too strict for the number of contracts required by 'cfg', it will provide // a recommendation on how to loosen it. 
-func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (resp api.ConfigEvaluationResponse) { - gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow) +func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (resp api.ConfigEvaluationResponse) { + period := cfg.Contracts.Period + gc := worker.NewGougingChecker(gs, cs, fee, period, cfg.Contracts.RenewWindow) resp.Hosts = uint64(len(hosts)) for _, host := range hosts { @@ -88,35 +89,35 @@ func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu // MaxRPCPrice tmpGS := maxGS() tmpGS.MaxRPCPrice = gs.MaxRPCPrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxRPCPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxRPCPrice, cfg, cs, fee, period, rs, hosts) { optimisedGS.MaxRPCPrice = tmpGS.MaxRPCPrice success = true } // MaxContractPrice tmpGS = maxGS() tmpGS.MaxContractPrice = gs.MaxContractPrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxContractPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxContractPrice, cfg, cs, fee, period, rs, hosts) { optimisedGS.MaxContractPrice = tmpGS.MaxContractPrice success = true } // MaxDownloadPrice tmpGS = maxGS() tmpGS.MaxDownloadPrice = gs.MaxDownloadPrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxDownloadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxDownloadPrice, cfg, cs, fee, period, rs, hosts) { optimisedGS.MaxDownloadPrice = tmpGS.MaxDownloadPrice success = true } // MaxUploadPrice tmpGS = maxGS() tmpGS.MaxUploadPrice = gs.MaxUploadPrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxUploadPrice, cfg, cs, fee, currentPeriod, rs, hosts) { + if 
optimiseGougingSetting(&tmpGS, &tmpGS.MaxUploadPrice, cfg, cs, fee, period, rs, hosts) { optimisedGS.MaxUploadPrice = tmpGS.MaxUploadPrice success = true } // MaxStoragePrice tmpGS = maxGS() tmpGS.MaxStoragePrice = gs.MaxStoragePrice - if optimiseGougingSetting(&tmpGS, &tmpGS.MaxStoragePrice, cfg, cs, fee, currentPeriod, rs, hosts) { + if optimiseGougingSetting(&tmpGS, &tmpGS.MaxStoragePrice, cfg, cs, fee, period, rs, hosts) { optimisedGS.MaxStoragePrice = tmpGS.MaxStoragePrice success = true } From 7c1c040af27e8a75ada3a4dc4a062630c5f360b0 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 19 Apr 2024 11:03:06 +0200 Subject: [PATCH 200/201] e2e: extend TestGouging --- internal/test/e2e/gouging_test.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index 657ef6722..d8bb5015b 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -33,6 +33,9 @@ func TestGouging(t *testing.T) { w := cluster.Worker tt := cluster.tt + // mine enough blocks for the current period to become > period + cluster.MineBlocks(int(cfg.Period) * 2) + // build a hosts map hostsMap := make(map[string]*Host) for _, h := range cluster.hosts { @@ -99,11 +102,22 @@ func TestGouging(t *testing.T) { tt.OK(err) if resp.Recommendation == nil { t.Fatal("expected recommendation") + } else if resp.Unusable.Gouging.Gouging != 3 { + t.Fatalf("expected 3 gouging errors, got %v", resp.Unusable.Gouging) } // set optimised settings tt.OK(b.UpdateSetting(context.Background(), api.SettingGouging, resp.Recommendation.GougingSettings)) + // evaluate optimised settings + resp, err = cluster.Autopilot.EvaluateConfig(context.Background(), test.AutopilotConfig, resp.Recommendation.GougingSettings, test.RedundancySettings) + tt.OK(err) + if resp.Recommendation != nil { + t.Fatal("expected no recommendation") + } else if resp.Usable != 3 { + t.Fatalf("expected 3 usable hosts, got %v", 
resp.Usable) + } + // upload some data - should work now once contract maintenance is done tt.Retry(30, time.Second, func() error { _, err := w.UploadObject(context.Background(), bytes.NewReader(data), api.DefaultBucketName, path, api.UploadObjectOptions{}) From 4bc2aed6b9e5807fbc50b540f5332137f6435197 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 19 Apr 2024 15:55:34 +0200 Subject: [PATCH 201/201] e2e: fix TestGouging NDF --- internal/test/e2e/cluster.go | 11 ++++------- internal/test/e2e/gouging_test.go | 5 ++++- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index dca4390f5..552668079 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -689,18 +689,15 @@ func (c *TestCluster) AddHost(h *Host) { c.hosts = append(c.hosts, h) // Fund host from bus. - res, err := c.Bus.Wallet(context.Background()) - c.tt.OK(err) - - fundAmt := res.Confirmed.Div64(2).Div64(uint64(len(c.hosts))) // 50% of bus balance + fundAmt := types.Siacoins(100e3) var scos []types.SiacoinOutput for i := 0; i < 10; i++ { scos = append(scos, types.SiacoinOutput{ - Value: fundAmt.Div64(10), + Value: fundAmt, Address: h.WalletAddress(), }) } - c.tt.OK(c.Bus.SendSiacoins(context.Background(), scos, true)) + c.tt.OK(c.Bus.SendSiacoins(context.Background(), scos, false)) // Mine transaction. 
c.MineBlocks(1) @@ -720,7 +717,7 @@ func (c *TestCluster) AddHost(h *Host) { c.tt.Helper() c.MineBlocks(1) - _, err = c.Bus.Host(context.Background(), h.PublicKey()) + _, err := c.Bus.Host(context.Background(), h.PublicKey()) if err != nil { return err } diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index d8bb5015b..22e67b4ea 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -23,7 +23,6 @@ func TestGouging(t *testing.T) { // create a new test cluster cluster := newTestCluster(t, testClusterOptions{ - hosts: int(test.AutopilotConfig.Contracts.Amount), logger: newTestLoggerCustom(zapcore.ErrorLevel), }) defer cluster.Shutdown() @@ -36,6 +35,10 @@ func TestGouging(t *testing.T) { // mine enough blocks for the current period to become > period cluster.MineBlocks(int(cfg.Period) * 2) + // add hosts + tt.OKAll(cluster.AddHostsBlocking(int(test.AutopilotConfig.Contracts.Amount))) + cluster.WaitForAccounts() + // build a hosts map hostsMap := make(map[string]*Host) for _, h := range cluster.hosts {