diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8e4c21faff..e8a32e5ec1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -47,6 +47,16 @@ jobs: uses: n8maninger/action-golang-test@v1 with: args: "-race;-short" + - name: Test Stores - MySQL + if: matrix.os == 'ubuntu-latest' + uses: n8maninger/action-golang-test@v1 + env: + RENTERD_DB_URI: 127.0.0.1:3800 + RENTERD_DB_USER: root + RENTERD_DB_PASSWORD: test + with: + package: "./stores" + args: "-race;-short" - name: Test Integration uses: n8maninger/action-golang-test@v1 with: diff --git a/alerts/alerts.go b/alerts/alerts.go index 4d6463fa2d..6b009360d9 100644 --- a/alerts/alerts.go +++ b/alerts/alerts.go @@ -35,6 +35,7 @@ const ( type ( Alerter interface { + Alerts(_ context.Context, opts AlertsOpts) (resp AlertsResponse, err error) RegisterAlert(_ context.Context, a Alert) error DismissAlerts(_ context.Context, ids ...types.Hash256) error } @@ -63,8 +64,29 @@ type ( alerts map[types.Hash256]Alert webhookBroadcaster webhooks.Broadcaster } + + AlertsOpts struct { + Offset int + Limit int + Severity Severity + } + + AlertsResponse struct { + Alerts []Alert `json:"alerts"` + HasMore bool `json:"hasMore"` + Totals struct { + Info int `json:"info"` + Warning int `json:"warning"` + Error int `json:"error"` + Critical int `json:"critical"` + } `json:"totals"` + } ) +func (ar AlertsResponse) Total() int { + return ar.Totals.Info + ar.Totals.Warning + ar.Totals.Error + ar.Totals.Critical +} + // String implements the fmt.Stringer interface. func (s Severity) String() string { switch s { @@ -81,15 +103,8 @@ func (s Severity) String() string { } } -// MarshalJSON implements the json.Marshaler interface. -func (s Severity) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`%q`, s.String())), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (s *Severity) UnmarshalJSON(b []byte) error { - status := strings.Trim(string(b), `"`) - switch status { +func (s *Severity) LoadString(str string) error { + switch str { case severityInfoStr: *s = SeverityInfo case severityWarningStr: @@ -99,11 +114,21 @@ func (s *Severity) UnmarshalJSON(b []byte) error { case severityCriticalStr: *s = SeverityCritical default: - return fmt.Errorf("unrecognized severity: %v", status) + return fmt.Errorf("unrecognized severity: %v", str) } return nil } +// MarshalJSON implements the json.Marshaler interface. +func (s Severity) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`%q`, s.String())), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *Severity) UnmarshalJSON(b []byte) error { + return s.LoadString(strings.Trim(string(b), `"`)) +} + // RegisterAlert implements the Alerter interface. func (m *Manager) RegisterAlert(ctx context.Context, alert Alert) error { if alert.ID == (types.Hash256{}) { @@ -158,19 +183,46 @@ func (m *Manager) DismissAlerts(ctx context.Context, ids ...types.Hash256) error }) } -// Active returns the host's active alerts. -func (m *Manager) Active() []Alert { +// Alerts returns the host's active alerts. 
+func (m *Manager) Alerts(_ context.Context, opts AlertsOpts) (AlertsResponse, error) { m.mu.Lock() defer m.mu.Unlock() + offset, limit := opts.Offset, opts.Limit + resp := AlertsResponse{} + + if offset >= len(m.alerts) { + return resp, nil + } else if limit == -1 { + limit = len(m.alerts) + } + alerts := make([]Alert, 0, len(m.alerts)) for _, a := range m.alerts { + if a.Severity == SeverityInfo { + resp.Totals.Info++ + } else if a.Severity == SeverityWarning { + resp.Totals.Warning++ + } else if a.Severity == SeverityError { + resp.Totals.Error++ + } else if a.Severity == SeverityCritical { + resp.Totals.Critical++ + } + if opts.Severity != 0 && a.Severity != opts.Severity { + continue // filter by severity + } alerts = append(alerts, a) } sort.Slice(alerts, func(i, j int) bool { return alerts[i].Timestamp.After(alerts[j].Timestamp) }) - return alerts + alerts = alerts[offset:] + if limit < len(alerts) { + alerts = alerts[:limit] + resp.HasMore = true + } + resp.Alerts = alerts + return resp, nil } func (m *Manager) RegisterWebhookBroadcaster(b webhooks.Broadcaster) { @@ -204,6 +256,11 @@ func WithOrigin(alerter Alerter, origin string) Alerter { } } +// Alerts implements the Alerter interface. +func (a *originAlerter) Alerts(ctx context.Context, opts AlertsOpts) (resp AlertsResponse, err error) { + return a.alerter.Alerts(ctx, opts) +} + // RegisterAlert implements the Alerter interface. func (a *originAlerter) RegisterAlert(ctx context.Context, alert Alert) error { if alert.Data == nil { diff --git a/api/multipart.go b/api/multipart.go index 955b788491..a191b2b139 100644 --- a/api/multipart.go +++ b/api/multipart.go @@ -46,9 +46,10 @@ type ( } CreateMultipartOptions struct { - Key object.EncryptionKey - MimeType string - Metadata ObjectUserMetadata + GenerateKey bool + Key *object.EncryptionKey + MimeType string + Metadata ObjectUserMetadata } ) @@ -81,11 +82,15 @@ type ( } MultipartCreateRequest struct { - Bucket string `json:"bucket"` - Path string `json:"path"` - Key object.EncryptionKey `json:"key"` - MimeType string `json:"mimeType"` - Metadata ObjectUserMetadata `json:"metadata"` + Bucket string `json:"bucket"` + Path string `json:"path"` + Key *object.EncryptionKey `json:"key"` + MimeType string `json:"mimeType"` + Metadata ObjectUserMetadata `json:"metadata"` + + // TODO: The next major version change should invert this to create a + // key by default + GenerateKey bool `json:"generateKey"` } MultipartCreateResponse struct { diff --git a/api/object.go b/api/object.go index 73bb9c45cb..cef672a975 100644 --- a/api/object.go +++ b/api/object.go @@ -54,7 +54,7 @@ type ( Object struct { Metadata ObjectUserMetadata `json:"metadata,omitempty"` ObjectMetadata - object.Object + *object.Object } // ObjectMetadata contains various metadata about an object. @@ -119,6 +119,10 @@ type ( Mode string `json:"mode"` } + ObjectsStatsOpts struct { + Bucket string + } + // ObjectsStatsResponse is the response type for the /bus/stats/objects endpoint. ObjectsStatsResponse struct { NumObjects uint64 `json:"numObjects"` // number of objects @@ -208,13 +212,14 @@ type ( } GetObjectOptions struct { - Prefix string - Offset int - Limit int - IgnoreDelim bool - Marker string - SortBy string - SortDir string + Prefix string + Offset int + Limit int + IgnoreDelim bool + Marker string + OnlyMetadata bool + SortBy string + SortDir string } ListObjectOptions struct { @@ -231,20 +236,18 @@ type ( // UploadObjectOptions is the options type for the worker client. 
UploadObjectOptions struct { - Offset int - MinShards int - TotalShards int - ContractSet string - DisablePreshardingEncryption bool - ContentLength int64 - MimeType string - Metadata ObjectUserMetadata + Offset int + MinShards int + TotalShards int + ContractSet string + ContentLength int64 + MimeType string + Metadata ObjectUserMetadata } UploadMultipartUploadPartOptions struct { - DisablePreshardingEncryption bool - EncryptionOffset int - ContentLength int64 + EncryptionOffset *int + ContentLength int64 } ) @@ -264,9 +267,6 @@ func (opts UploadObjectOptions) ApplyValues(values url.Values) { if opts.MimeType != "" { values.Set("mimetype", opts.MimeType) } - if opts.DisablePreshardingEncryption { - values.Set("disablepreshardingencryption", "true") - } } func (opts UploadObjectOptions) ApplyHeaders(h http.Header) { @@ -276,11 +276,8 @@ func (opts UploadObjectOptions) ApplyHeaders(h http.Header) { } func (opts UploadMultipartUploadPartOptions) Apply(values url.Values) { - if opts.DisablePreshardingEncryption { - values.Set("disablepreshardingencryption", "true") - } - if !opts.DisablePreshardingEncryption || opts.EncryptionOffset != 0 { - values.Set("offset", fmt.Sprint(opts.EncryptionOffset)) + if opts.EncryptionOffset != nil { + values.Set("offset", fmt.Sprint(*opts.EncryptionOffset)) } } @@ -320,6 +317,9 @@ func (opts GetObjectOptions) Apply(values url.Values) { if opts.Marker != "" { values.Set("marker", opts.Marker) } + if opts.OnlyMetadata { + values.Set("onlymetadata", "true") + } if opts.SortBy != "" { values.Set("sortBy", opts.SortBy) } diff --git a/api/setting.go b/api/setting.go index e5fd6da775..8e617047d5 100644 --- a/api/setting.go +++ b/api/setting.go @@ -128,12 +128,12 @@ func (rs RedundancySettings) Redundancy() float64 { // SlabSize returns the size of a slab. func (rs RedundancySettings) SlabSize() uint64 { - return uint64(rs.MinShards) * rhpv2.SectorSize + return uint64(rs.TotalShards) * rhpv2.SectorSize } -// SlabSizeWithRedundancy returns the size of a slab with redundancy. -func (rs RedundancySettings) SlabSizeWithRedundancy() uint64 { - return uint64(rs.TotalShards) * rhpv2.SectorSize +// SlabSizeNoRedundancy returns the size of a slab without added redundancy. 
+func (rs RedundancySettings) SlabSizeNoRedundancy() uint64 { + return uint64(rs.MinShards) * rhpv2.SectorSize } // Validate returns an error if the redundancy settings are not considered diff --git a/autopilot/alerts.go b/autopilot/alerts.go index 7b42991e12..f4762c4d4b 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -14,12 +14,13 @@ import ( ) var ( - alertAccountRefillID = frand.Entropy256() // constant until restarted - alertLostSectorsID = frand.Entropy256() // constant until restarted - alertLowBalanceID = frand.Entropy256() // constant until restarted - alertMigrationID = frand.Entropy256() // constant until restarted - alertPruningID = frand.Entropy256() // constant until restarted - alertRenewalFailedID = frand.Entropy256() // constant until restarted + alertAccountRefillID = randomAlertID() // constant until restarted + alertChurnID = randomAlertID() // constant until restarted + alertLostSectorsID = randomAlertID() // constant until restarted + alertLowBalanceID = randomAlertID() // constant until restarted + alertMigrationID = randomAlertID() // constant until restarted + alertPruningID = randomAlertID() // constant until restarted + alertRenewalFailedID = randomAlertID() // constant until restarted ) func alertIDForAccount(alertID [32]byte, id rhpv3.Account) types.Hash256 { @@ -48,12 +49,26 @@ func (ap *Autopilot) RegisterAlert(ctx context.Context, a alerts.Alert) { } } -func (ap *Autopilot) DismissAlert(ctx context.Context, id types.Hash256) { - if err := ap.alerts.DismissAlerts(ctx, id); err != nil { +func (ap *Autopilot) DismissAlert(ctx context.Context, ids ...types.Hash256) { + if err := ap.alerts.DismissAlerts(ctx, ids...); err != nil { ap.logger.Errorf("failed to dismiss alert: %v", err) } } +func (ap *Autopilot) HasAlert(ctx context.Context, id types.Hash256) bool { + ar, err := ap.alerts.Alerts(ctx, alerts.AlertsOpts{Offset: 0, Limit: -1}) + if err != nil { + ap.logger.Errorf("failed to fetch alerts: %v", err) + return false + } + for _, alert := range ar.Alerts { + if alert.ID == id { + return true + } + } + return false +} + func newAccountLowBalanceAlert(address types.Address, balance, allowance types.Currency, bh, renewWindow, endHeight uint64) alerts.Alert { severity := alerts.SeverityInfo if bh+renewWindow/2 >= endHeight { @@ -137,27 +152,6 @@ func newContractPruningFailedAlert(hk types.PublicKey, version string, fcid type } } -func newContractSetChangeAlert(name string, added, removed int, removedReasons map[string]string) alerts.Alert { - var hint string - if removed > 0 { - hint = "A high churn rate can lead to a lot of unnecessary migrations, it might be necessary to tweak your configuration depending on the reason hosts are being discarded from the set." 
- } - - return alerts.Alert{ - ID: randomAlertID(), - Severity: alerts.SeverityInfo, - Message: "Contract set changed", - Data: map[string]any{ - "name": name, - "added": added, - "removed": removed, - "removals": removedReasons, - "hint": hint, - }, - Timestamp: time.Now(), - } -} - func newLostSectorsAlert(hk types.PublicKey, lostSectors uint64) alerts.Alert { return alerts.Alert{ ID: alertIDForHost(alertLostSectorsID, hk), diff --git a/autopilot/churn.go b/autopilot/churn.go new file mode 100644 index 0000000000..fdc1a0f54f --- /dev/null +++ b/autopilot/churn.go @@ -0,0 +1,68 @@ +package autopilot + +import ( + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" +) + +type ( + accumulatedChurn struct { + additions map[types.FileContractID]contractSetAdditions + removals map[types.FileContractID]contractSetRemovals + } +) + +func newAccumulatedChurn() *accumulatedChurn { + return &accumulatedChurn{ + additions: make(map[types.FileContractID]contractSetAdditions), + removals: make(map[types.FileContractID]contractSetRemovals), + } +} + +func (c *accumulatedChurn) Alert(name string) alerts.Alert { + var hint string + if len(c.removals) > 0 { + hint = "A high churn rate can lead to a lot of unnecessary migrations, it might be necessary to tweak your configuration depending on the reason hosts are being discarded from the set." + } + + return alerts.Alert{ + ID: alertChurnID, + Severity: alerts.SeverityInfo, + Message: "Contract set changed", + Data: map[string]any{ + "name": name, + "set_additions": c.additions, + "set_removals": c.removals, + "hint": hint, + }, + Timestamp: time.Now(), + } +} + +func (c *accumulatedChurn) Apply(additions map[types.FileContractID]contractSetAdditions, removals map[types.FileContractID]contractSetRemovals) { + for fcid, a := range additions { + if _, exists := c.additions[fcid]; !exists { + c.additions[fcid] = a + } else { + additions := c.additions[fcid] + additions.Additions = append(additions.Additions, a.Additions...) + c.additions[fcid] = additions + } + } + for fcid, r := range removals { + if _, exists := c.removals[fcid]; !exists { + c.removals[fcid] = r + } else { + removals := c.removals[fcid] + removals.Removals = append(removals.Removals, r.Removals...) 
+ c.removals[fcid] = removals + } + } +} + +func (c *accumulatedChurn) Reset() { + c.additions = make(map[types.FileContractID]contractSetAdditions) + c.removals = make(map[types.FileContractID]contractSetRemovals) +} diff --git a/autopilot/contractor.go b/autopilot/contractor.go index adad5d1b7c..9e2b52ccab 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -85,6 +85,7 @@ const ( type ( contractor struct { ap *Autopilot + churn *accumulatedChurn resolver *ipResolver logger *zap.SugaredLogger @@ -122,9 +123,30 @@ type ( recoverable bool } + contractSetAdditions struct { + HostKey types.PublicKey `json:"hostKey"` + Additions []contractSetAddition `json:"additions"` + } + + contractSetAddition struct { + Size uint64 `json:"size"` + Time api.TimeRFC3339 `json:"time"` + } + + contractSetRemovals struct { + HostKey types.PublicKey `json:"hostKey"` + Removals []contractSetRemoval `json:"removals"` + } + + contractSetRemoval struct { + Size uint64 `json:"size"` + Reason string `json:"reasons"` + Time api.TimeRFC3339 `json:"time"` + } + renewal struct { - from types.FileContractID - to types.FileContractID + from api.ContractMetadata + to api.ContractMetadata ci contractInfo } ) @@ -132,6 +154,7 @@ type ( func newContractor(ap *Autopilot, revisionSubmissionBuffer uint64, revisionBroadcastInterval time.Duration) *contractor { return &contractor{ ap: ap, + churn: newAccumulatedChurn(), logger: ap.logger.Named("contractor"), revisionBroadcastInterval: revisionBroadcastInterval, @@ -231,11 +254,17 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // check if any used hosts have lost data to warn the user + var toDismiss []types.Hash256 for _, h := range hosts { if h.Interactions.LostSectors > 0 { c.ap.RegisterAlert(ctx, newLostSectorsAlert(h.PublicKey, h.Interactions.LostSectors)) + } else { + toDismiss = append(toDismiss, alertIDForHost(alertLostSectorsID, h.PublicKey)) } } + if len(toDismiss) > 0 { + c.ap.DismissAlert(ctx, toDismiss...) + } // fetch candidate hosts candidates, unusableHosts, err := c.candidateHosts(ctx, hosts, usedHosts, hostData, math.SmallestNonzeroFloat64) // avoid 0 score hosts @@ -325,17 +354,15 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( // set afterwards var renewed []renewal if limit > 0 { - var toKeep []contractInfo + var toKeep []api.ContractMetadata renewed, toKeep = c.runContractRenewals(ctx, w, toRenew, &remaining, limit) for _, ri := range renewed { if ri.ci.usable || ri.ci.recoverable { updatedSet = append(updatedSet, ri.to) } - contractData[ri.to] = contractData[ri.from] - } - for _, ci := range toKeep { - updatedSet = append(updatedSet, ci.contract.ID) + contractData[ri.to.ID] = contractData[ri.from.ID] } + updatedSet = append(updatedSet, toKeep...) 
} // run contract refreshes @@ -347,7 +374,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( if ri.ci.usable || ri.ci.recoverable { updatedSet = append(updatedSet, ri.to) } - contractData[ri.to] = contractData[ri.from] + contractData[ri.to.ID] = contractData[ri.from.ID] } } @@ -360,7 +387,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } // check if we need to form contracts and add them to the contract set - var formed []types.FileContractID + var formed []api.ContractMetadata if uint64(len(updatedSet)) < threshold { // no need to try and form contracts if wallet is completely empty wallet, err := c.ap.bus.Wallet(ctx) @@ -376,34 +403,40 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( } else { for _, fc := range formed { updatedSet = append(updatedSet, fc) - contractData[fc] = 0 + contractData[fc.ID] = 0 } } } } // cap the amount of contracts we want to keep to the configured amount - for _, fcid := range updatedSet { - if _, exists := contractData[fcid]; !exists { - c.logger.Errorf("contract %v not found in contractData", fcid) + for _, contract := range updatedSet { + if _, exists := contractData[contract.ID]; !exists { + c.logger.Errorf("contract %v not found in contractData", contract.ID) } } if len(updatedSet) > int(state.cfg.Contracts.Amount) { // sort by contract size sort.Slice(updatedSet, func(i, j int) bool { - return contractData[updatedSet[i]] > contractData[updatedSet[j]] + return contractData[updatedSet[i].ID] > contractData[updatedSet[j].ID] }) - for _, c := range updatedSet[state.cfg.Contracts.Amount:] { - toStopUsing[c] = "truncated" + for _, contract := range updatedSet[state.cfg.Contracts.Amount:] { + toStopUsing[contract.ID] = "truncated" } updatedSet = updatedSet[:state.cfg.Contracts.Amount] } + // convert to set of file contract ids + var newSet []types.FileContractID + for _, contract := range updatedSet { + newSet = append(newSet, contract.ID) + } + // update contract set if c.ap.isStopped() { return false, errors.New("autopilot stopped before maintenance could be completed") } - err = c.ap.bus.SetContractSet(ctx, state.cfg.Contracts.Set, updatedSet) + err = c.ap.bus.SetContractSet(ctx, state.cfg.Contracts.Set, newSet) if err != nil { return false, err } @@ -412,54 +445,77 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) ( return c.computeContractSetChanged(ctx, state.cfg.Contracts.Set, currentSet, updatedSet, formed, refreshed, renewed, toStopUsing, contractData), nil } -func (c *contractor) computeContractSetChanged(ctx context.Context, name string, oldSet []api.ContractMetadata, newSet, formed []types.FileContractID, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { - // build some maps for easier lookups - previous := make(map[types.FileContractID]struct{}) +func (c *contractor) computeContractSetChanged(ctx context.Context, name string, oldSet, newSet []api.ContractMetadata, formed []api.ContractMetadata, refreshed, renewed []renewal, toStopUsing map[types.FileContractID]string, contractData map[types.FileContractID]uint64) bool { + // build set lookups + inOldSet := make(map[types.FileContractID]struct{}) for _, c := range oldSet { - previous[c.ID] = struct{}{} + inOldSet[c.ID] = struct{}{} } - updated := make(map[types.FileContractID]struct{}) + inNewSet := make(map[types.FileContractID]struct{}) for _, c := range newSet { - updated[c] = 
struct{}{} + inNewSet[c.ID] = struct{}{} } + + // build renewal lookups renewalsFromTo := make(map[types.FileContractID]types.FileContractID) renewalsToFrom := make(map[types.FileContractID]types.FileContractID) for _, c := range append(refreshed, renewed...) { - renewalsFromTo[c.from] = c.to - renewalsToFrom[c.to] = c.from + renewalsFromTo[c.from.ID] = c.to.ID + renewalsToFrom[c.to.ID] = c.from.ID } // log added and removed contracts - var added []types.FileContractID - var removed []types.FileContractID - removedReasons := make(map[string]string) + setAdditions := make(map[types.FileContractID]contractSetAdditions) + setRemovals := make(map[types.FileContractID]contractSetRemovals) + now := api.TimeNow() for _, contract := range oldSet { - _, exists := updated[contract.ID] - _, renewed := updated[renewalsFromTo[contract.ID]] + _, exists := inNewSet[contract.ID] + _, renewed := inNewSet[renewalsFromTo[contract.ID]] if !exists && !renewed { - removed = append(removed, contract.ID) reason, ok := toStopUsing[contract.ID] if !ok { reason = "unknown" } - removedReasons[contract.ID.String()] = reason + + if _, exists := setRemovals[contract.ID]; !exists { + setRemovals[contract.ID] = contractSetRemovals{ + HostKey: contract.HostKey, + } + } + removals := setRemovals[contract.ID] + removals.Removals = append(removals.Removals, contractSetRemoval{ + Size: contractData[contract.ID], + Reason: reason, + Time: now, + }) + setRemovals[contract.ID] = removals c.logger.Debugf("contract %v was removed from the contract set, size: %v, reason: %v", contract.ID, contractData[contract.ID], reason) } } - for _, fcid := range newSet { - _, existed := previous[fcid] - _, renewed := renewalsToFrom[fcid] + for _, contract := range newSet { + _, existed := inOldSet[contract.ID] + _, renewed := renewalsToFrom[contract.ID] if !existed && !renewed { - added = append(added, fcid) - c.logger.Debugf("contract %v was added to the contract set, size: %v", fcid, contractData[fcid]) + if _, exists := setAdditions[contract.ID]; !exists { + setAdditions[contract.ID] = contractSetAdditions{ + HostKey: contract.HostKey, + } + } + additions := setAdditions[contract.ID] + additions.Additions = append(additions.Additions, contractSetAddition{ + Size: contractData[contract.ID], + Time: now, + }) + setAdditions[contract.ID] = additions + c.logger.Debugf("contract %v was added to the contract set, size: %v", contract.ID, contractData[contract.ID]) } } // log renewed contracts that did not make it into the contract set for _, fcid := range renewed { - _, exists := updated[fcid.to] + _, exists := inNewSet[fcid.to.ID] if !exists { - c.logger.Debugf("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to]) + c.logger.Debugf("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to.ID]) } } @@ -470,9 +526,8 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, } // record churn metrics - now := api.TimeNow() var metrics []api.ContractSetChurnMetric - for _, fcid := range added { + for fcid := range setAdditions { metrics = append(metrics, api.ContractSetChurnMetric{ Name: c.ap.state.cfg.Contracts.Set, ContractID: fcid, @@ -480,12 +535,12 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, Timestamp: now, }) } - for _, fcid := range removed { + for fcid, removal := range setRemovals { metrics = append(metrics, api.ContractSetChurnMetric{ Name: c.ap.state.cfg.Contracts.Set, 
ContractID: fcid, Direction: api.ChurnDirRemoved, - Reason: removedReasons[fcid.String()], + Reason: removal.Removals[0].Reason, Timestamp: now, }) } @@ -502,12 +557,16 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string, "renewed", len(renewed), "refreshed", len(refreshed), "contracts", len(newSet), - "added", len(added), - "removed", len(removed), + "added", len(setAdditions), + "removed", len(setRemovals), ) - hasChanged := len(added)+len(removed) > 0 + hasChanged := len(setAdditions)+len(setRemovals) > 0 if hasChanged { - c.ap.RegisterAlert(ctx, newContractSetChangeAlert(name, len(added), len(removed), removedReasons)) + if !c.ap.HasAlert(ctx, alertChurnID) { + c.churn.Reset() + } + c.churn.Apply(setAdditions, setRemovals) + c.ap.RegisterAlert(ctx, c.churn.Alert(name)) } return hasChanged } @@ -602,7 +661,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error { return nil } -func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []types.FileContractID, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { +func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts []api.Contract, inCurrentSet map[types.FileContractID]struct{}, minScore float64) (toKeep []api.ContractMetadata, toArchive, toStopUsing map[types.FileContractID]string, toRefresh, toRenew []contractInfo, _ error) { if c.ap.isStopped() { return } @@ -734,7 +793,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts } else if !state.cfg.Hosts.AllowRedundantIPs && ipFilter.IsRedundantIP(contract.HostIP, contract.HostKey) { toStopUsing[fcid] = fmt.Sprintf("%v; %v", errHostRedundantIP, errContractNoRevision) } else { - toKeep = append(toKeep, fcid) + toKeep = append(toKeep, contract.ContractMetadata) remainingKeepLeeway-- // we let it slide } continue // can't perform contract checks without revision @@ -777,18 +836,17 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts } else if refresh { toRefresh = append(toRefresh, ci) } else if usable { - toKeep = append(toKeep, ci.contract.ID) + toKeep = append(toKeep, ci.contract.ContractMetadata) } } return toKeep, toArchive, toStopUsing, toRefresh, toRenew, nil } -func (c *contractor) runContractFormations(ctx context.Context, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) ([]types.FileContractID, error) { +func (c *contractor) runContractFormations(ctx context.Context, w Worker, candidates scoredHosts, usedHosts map[types.PublicKey]struct{}, unusableHosts unusableHostResult, missing uint64, budget *types.Currency) (formed []api.ContractMetadata, _ error) { if c.ap.isStopped() { return nil, nil } - var formed []types.FileContractID // convenience variables state := c.ap.State() @@ -890,7 +948,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid formedContract, proceed, err := c.formContract(ctx, w, host, minInitialContractFunds, maxInitialContractFunds, budget) if err == nil { // add contract to contract set - formed = append(formed, formedContract.ID) + formed = append(formed, formedContract) missing-- } if !proceed { @@ -970,7 +1028,7 @@ func (c *contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont } } -func (c *contractor) 
runContractRenewals(ctx context.Context, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []contractInfo) { +func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) { c.logger.Debugw( "run contracts renewals", "torenew", len(toRenew), @@ -1004,11 +1062,11 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew if err != nil { c.ap.RegisterAlert(ctx, newContractRenewalFailedAlert(contract, !proceed, err)) if toRenew[i].usable { - toKeep = append(toKeep, toRenew[i]) + toKeep = append(toKeep, toRenew[i].contract.ContractMetadata) } } else { c.ap.DismissAlert(ctx, alertIDForContract(alertRenewalFailedID, contract.ID)) - renewals = append(renewals, renewal{from: contract.ID, to: renewed.ID, ci: toRenew[i]}) + renewals = append(renewals, renewal{from: contract, to: renewed, ci: toRenew[i]}) } // break if we don't want to proceed @@ -1021,7 +1079,7 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew // they're usable and we have 'limit' left for j := i; j < len(toRenew); j++ { if len(renewals)+len(toKeep) < limit && toRenew[j].usable { - toKeep = append(toKeep, toRenew[j]) + toKeep = append(toKeep, toRenew[j].contract.ContractMetadata) } } @@ -1051,7 +1109,7 @@ func (c *contractor) runContractRefreshes(ctx context.Context, w Worker, toRefre // refresh and add if it succeeds renewed, proceed, err := c.refreshContract(ctx, w, ci, budget) if err == nil { - refreshed = append(refreshed, renewal{from: ci.contract.ID, to: renewed.ID, ci: ci}) + refreshed = append(refreshed, renewal{from: ci.contract.ContractMetadata, to: renewed, ci: ci}) } // break if we don't want to proceed diff --git a/build/network.go b/build/network.go index 4183a62bc7..a0a4521896 100644 --- a/build/network.go +++ b/build/network.go @@ -3,9 +3,9 @@ package build //go:generate go run gen.go import ( - "go.sia.tech/core/chain" "go.sia.tech/core/consensus" "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) // Network returns the Sia network consts and genesis block for the current build. 
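The alerts changes above replace Manager.Active with a paginated, severity-filtered Alerts method on the Alerter interface. The following is a minimal usage sketch and is not part of the diff; it only relies on identifiers introduced above (alerts.AlertsOpts, alerts.AlertsResponse, alerts.SeverityError), while the function name, the page size of 20, and the assumed imports ("context", "fmt", "go.sia.tech/renterd/alerts") are illustrative. The value `a` can be any alerts.Alerter, e.g. *alerts.Manager or the bus client.

// printErrorAlerts pages through all error-severity alerts (hypothetical helper).
func printErrorAlerts(ctx context.Context, a alerts.Alerter) error {
	opts := alerts.AlertsOpts{Limit: 20, Severity: alerts.SeverityError}
	for {
		resp, err := a.Alerts(ctx, opts)
		if err != nil {
			return err
		}
		for _, alert := range resp.Alerts {
			fmt.Println(alert.Timestamp, alert.Severity, alert.Message)
		}
		if !resp.HasMore {
			return nil
		}
		// advance past the page we just received
		opts.Offset += len(resp.Alerts)
	}
}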
diff --git a/bus/bus.go b/bus/bus.go index d11550595a..fbda894d77 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -126,7 +126,7 @@ type ( ContractSizes(ctx context.Context) (map[types.FileContractID]api.ContractSize, error) ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) - DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error + DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) Bucket(_ context.Context, bucketName string) (api.Bucket, error) CreateBucket(_ context.Context, bucketName string, policy api.BucketPolicy) error @@ -137,9 +137,10 @@ type ( CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath, dstPath, mimeType string, metadata api.ObjectUserMetadata) (api.ObjectMetadata, error) ListObjects(ctx context.Context, bucketName, prefix, sortBy, sortDir, marker string, limit int) (api.ObjectsListResponse, error) Object(ctx context.Context, bucketName, path string) (api.Object, error) + ObjectMetadata(ctx context.Context, bucketName, path string) (api.Object, error) ObjectEntries(ctx context.Context, bucketName, path, prefix, sortBy, sortDir, marker string, offset, limit int) ([]api.ObjectMetadata, bool, error) ObjectsBySlabKey(ctx context.Context, bucketName string, slabKey object.EncryptionKey) ([]api.ObjectMetadata, error) - ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, error) + ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) RemoveObject(ctx context.Context, bucketName, path string) error RemoveObjects(ctx context.Context, bucketName, prefix string) error RenameObject(ctx context.Context, bucketName, from, to string, force bool) error @@ -1191,13 +1192,22 @@ func (b *bus) objectsHandlerGET(jc jape.Context) { if jc.DecodeForm("bucket", &bucket) != nil { return } + var onlymetadata bool + if jc.DecodeForm("onlymetadata", &onlymetadata) != nil { + return + } - o, err := b.ms.Object(jc.Request.Context(), bucket, path) + var o api.Object + var err error + if onlymetadata { + o, err = b.ms.ObjectMetadata(jc.Request.Context(), bucket, path) + } else { + o, err = b.ms.Object(jc.Request.Context(), bucket, path) + } if errors.Is(err, api.ErrObjectNotFound) { jc.Error(err, http.StatusNotFound) return - } - if jc.Check("couldn't load object", err) != nil { + } else if jc.Check("couldn't load object", err) != nil { return } jc.Encode(api.ObjectsResponse{Object: &o}) @@ -1348,7 +1358,11 @@ func (b *bus) slabbuffersHandlerGET(jc jape.Context) { } func (b *bus) objectsStatshandlerGET(jc jape.Context) { - info, err := b.ms.ObjectsStats(jc.Request.Context()) + opts := api.ObjectsStatsOpts{} + if jc.DecodeForm("bucket", &opts.Bucket) != nil { + return + } + info, err := b.ms.ObjectsStats(jc.Request.Context(), opts) if jc.Check("couldn't get objects stats", err) != nil { return } @@ -1395,9 +1409,11 @@ func (b *bus) sectorsHostRootHandlerDELETE(jc jape.Context) { } else if jc.DecodeParam("root", &root) != nil { return } - err := b.ms.DeleteHostSector(jc.Request.Context(), hk, root) + n, err := b.ms.DeleteHostSector(jc.Request.Context(), hk, root) if jc.Check("failed to mark sector as lost", err) != nil { return + } else if n > 0 { + b.logger.Infow("successfully marked sector as lost", "hk", hk, "root", root) } } @@ -1711,8 +1727,40 @@ func (b *bus) gougingParams(ctx context.Context) (api.GougingParams, error) { }, nil } -func (b *bus) handleGETAlerts(c jape.Context) { - c.Encode(b.alertMgr.Active()) +func (b *bus) 
handleGETAlertsDeprecated(jc jape.Context) { + ar, err := b.alertMgr.Alerts(jc.Request.Context(), alerts.AlertsOpts{Offset: 0, Limit: -1}) + if jc.Check("failed to fetch alerts", err) != nil { + return + } + jc.Encode(ar.Alerts) +} + +func (b *bus) handleGETAlerts(jc jape.Context) { + if jc.Request.FormValue("offset") == "" && jc.Request.FormValue("limit") == "" { + b.handleGETAlertsDeprecated(jc) + return + } + offset, limit := 0, -1 + var severity alerts.Severity + if jc.DecodeForm("offset", &offset) != nil { + return + } else if jc.DecodeForm("limit", &limit) != nil { + return + } else if offset < 0 { + jc.Error(errors.New("offset must be non-negative"), http.StatusBadRequest) + return + } else if jc.DecodeForm("severity", &severity) != nil { + return + } + ar, err := b.alertMgr.Alerts(jc.Request.Context(), alerts.AlertsOpts{ + Offset: offset, + Limit: limit, + Severity: severity, + }) + if jc.Check("failed to fetch alerts", err) != nil { + return + } + jc.Encode(ar) } func (b *bus) handlePOSTAlertsDismiss(jc jape.Context) { @@ -2159,9 +2207,13 @@ func (b *bus) multipartHandlerCreatePOST(jc jape.Context) { return } - key := req.Key - if key == (object.EncryptionKey{}) { + var key object.EncryptionKey + if req.GenerateKey { + key = object.GenerateEncryptionKey() + } else if req.Key == nil { key = object.NoOpKey + } else { + key = *req.Key } resp, err := b.ms.CreateMultipartUpload(jc.Request.Context(), req.Bucket, req.Path, key, req.MimeType, req.Metadata) diff --git a/bus/client/alerts.go b/bus/client/alerts.go index 6af68c78da..28c3b9a843 100644 --- a/bus/client/alerts.go +++ b/bus/client/alerts.go @@ -2,20 +2,38 @@ package client import ( "context" + "fmt" + "net/url" "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" ) // Alerts fetches the active alerts from the bus. -func (c *Client) Alerts() (alerts []alerts.Alert, err error) { - err = c.c.GET("/alerts", &alerts) +func (c *Client) Alerts(ctx context.Context, opts alerts.AlertsOpts) (resp alerts.AlertsResponse, err error) { + values := url.Values{} + values.Set("offset", fmt.Sprint(opts.Offset)) + if opts.Limit != 0 { + values.Set("limit", fmt.Sprint(opts.Limit)) + } + if opts.Severity != 0 { + values.Set("severity", opts.Severity.String()) + } + err = c.c.WithContext(ctx).GET("/alerts?"+values.Encode(), &resp) return } // DismissAlerts dimisses the alerts with the given IDs. func (c *Client) DismissAlerts(ctx context.Context, ids ...types.Hash256) error { - return c.c.WithContext(ctx).POST("/alerts/dismiss", ids, nil) + return c.dismissAlerts(ctx, false, ids...) +} + +func (c *Client) dismissAlerts(ctx context.Context, all bool, ids ...types.Hash256) error { + values := url.Values{} + if all { + values.Set("all", fmt.Sprint(true)) + } + return c.c.WithContext(ctx).POST("/alerts/dismiss?"+values.Encode(), ids, nil) } // RegisterAlert registers the given alert. diff --git a/bus/client/multipart-upload.go b/bus/client/multipart-upload.go index ffa4d8dc87..2810194871 100644 --- a/bus/client/multipart-upload.go +++ b/bus/client/multipart-upload.go @@ -46,11 +46,12 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, bucket, path, uplo // CreateMultipartUpload creates a new multipart upload. 
func (c *Client) CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (resp api.MultipartCreateResponse, err error) { err = c.c.WithContext(ctx).POST("/multipart/create", api.MultipartCreateRequest{ - Bucket: bucket, - Path: path, - Key: opts.Key, - MimeType: opts.MimeType, - Metadata: opts.Metadata, + Bucket: bucket, + GenerateKey: opts.GenerateKey, + Path: path, + Key: opts.Key, + MimeType: opts.MimeType, + Metadata: opts.Metadata, }, &resp) return } diff --git a/bus/client/objects.go b/bus/client/objects.go index 38a7b14cda..23011a9ba9 100644 --- a/bus/client/objects.go +++ b/bus/client/objects.go @@ -82,8 +82,12 @@ func (c *Client) ObjectsBySlabKey(ctx context.Context, bucket string, key object } // ObjectsStats returns information about the number of objects and their size. -func (c *Client) ObjectsStats() (osr api.ObjectsStatsResponse, err error) { - err = c.c.GET("/stats/objects", &osr) +func (c *Client) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (osr api.ObjectsStatsResponse, err error) { + values := url.Values{} + if opts.Bucket != "" { + values.Set("bucket", opts.Bucket) + } + err = c.c.WithContext(ctx).GET("/stats/objects?"+values.Encode(), &osr) return } diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index 391d77ea35..47668ff946 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -11,7 +11,7 @@ import ( "strings" "go.sia.tech/core/types" - "go.sia.tech/core/wallet" + "go.sia.tech/coreutils/wallet" "golang.org/x/term" "gopkg.in/yaml.v3" "lukechampine.com/frand" diff --git a/go.mod b/go.mod index 084e6516a5..a000468a70 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module go.sia.tech/renterd -go 1.21 - -toolchain go1.21.6 +go 1.21.6 require ( github.com/gabriel-vasile/mimetype v1.4.3 @@ -10,21 +8,22 @@ require ( github.com/google/go-cmp v0.6.0 github.com/gotd/contrib v0.19.0 github.com/klauspost/reedsolomon v1.12.1 - github.com/minio/minio-go/v7 v7.0.66 + github.com/minio/minio-go/v7 v7.0.67 github.com/montanaflynn/stats v0.7.1 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe - go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b + go.sia.tech/core v0.2.1 + go.sia.tech/coreutils v0.0.3 go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 - go.sia.tech/hostd v0.3.0-beta.1 - go.sia.tech/jape v0.11.1 + go.sia.tech/hostd v1.0.2 + go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 go.sia.tech/mux v1.2.0 go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca - go.sia.tech/web/renterd v0.44.0 - go.uber.org/zap v1.26.0 + go.sia.tech/web/renterd v0.46.0 + go.uber.org/zap v1.27.0 golang.org/x/crypto v0.19.0 golang.org/x/term v0.17.0 gopkg.in/yaml.v3 v3.0.1 - gorm.io/driver/mysql v1.5.2 + gorm.io/driver/mysql v1.5.4 gorm.io/driver/sqlite v1.5.5 gorm.io/gorm v1.25.7 lukechampine.com/frand v1.4.2 @@ -32,8 +31,8 @@ require ( require ( github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect - github.com/aws/aws-sdk-go v1.49.1 // indirect - github.com/cloudflare/cloudflare-go v0.75.0 // indirect + github.com/aws/aws-sdk-go v1.50.1 // indirect + github.com/cloudflare/cloudflare-go v0.86.0 // indirect github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-sql-driver/mysql v1.7.1 // indirect @@ -76,7 +75,7 @@ require ( gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr 
v1.11.0 // indirect - golang.org/x/net v0.19.0 // indirect + golang.org/x/net v0.20.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index d5a601d4b6..cbcb15f69f 100644 --- a/go.sum +++ b/go.sum @@ -9,14 +9,14 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= -github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.50.1 h1:AwnLUM7TcH9vMZqA4TcDKmGfLmDW5VXwT5tPH6kXylo= +github.com/aws/aws-sdk-go v1.50.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.75.0 h1:03a4EkwwsDo0yAHjQ/l+D36K9wTkvr0afDiI/uHQ0Xw= -github.com/cloudflare/cloudflare-go v0.75.0/go.mod h1:5ocQT9qQ99QsT1Ii2751490Z5J+W/nv6jOj+lSAe4ug= +github.com/cloudflare/cloudflare-go v0.86.0 h1:jEKN5VHNYNYtfDL2lUFLTRo+nOVNPFxpXTstVx0rqHI= +github.com/cloudflare/cloudflare-go v0.86.0/go.mod h1:wYW/5UP02TUfBToa/yKbQHV+r6h1NnJ1Je7XjuGM4Jw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -126,8 +126,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI= @@ -135,8 +135,8 @@ github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 
h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= -github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= +github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8= +github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -239,31 +239,33 @@ gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213/go.mod h1 gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b h1:xJSxYN2kZD3NAijHIwjXhG5+7GoPyjDNIJPEoD3b72g= -go.sia.tech/core v0.1.12-0.20231211182757-77190f04f90b/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= +go.sia.tech/core v0.2.1 h1:CqmMd+T5rAhC+Py3NxfvGtvsj/GgwIqQHHVrdts/LqY= +go.sia.tech/core v0.2.1/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= +go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= +go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 h1:ulzfJNjxN5DjXHClkW2pTiDk+eJ+0NQhX87lFDZ03t0= go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= -go.sia.tech/hostd v0.3.0-beta.1 h1:A2RL4wkW18eb28+fJtdyK9OYNiiwpCDO8FO3cyT9r7A= -go.sia.tech/hostd v0.3.0-beta.1/go.mod h1:gVtU631RkbtOEHJKb8qghudhWcYIL8w3phjvV2/bz0A= -go.sia.tech/jape v0.11.1 h1:M7IP+byXL7xOqzxcHUQuXW+q3sYMkYzmMlMw+q8ZZw0= -go.sia.tech/jape v0.11.1/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= +go.sia.tech/hostd v1.0.2 h1:GjzNIAlwg3/dViF6258Xn5DI3+otQLRqmkoPDugP+9Y= +go.sia.tech/hostd v1.0.2/go.mod h1:zGw+AGVmazAp4ydvo7bZLNKTy1J51RI6Mp/oxRtYT6c= +go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= +go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= go.sia.tech/mux v1.2.0/go.mod h1:Yyo6wZelOYTyvrHmJZ6aQfRoer3o4xyKQ4NmQLJrBSo= go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca h1:aZMg2AKevn7jKx+wlusWQfwSM5pNU9aGtRZme29q3O4= go.sia.tech/siad v1.5.10-0.20230228235644-3059c0b930ca/go.mod h1:h/1afFwpxzff6/gG5i1XdAgPK7dEY6FaibhK7N5F86Y= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff h1:/nE7nhewDRxzEdtSKT4SkiUwtjPSiy7Xz7CHEW3MaGQ= go.sia.tech/web v0.0.0-20231213145933-3f175a86abff/go.mod h1:RKODSdOmR3VtObPAcGwQqm4qnqntDVFylbvOBbWYYBU= -go.sia.tech/web/renterd v0.44.0 h1:yKu1Kq/6ssV9Vbv4oa+sn2Pc2TNyfcrv/mRPNOuYuB0= -go.sia.tech/web/renterd v0.44.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= +go.sia.tech/web/renterd v0.46.0 h1:BMVg4i7LxSlc8wZ4T0EG1k3EK4JxVIzCfD3/cjmwH0k= +go.sia.tech/web/renterd v0.46.0/go.mod h1:FgXrdmAnu591a3h96RB/15pMZ74xO9457g902uE06BM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod 
h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -301,8 +303,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -389,11 +391,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs= -gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8= +gorm.io/driver/mysql v1.5.4 h1:igQmHfKcbaTVyAIHNhhB888vvxh8EdQ2uSUT0LPcBso= +gorm.io/driver/mysql v1.5.4/go.mod h1:9rYxJph/u9SWkWc9yY4XJ1F/+xO0S/ChOmbk3+Z5Tvs= gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E= gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE= -gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= +gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.7 h1:VsD6acwRjz2zFxGO50gPO6AkNs7KKnvfzUjHQhZDz/A= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/node/transactionpool.go b/internal/node/transactionpool.go index 
b2226bfb54..c5582a757a 100644 --- a/internal/node/transactionpool.go +++ b/internal/node/transactionpool.go @@ -15,7 +15,7 @@ type txpool struct { func (tp txpool) RecommendedFee() (fee types.Currency) { _, max := tp.tp.FeeEstimation() - convertToCore(&max, &fee) + convertToCore(&max, (*types.V1Currency)(&fee)) return } diff --git a/internal/testing/cluster_test.go b/internal/testing/cluster_test.go index 8a93764d81..ae54083c16 100644 --- a/internal/testing/cluster_test.go +++ b/internal/testing/cluster_test.go @@ -698,25 +698,33 @@ func TestUploadDownloadExtended(t *testing.T) { } // check objects stats. - info, err := cluster.Bus.ObjectsStats() - tt.OK(err) - objectsSize := uint64(len(file1) + len(file2) + len(small) + len(large)) - if info.TotalObjectsSize != objectsSize { - t.Error("wrong size", info.TotalObjectsSize, objectsSize) - } - sectorsSize := 15 * rhpv2.SectorSize - if info.TotalSectorsSize != uint64(sectorsSize) { - t.Error("wrong size", info.TotalSectorsSize, sectorsSize) - } - if info.TotalUploadedSize != uint64(sectorsSize) { - t.Error("wrong size", info.TotalUploadedSize, sectorsSize) - } - if info.NumObjects != 4 { - t.Error("wrong number of objects", info.NumObjects, 4) - } - if info.MinHealth != 1 { - t.Errorf("expected minHealth of 1, got %v", info.MinHealth) - } + tt.Retry(100, 100*time.Millisecond, func() error { + for _, opts := range []api.ObjectsStatsOpts{ + {}, // any bucket + {Bucket: api.DefaultBucketName}, // specific bucket + } { + info, err := cluster.Bus.ObjectsStats(context.Background(), opts) + tt.OK(err) + objectsSize := uint64(len(file1) + len(file2) + len(small) + len(large)) + if info.TotalObjectsSize != objectsSize { + return fmt.Errorf("wrong size %v %v", info.TotalObjectsSize, objectsSize) + } + sectorsSize := 15 * rhpv2.SectorSize + if info.TotalSectorsSize != uint64(sectorsSize) { + return fmt.Errorf("wrong size %v %v", info.TotalSectorsSize, sectorsSize) + } + if info.TotalUploadedSize != uint64(sectorsSize) { + return fmt.Errorf("wrong size %v %v", info.TotalUploadedSize, sectorsSize) + } + if info.NumObjects != 4 { + return fmt.Errorf("wrong number of objects %v %v", info.NumObjects, 4) + } + if info.MinHealth != 1 { + return fmt.Errorf("expected minHealth of 1, got %v", info.MinHealth) + } + } + return nil + }) // download the data for _, data := range [][]byte{small, large} { @@ -1309,7 +1317,7 @@ func TestUploadDownloadSameHost(t *testing.T) { // build a frankenstein object constructed with all sectors on the same host res.Object.Slabs[0].Shards = shards[res.Object.Slabs[0].Shards[0].LatestHost] - tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", testContractSet, res.Object.Object, api.AddObjectOptions{})) + tt.OK(b.AddObject(context.Background(), api.DefaultBucketName, "frankenstein", testContractSet, *res.Object.Object, api.AddObjectOptions{})) // assert we can download this object tt.OK(w.DownloadObject(context.Background(), io.Discard, api.DefaultBucketName, "frankenstein", api.DownloadObjectOptions{})) @@ -1634,7 +1642,7 @@ func TestUploadPacking(t *testing.T) { download("file4", data4, 0, int64(len(data4))) // assert number of objects - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 5 { t.Fatalf("expected 5 objects, got %v", os.NumObjects) @@ -1643,7 +1651,7 @@ func TestUploadPacking(t *testing.T) { // check the object size stats, we use a retry loop since packed slabs are // uploaded in a separate goroutine, so the 
object stats might lag a bit tt.Retry(60, time.Second, func() error { - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -1797,7 +1805,7 @@ func TestSlabBufferStats(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data1), api.DefaultBucketName, "1", api.UploadObjectOptions{})) // assert number of objects - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 1 { t.Fatalf("expected 1 object, got %d", os.NumObjects) @@ -1806,7 +1814,7 @@ func TestSlabBufferStats(t *testing.T) { // check the object size stats, we use a retry loop since packed slabs are // uploaded in a separate goroutine, so the object stats might lag a bit tt.Retry(60, time.Second, func() error { - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -1854,7 +1862,7 @@ func TestSlabBufferStats(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(data2), api.DefaultBucketName, "2", api.UploadObjectOptions{})) // assert number of objects - os, err = b.ObjectsStats() + os, err = b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 2 { t.Fatalf("expected 1 object, got %d", os.NumObjects) @@ -1863,7 +1871,7 @@ func TestSlabBufferStats(t *testing.T) { // check the object size stats, we use a retry loop since packed slabs are // uploaded in a separate goroutine, so the object stats might lag a bit tt.Retry(60, time.Second, func() error { - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.TotalObjectsSize != uint64(len(data1)+len(data2)) { return fmt.Errorf("expected totalObjectSize of %d, got %d", len(data1)+len(data2), os.TotalObjectsSize) @@ -1916,9 +1924,9 @@ func TestAlerts(t *testing.T) { tt.OK(b.RegisterAlert(context.Background(), alert)) findAlert := func(id types.Hash256) *alerts.Alert { t.Helper() - alerts, err := b.Alerts() + ar, err := b.Alerts(context.Background(), alerts.AlertsOpts{}) tt.OK(err) - for _, alert := range alerts { + for _, alert := range ar.Alerts { if alert.ID == id { return &alert } @@ -1939,6 +1947,72 @@ func TestAlerts(t *testing.T) { if foundAlert != nil { t.Fatal("alert found") } + + // register 2 alerts + alert2 := alert + alert2.ID = frand.Entropy256() + alert2.Timestamp = time.Now().Add(time.Second) + tt.OK(b.RegisterAlert(context.Background(), alert)) + tt.OK(b.RegisterAlert(context.Background(), alert2)) + if foundAlert := findAlert(alert.ID); foundAlert == nil { + t.Fatal("alert not found") + } else if foundAlert := findAlert(alert2.ID); foundAlert == nil { + t.Fatal("alert not found") + } + + // try to find with offset = 1 + ar, err := b.Alerts(context.Background(), alerts.AlertsOpts{Offset: 1}) + foundAlerts := ar.Alerts + tt.OK(err) + if len(foundAlerts) != 1 || foundAlerts[0].ID != alert.ID { + t.Fatal("wrong alert") + } + + // try to find with limit = 1 + ar, err = b.Alerts(context.Background(), alerts.AlertsOpts{Limit: 1}) + foundAlerts = ar.Alerts + tt.OK(err) + if len(foundAlerts) != 1 || foundAlerts[0].ID != alert2.ID { + t.Fatal("wrong alert") + } + + // register more alerts + for severity := alerts.SeverityInfo; severity <= alerts.SeverityCritical; severity++ { + for j := 0; j < 3*int(severity); j++ { + tt.OK(b.RegisterAlert(context.Background(), alerts.Alert{ + ID: 
frand.Entropy256(), + Severity: severity, + Message: "test", + Data: map[string]interface{}{ + "origin": "test", + }, + Timestamp: time.Now(), + })) + } + } + for severity := alerts.SeverityInfo; severity <= alerts.SeverityCritical; severity++ { + ar, err = b.Alerts(context.Background(), alerts.AlertsOpts{Severity: severity}) + tt.OK(err) + if ar.Total() != 32 { + t.Fatal("expected 32 alerts", ar.Total()) + } else if ar.Totals.Info != 3 { + t.Fatal("expected 3 info alerts", ar.Totals.Info) + } else if ar.Totals.Warning != 6 { + t.Fatal("expected 6 warning alerts", ar.Totals.Warning) + } else if ar.Totals.Error != 9 { + t.Fatal("expected 9 error alerts", ar.Totals.Error) + } else if ar.Totals.Critical != 14 { + t.Fatal("expected 14 critical alerts", ar.Totals.Critical) + } else if severity == alerts.SeverityInfo && len(ar.Alerts) != ar.Totals.Info { + t.Fatalf("expected %v info alerts, got %v", ar.Totals.Info, len(ar.Alerts)) + } else if severity == alerts.SeverityWarning && len(ar.Alerts) != ar.Totals.Warning { + t.Fatalf("expected %v warning alerts, got %v", ar.Totals.Warning, len(ar.Alerts)) + } else if severity == alerts.SeverityError && len(ar.Alerts) != ar.Totals.Error { + t.Fatalf("expected %v error alerts, got %v", ar.Totals.Error, len(ar.Alerts)) + } else if severity == alerts.SeverityCritical && len(ar.Alerts) != ar.Totals.Critical { + t.Fatalf("expected %v critical alerts, got %v", ar.Totals.Critical, len(ar.Alerts)) + } + } } func TestMultipartUploads(t *testing.T) { @@ -1958,7 +2032,7 @@ func TestMultipartUploads(t *testing.T) { // Start a new multipart upload. objPath := "/foo" - mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: object.GenerateEncryptionKey()}) + mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{GenerateKey: true}) tt.OK(err) if mpr.UploadID == "" { t.Fatal("expected non-empty upload ID") @@ -1977,7 +2051,7 @@ func TestMultipartUploads(t *testing.T) { // correctly. putPart := func(partNum int, offset int, data []byte) string { t.Helper() - res, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(data), api.DefaultBucketName, objPath, mpr.UploadID, partNum, api.UploadMultipartUploadPartOptions{EncryptionOffset: offset}) + res, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(data), api.DefaultBucketName, objPath, mpr.UploadID, partNum, api.UploadMultipartUploadPartOptions{EncryptionOffset: &offset}) tt.OK(err) if res.ETag == "" { t.Fatal("expected non-empty ETag") @@ -2007,7 +2081,7 @@ func TestMultipartUploads(t *testing.T) { } // Check objects stats. - os, err := b.ObjectsStats() + os, err := b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 0 { t.Fatalf("expected 0 object, got %v", os.NumObjects) @@ -2066,7 +2140,7 @@ func TestMultipartUploads(t *testing.T) { } // Check objects stats. - os, err = b.ObjectsStats() + os, err = b.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) tt.OK(err) if os.NumObjects != 1 { t.Fatalf("expected 1 object, got %v", os.NumObjects) @@ -2316,12 +2390,13 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { defer cluster.Shutdown() b := cluster.Bus w := cluster.Worker - slabSize := testRedundancySettings.SlabSizeWithRedundancy() + slabSize := testRedundancySettings.SlabSize() tt := cluster.tt // start a new multipart upload. 
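The assertions above exercise the new paginated alerts endpoint. As a minimal caller-side sketch, this is how the Offset/Limit/Severity options and the HasMore flag compose into a paging loop; the Bus interface is a stand-in for the real client:

```go
package alertsexample

import (
	"context"

	"go.sia.tech/renterd/alerts"
)

// Bus stands in for the bus client; only the Alerts method added in this
// change is needed here.
type Bus interface {
	Alerts(ctx context.Context, opts alerts.AlertsOpts) (alerts.AlertsResponse, error)
}

// warningAlerts pages through all warning-level alerts, 20 at a time, and
// stops once the server reports there are no more.
func warningAlerts(ctx context.Context, b Bus) ([]alerts.Alert, error) {
	var all []alerts.Alert
	for offset := 0; ; {
		ar, err := b.Alerts(ctx, alerts.AlertsOpts{
			Severity: alerts.SeverityWarning,
			Offset:   offset,
			Limit:    20,
		})
		if err != nil {
			return nil, err
		}
		all = append(all, ar.Alerts...)
		if !ar.HasMore {
			return all, nil
		}
		offset += len(ar.Alerts)
	}
}
```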
We upload the parts in reverse order objPath := "/foo" - mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: object.GenerateEncryptionKey()}) + key := object.GenerateEncryptionKey() + mpr, err := b.CreateMultipartUpload(context.Background(), api.DefaultBucketName, objPath, api.CreateMultipartOptions{Key: &key}) tt.OK(err) if mpr.UploadID == "" { t.Fatal("expected non-empty upload ID") @@ -2329,22 +2404,25 @@ func TestMultipartUploadWrappedByPartialSlabs(t *testing.T) { // upload a part that is a partial slab part3Data := bytes.Repeat([]byte{3}, int(slabSize)/4) + offset := int(slabSize + slabSize/4) resp3, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(part3Data), api.DefaultBucketName, objPath, mpr.UploadID, 3, api.UploadMultipartUploadPartOptions{ - EncryptionOffset: int(slabSize + slabSize/4), + EncryptionOffset: &offset, }) tt.OK(err) // upload a part that is exactly a full slab part2Data := bytes.Repeat([]byte{2}, int(slabSize)) + offset = int(slabSize / 4) resp2, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(part2Data), api.DefaultBucketName, objPath, mpr.UploadID, 2, api.UploadMultipartUploadPartOptions{ - EncryptionOffset: int(slabSize / 4), + EncryptionOffset: &offset, }) tt.OK(err) // upload another part the same size as the first one part1Data := bytes.Repeat([]byte{1}, int(slabSize)/4) + offset = 0 resp1, err := w.UploadMultipartUploadPart(context.Background(), bytes.NewReader(part1Data), api.DefaultBucketName, objPath, mpr.UploadID, 1, api.UploadMultipartUploadPartOptions{ - EncryptionOffset: 0, + EncryptionOffset: &offset, }) tt.OK(err) diff --git a/object/object.go b/object/object.go index 49375f3b48..965ebce2af 100644 --- a/object/object.go +++ b/object/object.go @@ -3,6 +3,7 @@ package object import ( "bytes" "crypto/cipher" + "crypto/md5" "encoding/binary" "encoding/hex" "fmt" @@ -43,6 +44,9 @@ func (k *EncryptionKey) UnmarshalBinary(b []byte) error { // String implements fmt.Stringer. func (k EncryptionKey) String() string { + if k.entropy == nil { + return "" + } return "key:" + hex.EncodeToString(k.entropy[:]) } @@ -110,9 +114,12 @@ func GenerateEncryptionKey() EncryptionKey { } // An Object is a unit of data that has been stored on a host. +// NOTE: Object is embedded in the API's Object type, so all fields should be +// tagged omitempty to make sure responses where no object is returned remain +// clean. type Object struct { - Key EncryptionKey `json:"key"` - Slabs []SlabSlice `json:"slabs"` + Key EncryptionKey `json:"key,omitempty"` + Slabs []SlabSlice `json:"slabs,omitempty"` } // NewObject returns a new Object with a random key. @@ -139,6 +146,22 @@ func (o Object) Contracts() map[types.PublicKey]map[types.FileContractID]struct{ return usedContracts } +func (o *Object) ComputeETag() string { + // calculate the eTag using the precomputed sector roots to avoid having to + // hash the entire object again. + h := md5.New() + b := make([]byte, 8) + for _, slab := range o.Slabs { + binary.LittleEndian.PutUint32(b[:4], slab.Offset) + binary.LittleEndian.PutUint32(b[4:], slab.Length) + h.Write(b) + for _, shard := range slab.Shards { + h.Write(shard.Root[:]) + } + } + return string(hex.EncodeToString(h.Sum(nil))) +} + // TotalSize returns the total size of the object. 
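ComputeETag above derives an ETag from data that is already in memory (each slice's offset and length plus its sector roots, hashed with MD5), so the object payload never has to be re-read. A standalone sketch of the per-slice step, with illustrative inputs:

```go
package main

import (
	"crypto/md5"
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// etagForSlice mirrors one iteration of Object.ComputeETag: hash the slice's
// offset and length, then the roots of the sectors it references.
func etagForSlice(offset, length uint32, roots [][32]byte) string {
	h := md5.New()
	b := make([]byte, 8)
	binary.LittleEndian.PutUint32(b[:4], offset)
	binary.LittleEndian.PutUint32(b[4:], length)
	h.Write(b)
	for _, root := range roots {
		h.Write(root[:])
	}
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println(etagForSlice(0, 1<<22, [][32]byte{{1}, {2}, {3}}))
}
```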
func (o Object) TotalSize() int64 { var n int64 diff --git a/object/slab.go b/object/slab.go index 9c3afa6088..f2762abf33 100644 --- a/object/slab.go +++ b/object/slab.go @@ -3,6 +3,7 @@ package object import ( "bytes" "io" + "sync" "github.com/klauspost/reedsolomon" rhpv2 "go.sia.tech/core/rhp/v2" @@ -79,11 +80,17 @@ func (s Slab) Length() int { // Encrypt xors shards with the keystream derived from s.Key, using a // different nonce for each shard. func (s Slab) Encrypt(shards [][]byte) { - for i, shard := range shards { - nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(s.Key.entropy[:], nonce[:]) - c.XORKeyStream(shard, shard) + var wg sync.WaitGroup + for i := range shards { + wg.Add(1) + go func(i int) { + nonce := [24]byte{1: byte(i)} + c, _ := chacha20.NewUnauthenticatedCipher(s.Key.entropy[:], nonce[:]) + c.XORKeyStream(shards[i], shards[i]) + wg.Done() + }(i) } + wg.Wait() } // Encode encodes slab data into sector-sized shards. The supplied shards should @@ -151,12 +158,18 @@ func (ss SlabSlice) SectorRegion() (offset, length uint32) { // slice offset), using a different nonce for each shard. func (ss SlabSlice) Decrypt(shards [][]byte) { offset := ss.Offset / (rhpv2.LeafSize * uint32(ss.MinShards)) - for i, shard := range shards { - nonce := [24]byte{1: byte(i)} - c, _ := chacha20.NewUnauthenticatedCipher(ss.Key.entropy[:], nonce[:]) - c.SetCounter(offset) - c.XORKeyStream(shard, shard) + var wg sync.WaitGroup + for i := range shards { + wg.Add(1) + go func(i int) { + nonce := [24]byte{1: byte(i)} + c, _ := chacha20.NewUnauthenticatedCipher(ss.Key.entropy[:], nonce[:]) + c.SetCounter(offset) + c.XORKeyStream(shards[i], shards[i]) + wg.Done() + }(i) } + wg.Wait() } // Recover recovers a slice of slab data from the supplied shards. diff --git a/s3/backend.go b/s3/backend.go index a481da7274..c05a3ec988 100644 --- a/s3/backend.go +++ b/s3/backend.go @@ -287,7 +287,10 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range // HeadObject should return a NotFound() error if the object does not // exist. 
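The Encrypt and Decrypt changes above fan the per-shard XChaCha20 XOR out to one goroutine per shard, which is safe because every goroutine writes only its own shard and uses its own nonce. A reduced sketch of the pattern:

```go
package slabexample

import (
	"sync"

	"golang.org/x/crypto/chacha20"
)

// xorShards XORs every shard with an XChaCha20 keystream in its own
// goroutine, mirroring the parallelised Slab.Encrypt above. No locking is
// needed beyond the WaitGroup since goroutine i only touches shards[i].
func xorShards(key [32]byte, shards [][]byte) {
	var wg sync.WaitGroup
	for i := range shards {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			nonce := [24]byte{1: byte(i)} // distinct nonce per shard
			c, _ := chacha20.NewUnauthenticatedCipher(key[:], nonce[:])
			c.XORKeyStream(shards[i], shards[i])
		}(i)
	}
	wg.Wait()
}
```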
func (s *s3) HeadObject(ctx context.Context, bucketName, objectName string) (*gofakes3.Object, error) { - res, err := s.b.Object(ctx, bucketName, objectName, api.GetObjectOptions{IgnoreDelim: true}) + res, err := s.b.Object(ctx, bucketName, objectName, api.GetObjectOptions{ + IgnoreDelim: true, + OnlyMetadata: true, + }) if err != nil && strings.Contains(err.Error(), api.ErrObjectNotFound.Error()) { return nil, gofakes3.KeyNotFound(objectName) } else if err != nil { @@ -405,7 +408,7 @@ func (s *s3) CopyObject(ctx context.Context, srcBucket, srcKey, dstBucket, dstKe func (s *s3) CreateMultipartUpload(ctx context.Context, bucket, key string, meta map[string]string) (gofakes3.UploadID, error) { convertToSiaMetadataHeaders(meta) resp, err := s.b.CreateMultipartUpload(ctx, bucket, "/"+key, api.CreateMultipartOptions{ - Key: object.NoOpKey, + Key: &object.NoOpKey, MimeType: meta["Content-Type"], Metadata: api.ExtractObjectUserMetadataFrom(meta), }) @@ -418,8 +421,7 @@ func (s *s3) CreateMultipartUpload(ctx context.Context, bucket, key string, meta func (s *s3) UploadPart(ctx context.Context, bucket, object string, id gofakes3.UploadID, partNumber int, contentLength int64, input io.Reader) (*gofakes3.UploadPartResult, error) { res, err := s.w.UploadMultipartUploadPart(ctx, input, bucket, object, string(id), partNumber, api.UploadMultipartUploadPartOptions{ - DisablePreshardingEncryption: true, - ContentLength: contentLength, + ContentLength: contentLength, }) if err != nil { return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index a61f9eea3f..35872ea2dd 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -63,15 +63,8 @@ func TestSQLHostDB(t *testing.T) { // Insert an announcement for the host and another one for an unknown // host. - a := hostdb.Announcement{ - Index: types.ChainIndex{ - Height: 42, - ID: types.BlockID{1, 2, 3}, - }, - Timestamp: time.Now().UTC().Round(time.Second), - NetAddress: "address", - } - err = ss.insertTestAnnouncement(hk, a) + ann := newTestHostDBAnnouncement("address") + err = ss.insertTestAnnouncement(hk, ann) if err != nil { t.Fatal(err) } @@ -79,7 +72,7 @@ func TestSQLHostDB(t *testing.T) { // Read the host and verify that the announcement related fields were // set. var h dbHost - tx := ss.db.Where("last_announcement = ? AND net_address = ?", a.Timestamp, a.NetAddress).Find(&h) + tx := ss.db.Where("last_announcement = ? AND net_address = ?", ann.Timestamp, ann.NetAddress).Find(&h) if tx.Error != nil { t.Fatal(tx.Error) } @@ -116,7 +109,7 @@ func TestSQLHostDB(t *testing.T) { // Insert another announcement for an unknown host. unknownKey := types.PublicKey{1, 4, 7} - err = ss.insertTestAnnouncement(unknownKey, a) + err = ss.insertTestAnnouncement(unknownKey, ann) if err != nil { t.Fatal(err) } @@ -124,7 +117,7 @@ func TestSQLHostDB(t *testing.T) { if err != nil { t.Fatal(err) } - if h3.NetAddress != a.NetAddress { + if h3.NetAddress != ann.NetAddress { t.Fatal("wrong net address") } if h3.KnownSince.IsZero() { @@ -510,22 +503,18 @@ func TestInsertAnnouncements(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() - // Create announcements for 2 hosts. + // Create announcements for 3 hosts. 
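With CreateMultipartOptions.Key now a pointer, the new GenerateKey flag, and the S3 backend passing &object.NoOpKey above, callers choose between an explicit key and a bus-generated one. A small caller-side sketch; the Bus interface stands in for the real client and the helper name is hypothetical:

```go
package multipartexample

import (
	"context"

	"go.sia.tech/renterd/api"
	"go.sia.tech/renterd/object"
)

// Bus stands in for the bus client; only CreateMultipartUpload is needed.
type Bus interface {
	CreateMultipartUpload(ctx context.Context, bucket, path string, opts api.CreateMultipartOptions) (api.MultipartCreateResponse, error)
}

// createUpload either lets the bus generate the upload's encryption key or
// supplies one explicitly through the new pointer field.
func createUpload(ctx context.Context, b Bus, bucket, path string, generate bool) (api.MultipartCreateResponse, error) {
	if generate {
		return b.CreateMultipartUpload(ctx, bucket, path, api.CreateMultipartOptions{GenerateKey: true})
	}
	key := object.GenerateEncryptionKey()
	return b.CreateMultipartUpload(ctx, bucket, path, api.CreateMultipartOptions{Key: &key})
}
```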
ann1 := announcement{ - hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), - announcement: hostdb.Announcement{ - Index: types.ChainIndex{Height: 1, ID: types.BlockID{1}}, - Timestamp: time.Now(), - NetAddress: "foo.bar:1000", - }, + hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), + announcement: newTestHostDBAnnouncement("foo.bar:1000"), } ann2 := announcement{ hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), - announcement: hostdb.Announcement{}, + announcement: newTestHostDBAnnouncement("bar.baz:1000"), } ann3 := announcement{ hostKey: publicKey(types.GeneratePrivateKey().PublicKey()), - announcement: hostdb.Announcement{}, + announcement: newTestHostDBAnnouncement("quz.qux:1000"), } // Insert the first one and check that all fields are set. @@ -1101,7 +1090,7 @@ func (s *SQLStore) addCustomTestHost(hk types.PublicKey, na string) error { s.unappliedHostKeys[hk] = struct{}{} s.unappliedAnnouncements = append(s.unappliedAnnouncements, []announcement{{ hostKey: publicKey(hk), - announcement: hostdb.Announcement{NetAddress: na}, + announcement: newTestHostDBAnnouncement(na), }}...) s.lastSave = time.Now().Add(s.persistInterval * -2) return s.applyUpdates(false) @@ -1153,6 +1142,14 @@ func newTestHostAnnouncement(na modules.NetAddress) (modules.HostAnnouncement, t }, sk } +func newTestHostDBAnnouncement(addr string) hostdb.Announcement { + return hostdb.Announcement{ + Index: types.ChainIndex{Height: 1, ID: types.BlockID{1}}, + Timestamp: time.Now().UTC().Round(time.Second), + NetAddress: addr, + } +} + func newTestTransaction(ha modules.HostAnnouncement, sk types.PrivateKey) stypes.Transaction { var buf bytes.Buffer buf.Write(encoding.Marshal(ha)) diff --git a/stores/metadata.go b/stores/metadata.go index f20f7dbb04..c281c98007 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -410,14 +410,14 @@ func (s dbSlab) convert() (slab object.Slab, err error) { } func (raw rawObjectMetadata) convert() api.ObjectMetadata { - return api.ObjectMetadata{ - ETag: raw.ETag, - Health: raw.Health, - MimeType: raw.MimeType, - ModTime: api.TimeRFC3339(time.Time(raw.ModTime).UTC()), - Name: raw.Name, - Size: raw.Size, - } + return newObjectMetadata( + raw.Name, + raw.ETag, + raw.MimeType, + raw.Health, + time.Time(raw.ModTime), + raw.Size, + ) } func (raw rawObject) toSlabSlice() (slice object.SlabSlice, _ error) { @@ -582,70 +582,85 @@ func (s *SQLStore) ListBuckets(ctx context.Context) ([]api.Bucket, error) { // ObjectsStats returns some info related to the objects stored in the store. To // reduce locking and make sure all results are consistent, everything is done // within a single transaction. -func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, error) { +func (s *SQLStore) ObjectsStats(ctx context.Context, opts api.ObjectsStatsOpts) (api.ObjectsStatsResponse, error) { + // fetch bucket id if a bucket was specified + var bucketID uint + if opts.Bucket != "" { + err := s.db.Model(&dbBucket{}).Select("id").Where("name = ?", opts.Bucket).Take(&bucketID).Error + if err != nil { + return api.ObjectsStatsResponse{}, err + } + } + // number of objects var objInfo struct { NumObjects uint64 MinHealth float64 TotalObjectsSize uint64 } - err := s.db. + objInfoQuery := s.db. Model(&dbObject{}). - Select("COUNT(*) AS NumObjects, COALESCE(MIN(health), 1) as MinHealth, SUM(size) AS TotalObjectsSize"). - Scan(&objInfo). 
- Error + Select("COUNT(*) AS NumObjects, COALESCE(MIN(health), 1) as MinHealth, SUM(size) AS TotalObjectsSize") + if opts.Bucket != "" { + objInfoQuery = objInfoQuery.Where("db_bucket_id", bucketID) + } + err := objInfoQuery.Scan(&objInfo).Error if err != nil { return api.ObjectsStatsResponse{}, err } // number of unfinished objects var unfinishedObjects uint64 - err = s.db. + unfinishedObjectsQuery := s.db. Model(&dbMultipartUpload{}). - Select("COUNT(*)"). - Scan(&unfinishedObjects). - Error + Select("COUNT(*)") + if opts.Bucket != "" { + unfinishedObjectsQuery = unfinishedObjectsQuery.Where("db_bucket_id", bucketID) + } + err = unfinishedObjectsQuery.Scan(&unfinishedObjects).Error if err != nil { return api.ObjectsStatsResponse{}, err } // size of unfinished objects var totalUnfinishedObjectsSize uint64 - err = s.db. + totalUnfinishedObjectsSizeQuery := s.db. Model(&dbMultipartPart{}). - Select("COALESCE(SUM(size), 0)"). - Scan(&totalUnfinishedObjectsSize). - Error + Joins("INNER JOIN multipart_uploads mu ON multipart_parts.db_multipart_upload_id = mu.id"). + Select("COALESCE(SUM(size), 0)") + if opts.Bucket != "" { + totalUnfinishedObjectsSizeQuery = totalUnfinishedObjectsSizeQuery.Where("db_bucket_id", bucketID) + } + err = totalUnfinishedObjectsSizeQuery.Scan(&totalUnfinishedObjectsSize).Error if err != nil { return api.ObjectsStatsResponse{}, err } - var totalSectors uint64 + var totalSectors int64 + totalSectorsQuery := s.db. + Table("slabs sla"). + Select("COALESCE(SUM(total_shards), 0)"). + Where("db_buffered_slab_id IS NULL") - batchSize := 500000 - marker := uint64(0) - for offset := 0; ; offset += batchSize { - var result struct { - Sectors uint64 - Marker uint64 - } - res := s.db. - Model(&dbSector{}). - Raw("SELECT COUNT(*) as Sectors, MAX(sectors.db_sector_id) as Marker FROM (SELECT cs.db_sector_id FROM contract_sectors cs WHERE cs.db_sector_id > ? GROUP BY cs.db_sector_id LIMIT ?) sectors", marker, batchSize). - Scan(&result) - if err := res.Error; err != nil { - return api.ObjectsStatsResponse{}, err - } else if result.Sectors == 0 { - break // done - } - totalSectors += result.Sectors - marker = result.Marker + if opts.Bucket != "" { + totalSectorsQuery = totalSectorsQuery.Where(` + EXISTS ( + SELECT 1 FROM slices sli + INNER JOIN objects o ON o.id = sli.db_object_id AND o.db_bucket_id = ? + WHERE sli.db_slab_id = sla.id + ) + `, bucketID) + } + err = totalSectorsQuery.Scan(&totalSectors).Error + if err != nil { + return api.ObjectsStatsResponse{}, err } var totalUploaded int64 err = s.db. - Model(&dbContractSector{}). - Count(&totalUploaded). + Model(&dbContract{}). + Select("COALESCE(SUM(size), 0)"). + Scan(&totalUploaded). 
Error if err != nil { return api.ObjectsStatsResponse{}, err @@ -657,8 +672,8 @@ func (s *SQLStore) ObjectsStats(ctx context.Context) (api.ObjectsStatsResponse, NumUnfinishedObjects: unfinishedObjects, TotalUnfinishedObjectsSize: totalUnfinishedObjectsSize, TotalObjectsSize: objInfo.TotalObjectsSize, - TotalSectorsSize: totalSectors * rhpv2.SectorSize, - TotalUploadedSize: uint64(totalUploaded) * rhpv2.SectorSize, + TotalSectorsSize: uint64(totalSectors) * rhpv2.SectorSize, + TotalUploadedSize: uint64(totalUploaded), }, nil } @@ -1484,6 +1499,10 @@ func (s *SQLStore) RenameObjects(ctx context.Context, bucket, prefixOld, prefixN gorm.Expr(sqlConcat(tx, "?", "SUBSTR(object_id, ?)")), prefixNew, utf8.RuneCountInString(prefixOld)+1, prefixOld+"%", utf8.RuneCountInString(prefixOld), prefixOld, sqlWhereBucket("objects", bucket)) + + if !isSQLite(tx) { + inner = tx.Raw("SELECT * FROM (?) as i", inner) + } resp := tx.Model(&dbObject{}). Where("object_id IN (?)", inner). Delete(&dbObject{}) @@ -1533,13 +1552,14 @@ func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath // No copying is happening. We just update the metadata on the src // object. srcObj.MimeType = mimeType - om = api.ObjectMetadata{ - Health: srcObj.Health, - MimeType: srcObj.MimeType, - ModTime: api.TimeRFC3339(srcObj.CreatedAt.UTC()), - Name: srcObj.ObjectID, - Size: srcObj.Size, - } + om = newObjectMetadata( + srcObj.ObjectID, + srcObj.Etag, + srcObj.MimeType, + srcObj.Health, + srcObj.CreatedAt, + srcObj.Size, + ) if err := s.updateUserMetadata(tx, srcObj.ID, metadata); err != nil { return fmt.Errorf("failed to update user metadata: %w", err) } @@ -1587,21 +1607,22 @@ func (s *SQLStore) CopyObject(ctx context.Context, srcBucket, dstBucket, srcPath return fmt.Errorf("failed to create object metadata: %w", err) } - om = api.ObjectMetadata{ - MimeType: dstObj.MimeType, - ETag: dstObj.Etag, - Health: srcObj.Health, - ModTime: api.TimeRFC3339(dstObj.CreatedAt.UTC()), - Name: dstObj.ObjectID, - Size: dstObj.Size, - } + om = newObjectMetadata( + dstObj.ObjectID, + dstObj.Etag, + dstObj.MimeType, + dstObj.Health, + dstObj.CreatedAt, + dstObj.Size, + ) return nil }) return } -func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) error { - return s.retryTransaction(func(tx *gorm.DB) error { +func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) { + var deletedSectors int + err := s.retryTransaction(func(tx *gorm.DB) error { // Fetch contract_sectors to delete. var sectors []dbContractSector err := tx.Raw(` @@ -1640,6 +1661,7 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo } else if res.RowsAffected != int64(len(sectors)) { return fmt.Errorf("expected %v affected rows but got %v", len(sectors), res.RowsAffected) } + deletedSectors = len(sectors) // Increment the host's lostSectors by the number of lost sectors. if err := tx.Exec("UPDATE hosts SET lost_sectors = lost_sectors + ? 
WHERE public_key = ?", len(sectors), publicKey(hk)).Error; err != nil { @@ -1667,6 +1689,7 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo } return nil }) + return deletedSectors, err } func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { @@ -2297,21 +2320,57 @@ func (s *SQLStore) objectHydrate(ctx context.Context, tx *gorm.DB, bucket, path // return object return api.Object{ Metadata: metadata, - ObjectMetadata: api.ObjectMetadata{ - ETag: obj[0].ObjectETag, - Health: obj[0].ObjectHealth, - MimeType: obj[0].ObjectMimeType, - ModTime: api.TimeRFC3339(obj[0].ObjectModTime.UTC()), - Name: obj[0].ObjectName, - Size: obj[0].ObjectSize, - }, - Object: object.Object{ + ObjectMetadata: newObjectMetadata( + obj[0].ObjectName, + obj[0].ObjectETag, + obj[0].ObjectMimeType, + obj[0].ObjectHealth, + obj[0].ObjectModTime, + obj[0].ObjectSize, + ), + Object: &object.Object{ Key: key, Slabs: slabs, }, }, nil } +// ObjectMetadata returns an object's metadata +func (s *SQLStore) ObjectMetadata(ctx context.Context, bucket, path string) (api.Object, error) { + var resp api.Object + err := s.db.Transaction(func(tx *gorm.DB) error { + var obj dbObject + err := tx.Model(&dbObject{}). + Joins("INNER JOIN buckets b ON objects.db_bucket_id = b.id"). + Where("b.name", bucket). + Where("object_id", path). + Take(&obj). + Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return api.ErrObjectNotFound + } else if err != nil { + return err + } + oum, err := s.objectMetadata(ctx, tx, bucket, path) + if err != nil { + return err + } + resp = api.Object{ + ObjectMetadata: newObjectMetadata( + obj.ObjectID, + obj.Etag, + obj.MimeType, + obj.Health, + obj.CreatedAt, + obj.Size, + ), + Metadata: oum, + } + return nil + }) + return resp, err +} + func (s *SQLStore) objectMetadata(ctx context.Context, tx *gorm.DB, bucket, path string) (api.ObjectUserMetadata, error) { var rows []dbObjectUserMetadata err := tx. @@ -2332,6 +2391,17 @@ func (s *SQLStore) objectMetadata(ctx context.Context, tx *gorm.DB, bucket, path return metadata, nil } +func newObjectMetadata(name, etag, mimeType string, health float64, modTime time.Time, size int64) api.ObjectMetadata { + return api.ObjectMetadata{ + ETag: etag, + Health: health, + ModTime: api.TimeRFC3339(modTime.UTC()), + Name: name, + Size: size, + MimeType: mimeType, + } +} + func (s *SQLStore) objectRaw(ctx context.Context, txn *gorm.DB, bucket string, path string) (rows rawObject, err error) { // NOTE: we LEFT JOIN here because empty objects are valid and need to be // included in the result set, when we convert the rawObject before @@ -2662,20 +2732,32 @@ func archiveContracts(ctx context.Context, tx *gorm.DB, contracts []dbContract, return nil } +func pruneSlabs(tx *gorm.DB) error { + // delete slabs without any associated slices or buffers + return tx.Exec(` +DELETE +FROM slabs +WHERE NOT EXISTS (SELECT 1 FROM slices WHERE slices.db_slab_id = slabs.id) +AND slabs.db_buffered_slab_id IS NULL +`).Error +} + // deleteObject deletes an object from the store and prunes all slabs which are // without an obect after the deletion. That means in case of packed uploads, // the slab is only deleted when no more objects point to it. -func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (numDeleted int64, _ error) { +func (s *SQLStore) deleteObject(tx *gorm.DB, bucket string, path string) (int64, error) { tx = tx.Where("object_id = ? 
AND ?", path, sqlWhereBucket("objects", bucket)). Delete(&dbObject{}) if tx.Error != nil { return 0, tx.Error } - numDeleted = tx.RowsAffected + numDeleted := tx.RowsAffected if numDeleted == 0 { return 0, nil // nothing to prune if no object was deleted + } else if err := pruneSlabs(tx); err != nil { + return numDeleted, err } - return + return numDeleted, nil } // deleteObjects deletes a batch of objects from the database. The order of @@ -2704,8 +2786,12 @@ func (s *SQLStore) deleteObjects(bucket string, path string) (numDeleted int64, if err := res.Error; err != nil { return res.Error } - duration = time.Since(start) + // prune slabs if we deleted an object rowsAffected = res.RowsAffected + if rowsAffected > 0 { + return pruneSlabs(tx) + } + duration = time.Since(start) return nil }); err != nil { return 0, fmt.Errorf("failed to delete objects: %w", err) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 96b06c4ec6..16e1046953 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "reflect" + "sort" "strings" "testing" "time" @@ -16,7 +17,6 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/object" "gorm.io/gorm" "gorm.io/gorm/schema" @@ -87,7 +87,7 @@ func TestObjectBasic(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(got.Object, want) { + if !reflect.DeepEqual(*got.Object, want) { t.Fatal("object mismatch", cmp.Diff(got.Object, want)) } @@ -118,7 +118,7 @@ func TestObjectBasic(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(got2.Object, want2) { + if !reflect.DeepEqual(*got2.Object, want2) { t.Fatal("object mismatch", cmp.Diff(got2.Object, want2)) } } @@ -175,7 +175,7 @@ func TestObjectMetadata(t *testing.T) { } // assert it matches - if !reflect.DeepEqual(got.Object, want) { + if !reflect.DeepEqual(*got.Object, want) { t.Log(got.Object) t.Log(want) t.Fatal("object mismatch", cmp.Diff(got.Object, want, cmp.AllowUnexported(object.EncryptionKey{}))) @@ -218,7 +218,7 @@ func TestSQLContractStore(t *testing.T) { } // Add an announcement. - err = ss.insertTestAnnouncement(hk, hostdb.Announcement{NetAddress: "address"}) + err = ss.insertTestAnnouncement(hk, newTestHostDBAnnouncement("address")) if err != nil { t.Fatal(err) } @@ -509,11 +509,11 @@ func TestRenewedContract(t *testing.T) { hk, hk2 := hks[0], hks[1] // Add announcements. 
- err = ss.insertTestAnnouncement(hk, hostdb.Announcement{NetAddress: "address"}) + err = ss.insertTestAnnouncement(hk, newTestHostDBAnnouncement("address")) if err != nil { t.Fatal(err) } - err = ss.insertTestAnnouncement(hk2, hostdb.Announcement{NetAddress: "address2"}) + err = ss.insertTestAnnouncement(hk2, newTestHostDBAnnouncement("address2")) if err != nil { t.Fatal(err) } @@ -1006,7 +1006,7 @@ func TestSQLMetadataStore(t *testing.T) { one := uint(1) expectedObj := dbObject{ - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, ObjectID: objID, Key: obj1Key, @@ -1069,7 +1069,7 @@ func TestSQLMetadataStore(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(fullObj.Object, obj1) { + if !reflect.DeepEqual(*fullObj.Object, obj1) { t.Fatal("object mismatch", cmp.Diff(fullObj, obj1)) } @@ -1167,6 +1167,7 @@ func TestSQLMetadataStore(t *testing.T) { slabs[i].Shards[0].Model = Model{} slabs[i].Shards[0].Contracts[0].Model = Model{} slabs[i].Shards[0].Contracts[0].Host.Model = Model{} + slabs[i].Shards[0].Contracts[0].Host.LastAnnouncement = time.Time{} slabs[i].HealthValidUntil = 0 } if !reflect.DeepEqual(slab1, expectedObjSlab1) { @@ -1182,7 +1183,7 @@ func TestSQLMetadataStore(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(fullObj.Object, obj1) { + if !reflect.DeepEqual(*fullObj.Object, obj1) { t.Fatal("object mismatch") } @@ -2211,10 +2212,9 @@ func TestUpdateSlab(t *testing.T) { t.Fatal(err) } var s dbSlab - if err := ss.db.Model(&dbSlab{}). + if err := ss.db.Where(&dbSlab{Key: key}). Joins("DBContractSet"). Preload("Shards"). - Where("key = ?", key). Take(&s). Error; err != nil { t.Fatal(err) @@ -2263,7 +2263,7 @@ func TestRecordContractSpending(t *testing.T) { } // Add an announcement. - err = ss.insertTestAnnouncement(hk, hostdb.Announcement{NetAddress: "address"}) + err = ss.insertTestAnnouncement(hk, newTestHostDBAnnouncement("address")) if err != nil { t.Fatal(err) } @@ -2435,7 +2435,7 @@ func TestObjectsStats(t *testing.T) { defer ss.Close() // Fetch stats on clean database. - info, err := ss.ObjectsStats(context.Background()) + info, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -2446,6 +2446,7 @@ func TestObjectsStats(t *testing.T) { // Create a few objects of different size. var objectsSize uint64 var sectorsSize uint64 + var totalUploadedSize uint64 for i := 0; i < 2; i++ { obj := newTestObject(1) objectsSize += uint64(obj.TotalSize()) @@ -2458,10 +2459,11 @@ func TestObjectsStats(t *testing.T) { t.Fatal(err) } for _, fcid := range fcids { - _, err := ss.addTestContract(fcid, hpk) + c, err := ss.addTestContract(fcid, hpk) if err != nil { t.Fatal(err) } + totalUploadedSize += c.Size } } } @@ -2482,10 +2484,11 @@ func TestObjectsStats(t *testing.T) { } var newContractID types.FileContractID frand.Read(newContractID[:]) - _, err = ss.addTestContract(newContractID, types.PublicKey{}) + c, err := ss.addTestContract(newContractID, types.PublicKey{}) if err != nil { t.Fatal(err) } + totalUploadedSize += c.Size newContract, err := ss.contract(context.Background(), fileContractID(newContractID)) if err != nil { t.Fatal(err) @@ -2499,21 +2502,37 @@ func TestObjectsStats(t *testing.T) { } // Check sizes. 
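ObjectsStats now takes a context and an api.ObjectsStatsOpts; leaving Bucket empty keeps the old all-buckets behaviour, while naming a bucket scopes every figure to it, as the checks below verify. A minimal caller-side sketch, assumed to live in the stores package:

```go
// bucketUsage reports the total object size for one bucket, or for all
// buckets when the name is empty. Sketch only; ss is an already-opened store.
func bucketUsage(ctx context.Context, ss *SQLStore, bucket string) (uint64, error) {
	info, err := ss.ObjectsStats(ctx, api.ObjectsStatsOpts{Bucket: bucket})
	if err != nil {
		return 0, err
	}
	return info.TotalObjectsSize, nil
}
```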
- info, err = ss.ObjectsStats(context.Background()) - if err != nil { - t.Fatal(err) - } - if info.TotalObjectsSize != objectsSize { - t.Fatal("wrong size", info.TotalObjectsSize, objectsSize) - } - if info.TotalSectorsSize != sectorsSize { - t.Fatal("wrong size", info.TotalSectorsSize, sectorsSize) - } - if info.TotalUploadedSize != sectorsSize*2 { - t.Fatal("wrong size", info.TotalUploadedSize, sectorsSize*2) + for _, opts := range []api.ObjectsStatsOpts{ + {}, // any bucket + {Bucket: api.DefaultBucketName}, // specific bucket + } { + info, err = ss.ObjectsStats(context.Background(), opts) + if err != nil { + t.Fatal(err) + } else if info.TotalObjectsSize != objectsSize { + t.Fatal("wrong size", info.TotalObjectsSize, objectsSize) + } else if info.TotalSectorsSize != sectorsSize { + t.Fatal("wrong size", info.TotalSectorsSize, sectorsSize) + } else if info.TotalUploadedSize != totalUploadedSize { + t.Fatal("wrong size", info.TotalUploadedSize, totalUploadedSize) + } else if info.NumObjects != 2 { + t.Fatal("wrong number of objects", info.NumObjects, 2) + } } - if info.NumObjects != 2 { - t.Fatal("wrong number of objects", info.NumObjects, 2) + + // Check other bucket. + if err := ss.CreateBucket(context.Background(), "other", api.BucketPolicy{}); err != nil { + t.Fatal(err) + } else if info, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{Bucket: "other"}); err != nil { + t.Fatal(err) + } else if info.TotalObjectsSize != 0 { + t.Fatal("wrong size", info.TotalObjectsSize) + } else if info.TotalSectorsSize != 0 { + t.Fatal("wrong size", info.TotalSectorsSize, 0) + } else if info.TotalUploadedSize != totalUploadedSize { + t.Fatal("wrong size", info.TotalUploadedSize, totalUploadedSize) + } else if info.NumObjects != 0 { + t.Fatal("wrong number of objects", info.NumObjects) } } @@ -2643,7 +2662,7 @@ func TestPartialSlab(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(obj, fetched.Object) { + if !reflect.DeepEqual(obj, *fetched.Object) { t.Fatal("mismatch", cmp.Diff(obj, fetched.Object, cmp.AllowUnexported(object.EncryptionKey{}))) } @@ -2679,7 +2698,7 @@ func TestPartialSlab(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(obj2, fetched.Object) { + if !reflect.DeepEqual(obj2, *fetched.Object) { t.Fatal("mismatch", cmp.Diff(obj2, fetched.Object)) } @@ -2727,7 +2746,7 @@ func TestPartialSlab(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(obj3, fetched.Object) { + if !reflect.DeepEqual(obj3, *fetched.Object) { t.Fatal("mismatch", cmp.Diff(obj3, fetched.Object, cmp.AllowUnexported(object.EncryptionKey{}))) } @@ -2908,7 +2927,7 @@ func TestContractSizes(t *testing.T) { } // assert there's two objects - s, err := ss.ObjectsStats(context.Background()) + s, err := ss.ObjectsStats(context.Background(), api.ObjectsStatsOpts{}) if err != nil { t.Fatal(err) } @@ -3563,8 +3582,10 @@ func TestDeleteHostSector(t *testing.T) { } // Prune the sector from hk1. - if err := ss.DeleteHostSector(context.Background(), hk1, root); err != nil { + if n, err := ss.DeleteHostSector(context.Background(), hk1, root); err != nil { t.Fatal(err) + } else if n != 2 { + t.Fatal("no sectors were pruned", n) } // Make sure 2 contractSector entries exist. 
@@ -3879,7 +3900,7 @@ func TestSlabCleanupTrigger(t *testing.T) { // create objects obj1 := dbObject{ ObjectID: "1", - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, } if err := ss.db.Create(&obj1).Error; err != nil { @@ -3887,7 +3908,7 @@ func TestSlabCleanupTrigger(t *testing.T) { } obj2 := dbObject{ ObjectID: "2", - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, } if err := ss.db.Create(&obj2).Error; err != nil { @@ -3923,7 +3944,8 @@ func TestSlabCleanupTrigger(t *testing.T) { } // delete the object - if err := ss.db.Delete(&obj1).Error; err != nil { + err := ss.RemoveObject(context.Background(), api.DefaultBucketName, obj1.ObjectID) + if err != nil { t.Fatal(err) } @@ -3936,7 +3958,8 @@ func TestSlabCleanupTrigger(t *testing.T) { } // delete second object - if err := ss.db.Delete(&obj2).Error; err != nil { + err = ss.RemoveObject(context.Background(), api.DefaultBucketName, obj2.ObjectID) + if err != nil { t.Fatal(err) } @@ -3960,7 +3983,7 @@ func TestSlabCleanupTrigger(t *testing.T) { } obj3 := dbObject{ ObjectID: "3", - DBBucketID: 1, + DBBucketID: ss.DefaultBucketID(), Health: 1, } if err := ss.db.Create(&obj3).Error; err != nil { @@ -3980,7 +4003,8 @@ func TestSlabCleanupTrigger(t *testing.T) { } // delete third object - if err := ss.db.Delete(&obj3).Error; err != nil { + err = ss.RemoveObject(context.Background(), api.DefaultBucketName, obj3.ObjectID) + if err != nil { t.Fatal(err) } if err := ss.db.Model(&dbSlab{}).Count(&slabCntr).Error; err != nil { @@ -4099,11 +4123,11 @@ func TestUpdateObjectReuseSlab(t *testing.T) { // fetch the object var dbObj dbObject - if err := ss.db.Where("db_bucket_id", 1).Take(&dbObj).Error; err != nil { + if err := ss.db.Where("db_bucket_id", ss.DefaultBucketID()).Take(&dbObj).Error; err != nil { t.Fatal(err) } else if dbObj.ID != 1 { t.Fatal("unexpected id", dbObj.ID) - } else if dbObj.DBBucketID != 1 { + } else if dbObj.DBBucketID != ss.DefaultBucketID() { t.Fatal("bucket id mismatch", dbObj.DBBucketID) } else if dbObj.ObjectID != "1" { t.Fatal("object id mismatch", dbObj.ObjectID) @@ -4205,7 +4229,7 @@ func TestUpdateObjectReuseSlab(t *testing.T) { // fetch the object var dbObj2 dbObject - if err := ss.db.Where("db_bucket_id", 1). + if err := ss.db.Where("db_bucket_id", ss.DefaultBucketID()). Where("object_id", "2"). 
Take(&dbObj2).Error; err != nil { t.Fatal(err) @@ -4284,3 +4308,99 @@ func TestUpdateObjectReuseSlab(t *testing.T) { } } } + +func TestTypeCurrency(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // prepare the table + if isSQLite(ss.db) { + if err := ss.db.Exec("CREATE TABLE currencies (id INTEGER PRIMARY KEY AUTOINCREMENT,c BLOB);").Error; err != nil { + t.Fatal(err) + } + } else { + if err := ss.db.Exec("CREATE TABLE currencies (id INT AUTO_INCREMENT PRIMARY KEY, c BLOB);").Error; err != nil { + t.Fatal(err) + } + } + + // insert currencies in random order + if err := ss.db.Exec("INSERT INTO currencies (c) VALUES (?),(?),(?);", bCurrency(types.MaxCurrency), bCurrency(types.NewCurrency64(1)), bCurrency(types.ZeroCurrency)).Error; err != nil { + t.Fatal(err) + } + + // fetch currencies and assert they're sorted + var currencies []bCurrency + if err := ss.db.Raw(`SELECT c FROM currencies ORDER BY c ASC`).Scan(¤cies).Error; err != nil { + t.Fatal(err) + } else if !sort.SliceIsSorted(currencies, func(i, j int) bool { + return types.Currency(currencies[i]).Cmp(types.Currency(currencies[j])) < 0 + }) { + t.Fatal("currencies not sorted", currencies) + } + + // convenience variables + c0 := currencies[0] + c1 := currencies[1] + cM := currencies[2] + + tests := []struct { + a bCurrency + b bCurrency + cmp string + }{ + { + a: c0, + b: c1, + cmp: "<", + }, + { + a: c1, + b: c0, + cmp: ">", + }, + { + a: c0, + b: c1, + cmp: "!=", + }, + { + a: c1, + b: c1, + cmp: "=", + }, + { + a: c0, + b: cM, + cmp: "<", + }, + { + a: cM, + b: c0, + cmp: ">", + }, + { + a: cM, + b: cM, + cmp: "=", + }, + } + for i, test := range tests { + var result bool + query := fmt.Sprintf("SELECT ? %s ?", test.cmp) + if !isSQLite(ss.db) { + query = strings.Replace(query, "?", "HEX(?)", -1) + } + if err := ss.db.Raw(query, test.a, test.b).Scan(&result).Error; err != nil { + t.Fatal(err) + } else if !result { + t.Errorf("unexpected result in case %d/%d: expected %v %s %v to be true", i+1, len(tests), types.Currency(test.a).String(), test.cmp, types.Currency(test.b).String()) + } else if test.cmp == "<" && types.Currency(test.a).Cmp(types.Currency(test.b)) >= 0 { + t.Fatal("invalid result") + } else if test.cmp == ">" && types.Currency(test.a).Cmp(types.Currency(test.b)) <= 0 { + t.Fatal("invalid result") + } else if test.cmp == "=" && types.Currency(test.a).Cmp(types.Currency(test.b)) != 0 { + t.Fatal("invalid result") + } + } +} diff --git a/stores/metrics.go b/stores/metrics.go index 203ed3b710..8816d17298 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -605,9 +605,7 @@ func (s *SQLStore) findPeriods(table string, dst interface{}, start time.Time, n WHERE ? 
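TestTypeCurrency above depends on bCurrency serialising to a fixed-width blob whose byte order matches numeric order, so that the database's byte-wise BLOB comparison sorts currencies correctly. A standalone illustration of that property; this shows the idea with a big-endian 16-byte encoding and is not necessarily the store's exact implementation:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"go.sia.tech/core/types"
)

// sortableBlob writes a currency as 16 big-endian bytes (Hi then Lo) so that
// lexicographic comparison of the blobs equals numeric comparison.
func sortableBlob(c types.Currency) []byte {
	b := make([]byte, 16)
	binary.BigEndian.PutUint64(b[:8], c.Hi)
	binary.BigEndian.PutUint64(b[8:], c.Lo)
	return b
}

func main() {
	small, big := types.NewCurrency64(1), types.MaxCurrency
	fmt.Println(bytes.Compare(sortableBlob(small), sortableBlob(big)) < 0) // prints true
}
```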
GROUP BY p.period_start - ORDER BY - p.period_start ASC - ) i ON %s.id = i.id + ) i ON %s.id = i.id ORDER BY Period ASC `, table, table, table, table), unixTimeMS(start), interval.Milliseconds(), diff --git a/stores/metrics_test.go b/stores/metrics_test.go index 2b2f572a7d..f71d985bd3 100644 --- a/stores/metrics_test.go +++ b/stores/metrics_test.go @@ -517,7 +517,7 @@ func TestWalletMetrics(t *testing.T) { } else if !sort.SliceIsSorted(metrics, func(i, j int) bool { return time.Time(metrics[i].Timestamp).Before(time.Time(metrics[j].Timestamp)) }) { - t.Fatal("expected metrics to be sorted by time") + t.Fatalf("expected metrics to be sorted by time, %+v", metrics) } // Prune metrics diff --git a/stores/migrations.go b/stores/migrations.go index e79c6c36b9..d89be7ab55 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/go-gormigrate/gormigrate/v2" - "go.sia.tech/renterd/api" "go.uber.org/zap" "gorm.io/gorm" ) @@ -16,33 +15,8 @@ var ( errMySQLNoSuperPrivilege = errors.New("You do not have the SUPER privilege and binary logging is enabled") ) -// initSchema is executed only on a clean database. Otherwise the individual -// migrations are executed. -func initSchema(tx *gorm.DB) (err error) { - // Pick the right migrations. - var schema []byte - if isSQLite(tx) { - schema, err = migrations.ReadFile("migrations/sqlite/main/schema.sql") - } else { - schema, err = migrations.ReadFile("migrations/mysql/main/schema.sql") - } - if err != nil { - return - } - - // Run it. - err = tx.Exec(string(schema)).Error - if err != nil { - return fmt.Errorf("failed to init schema: %w", err) - } - - // Add default bucket. - return tx.Create(&dbBucket{ - Name: api.DefaultBucketName, - }).Error -} - func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { + dbIdentifier := "main" migrations := []*gormigrate.Migration{ { ID: "00001_init", @@ -51,26 +25,38 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { { ID: "00001_object_metadata", Migrate: func(tx *gorm.DB) error { - return performMigration(tx, "00001_object_metadata", logger) + return performMigration(tx, dbIdentifier, "00001_object_metadata", logger) }, }, { ID: "00002_prune_slabs_trigger", Migrate: func(tx *gorm.DB) error { - err := performMigration(tx, "00002_prune_slabs_trigger", logger) + err := performMigration(tx, dbIdentifier, "00002_prune_slabs_trigger", logger) if err != nil && strings.Contains(err.Error(), errMySQLNoSuperPrivilege.Error()) { logger.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") } return err }, }, + { + ID: "00003_idx_objects_size", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00003_idx_objects_size", logger) + }, + }, + { + ID: "00004_prune_slabs_cascade", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00004_prune_slabs_cascade", logger) + }, + }, } // Create migrator. m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initSchema) + m.InitSchema(initSchema(db, dbIdentifier, logger)) // Perform migrations. 
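With performMigration now taking a database identifier and both stores sharing initSchema, adding a migration is just another list entry pointing at a new SQL file pair. A hypothetical example, assumed to sit in the stores package alongside the list above; the ID and file name 00005_example do not exist in this change:

```go
// exampleMigration shows how a future migration would be registered with the
// refactored helper; "00005_example" is made up for illustration.
func exampleMigration(logger *zap.SugaredLogger) *gormigrate.Migration {
	return &gormigrate.Migration{
		ID: "00005_example",
		Migrate: func(tx *gorm.DB) error {
			return performMigration(tx, "main", "00005_example", logger)
		},
	}
}
```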
if err := m.Migrate(); err != nil { @@ -78,30 +64,3 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { } return nil } - -func performMigration(db *gorm.DB, name string, logger *zap.SugaredLogger) error { - logger.Infof("performing migration %s", name) - - // build path - var path string - if isSQLite(db) { - path = fmt.Sprintf("migrations/sqlite/main/migration_" + name + ".sql") - } else { - path = fmt.Sprintf("migrations/mysql/main/migration_" + name + ".sql") - } - - // read migration file - migration, err := migrations.ReadFile(path) - if err != nil { - return fmt.Errorf("migration %s failed: %w", name, err) - } - - // execute it - err = db.Exec(string(migration)).Error - if err != nil { - return fmt.Errorf("migration %s failed: %w", name, err) - } - - logger.Infof("migration %s complete", name) - return nil -} diff --git a/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql b/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql new file mode 100644 index 0000000000..0df0b5d582 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00003_idx_objects_size.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_objects_size` ON `objects`(`size`); diff --git a/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql new file mode 100644 index 0000000000..c2efe34673 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00004_prune_slabs_cascade.sql @@ -0,0 +1,16 @@ +-- drop triggers +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; + +-- add ON DELETE CASCADE to slices +ALTER TABLE slices DROP FOREIGN KEY fk_objects_slabs; +ALTER TABLE slices ADD CONSTRAINT fk_objects_slabs FOREIGN KEY (db_object_id) REFERENCES objects (id) ON DELETE CASCADE; + +ALTER TABLE slices DROP FOREIGN KEY fk_multipart_parts_slabs; +ALTER TABLE slices ADD CONSTRAINT fk_multipart_parts_slabs FOREIGN KEY (db_multipart_part_id) REFERENCES multipart_parts (id) ON DELETE CASCADE; + +-- add ON DELETE CASCADE to multipart_parts +ALTER TABLE multipart_parts DROP FOREIGN KEY fk_multipart_uploads_parts; +ALTER TABLE multipart_parts ADD CONSTRAINT fk_multipart_uploads_parts FOREIGN KEY (db_multipart_upload_id) REFERENCES multipart_uploads (id) ON DELETE CASCADE; \ No newline at end of file diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 39bf279f0f..a5ed86807c 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -310,7 +310,7 @@ CREATE TABLE `multipart_parts` ( KEY `idx_multipart_parts_etag` (`etag`), KEY `idx_multipart_parts_part_number` (`part_number`), KEY `idx_multipart_parts_db_multipart_upload_id` (`db_multipart_upload_id`), - CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) + CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbObject @@ -330,6 +330,7 @@ CREATE TABLE `objects` ( KEY `idx_objects_object_id` (`object_id`), KEY `idx_objects_health` (`health`), KEY `idx_objects_etag` (`etag`), + KEY `idx_objects_size` (`size`), CONSTRAINT 
`fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; @@ -373,8 +374,8 @@ CREATE TABLE `slices` ( KEY `idx_slices_object_index` (`object_index`), KEY `idx_slices_db_multipart_part_id` (`db_multipart_part_id`), KEY `idx_slices_db_slab_id` (`db_slab_id`), - CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts` (`id`), - CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`), + CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; @@ -420,36 +421,5 @@ CREATE TABLE `object_user_metadata` ( CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; --- dbObject trigger to delete from slices -CREATE TRIGGER before_delete_on_objects_delete_slices -BEFORE DELETE -ON objects FOR EACH ROW -DELETE FROM slices -WHERE slices.db_object_id = OLD.id; - --- dbMultipartUpload trigger to delete from dbMultipartPart -CREATE TRIGGER before_delete_on_multipart_uploads_delete_multipart_parts -BEFORE DELETE -ON multipart_uploads FOR EACH ROW -DELETE FROM multipart_parts -WHERE multipart_parts.db_multipart_upload_id = OLD.id; - --- dbMultipartPart trigger to delete from slices -CREATE TRIGGER before_delete_on_multipart_parts_delete_slices -BEFORE DELETE -ON multipart_parts FOR EACH ROW -DELETE FROM slices -WHERE slices.db_multipart_part_id = OLD.id; - --- dbSlices trigger to prune slabs -CREATE TRIGGER after_delete_on_slices_delete_slabs -AFTER DELETE -ON slices FOR EACH ROW -DELETE FROM slabs -WHERE slabs.id = OLD.db_slab_id -AND slabs.db_buffered_slab_id IS NULL -AND NOT EXISTS ( - SELECT 1 - FROM slices - WHERE slices.db_slab_id = OLD.db_slab_id -); \ No newline at end of file +-- create default bucket +INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); \ No newline at end of file diff --git a/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql b/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql new file mode 100644 index 0000000000..0df0b5d582 --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00003_idx_objects_size.sql @@ -0,0 +1 @@ +CREATE INDEX `idx_objects_size` ON `objects`(`size`); diff --git a/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql new file mode 100644 index 0000000000..03f006acdb --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00004_prune_slabs_cascade.sql @@ -0,0 +1,30 @@ +-- drop triggers +DROP TRIGGER IF EXISTS before_delete_on_objects_delete_slices; +DROP TRIGGER IF EXISTS before_delete_on_multipart_uploads_delete_multipart_parts; +DROP TRIGGER IF EXISTS before_delete_on_multipart_parts_delete_slices; +DROP TRIGGER IF EXISTS after_delete_on_slices_delete_slabs; + +PRAGMA foreign_keys=off; +-- update constraints on slices +DROP TABLE IF EXISTS slices_temp; +CREATE TABLE `slices_temp` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` 
integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); +INSERT INTO slices_temp SELECT `id`, `created_at`, `db_object_id`, `object_index`, `db_multipart_part_id`, `db_slab_id`, `offset`, `length` FROM slices; +DROP TABLE slices; +ALTER TABLE slices_temp RENAME TO slices; + +CREATE INDEX `idx_slices_object_index` ON `slices`(`object_index`); +CREATE INDEX `idx_slices_db_object_id` ON `slices`(`db_object_id`); +CREATE INDEX `idx_slices_db_slab_id` ON `slices`(`db_slab_id`); +CREATE INDEX `idx_slices_db_multipart_part_id` ON `slices`(`db_multipart_part_id`); + +-- update constraints multipart_parts +DROP TABLE IF EXISTS multipart_parts_temp; +CREATE TABLE `multipart_parts_temp` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`) ON DELETE CASCADE); +INSERT INTO multipart_parts_temp SELECT * FROM multipart_parts; +DROP TABLE multipart_parts; +ALTER TABLE multipart_parts_temp RENAME TO multipart_parts; + +CREATE INDEX `idx_multipart_parts_db_multipart_upload_id` ON `multipart_parts`(`db_multipart_upload_id`); +CREATE INDEX `idx_multipart_parts_part_number` ON `multipart_parts`(`part_number`); +CREATE INDEX `idx_multipart_parts_etag` ON `multipart_parts`(`etag`); +PRAGMA foreign_keys=on; diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index df9fc9a830..8d7afeaa1a 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -50,6 +50,7 @@ CREATE INDEX `idx_objects_db_bucket_id` ON `objects`(`db_bucket_id`); CREATE INDEX `idx_objects_etag` ON `objects`(`etag`); CREATE INDEX `idx_objects_health` ON `objects`(`health`); CREATE INDEX `idx_objects_object_id` ON `objects`(`object_id`); +CREATE INDEX `idx_objects_size` ON `objects`(`size`); CREATE UNIQUE INDEX `idx_object_bucket` ON `objects`(`db_bucket_id`,`object_id`); -- dbMultipartUpload @@ -84,13 +85,13 @@ CREATE INDEX `idx_contract_sectors_db_contract_id` ON `contract_sectors`(`db_con CREATE INDEX `idx_contract_sectors_db_sector_id` ON `contract_sectors`(`db_sector_id`); -- dbMultipartPart -CREATE TABLE `multipart_parts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`)); +CREATE TABLE `multipart_parts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`etag` text,`part_number` integer,`size` integer,`db_multipart_upload_id` integer NOT NULL,CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads`(`id`) ON DELETE CASCADE); CREATE INDEX `idx_multipart_parts_db_multipart_upload_id` ON `multipart_parts`(`db_multipart_upload_id`); CREATE INDEX `idx_multipart_parts_part_number` ON `multipart_parts`(`part_number`); CREATE INDEX `idx_multipart_parts_etag` ON `multipart_parts`(`etag`); -- dbSlice -CREATE 
TABLE `slices` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`),CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`),CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); +CREATE TABLE `slices` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer,`object_index` integer,`db_multipart_part_id` integer,`db_slab_id` integer,`offset` integer,`length` integer,CONSTRAINT `fk_objects_slabs` FOREIGN KEY (`db_object_id`) REFERENCES `objects`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_multipart_parts_slabs` FOREIGN KEY (`db_multipart_part_id`) REFERENCES `multipart_parts`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs`(`id`)); CREATE INDEX `idx_slices_object_index` ON `slices`(`object_index`); CREATE INDEX `idx_slices_db_object_id` ON `slices`(`db_object_id`); CREATE INDEX `idx_slices_db_slab_id` ON `slices`(`db_slab_id`); @@ -147,40 +148,5 @@ CREATE UNIQUE INDEX `idx_module_event_url` ON `webhooks`(`module`,`event`,`url`) CREATE TABLE `object_user_metadata` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_object_id` integer DEFAULT NULL,`db_multipart_upload_id` integer DEFAULT NULL,`key` text NOT NULL,`value` text, CONSTRAINT `fk_object_user_metadata` FOREIGN KEY (`db_object_id`) REFERENCES `objects` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_multipart_upload_user_metadata` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE SET NULL); CREATE UNIQUE INDEX `idx_object_user_metadata_key` ON `object_user_metadata`(`db_object_id`,`db_multipart_upload_id`,`key`); --- dbObject trigger to delete from slices -CREATE TRIGGER before_delete_on_objects_delete_slices -BEFORE DELETE ON objects -BEGIN - DELETE FROM slices - WHERE slices.db_object_id = OLD.id; -END; - --- dbMultipartUpload trigger to delete from dbMultipartPart -CREATE TRIGGER before_delete_on_multipart_uploads_delete_multipart_parts -BEFORE DELETE ON multipart_uploads -BEGIN - DELETE FROM multipart_parts - WHERE multipart_parts.db_multipart_upload_id = OLD.id; -END; - --- dbMultipartPart trigger to delete from slices -CREATE TRIGGER before_delete_on_multipart_parts_delete_slices -BEFORE DELETE ON multipart_parts -BEGIN - DELETE FROM slices - WHERE slices.db_multipart_part_id = OLD.id; -END; - --- dbSlices trigger to prune slabs -CREATE TRIGGER after_delete_on_slices_delete_slabs -AFTER DELETE ON slices -BEGIN - DELETE FROM slabs - WHERE slabs.id = OLD.db_slab_id - AND slabs.db_buffered_slab_id IS NULL - AND NOT EXISTS ( - SELECT 1 - FROM slices - WHERE slices.db_slab_id = OLD.db_slab_id - ); -END; \ No newline at end of file +-- create default bucket +INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); diff --git a/stores/migrations_metrics.go b/stores/migrations_metrics.go index a95d7b914e..60c62c4761 100644 --- a/stores/migrations_metrics.go +++ b/stores/migrations_metrics.go @@ -8,30 +8,8 @@ import ( "gorm.io/gorm" ) -// initMetricsSchema is executed only on a clean database. Otherwise the individual -// migrations are executed. -func initMetricsSchema(tx *gorm.DB) error { - // Pick the right migrations. 
- var schema []byte - var err error - if isSQLite(tx) { - schema, err = migrations.ReadFile("migrations/sqlite/metrics/schema.sql") - } else { - schema, err = migrations.ReadFile("migrations/mysql/metrics/schema.sql") - } - if err != nil { - return err - } - - // Run it. - err = tx.Exec(string(schema)).Error - if err != nil { - return fmt.Errorf("failed to init schema: %w", err) - } - return nil -} - -func performMetricsMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { +func performMetricsMigrations(tx *gorm.DB, logger *zap.SugaredLogger) error { + dbIdentifier := "metrics" migrations := []*gormigrate.Migration{ { ID: "00001_init", @@ -40,10 +18,10 @@ func performMetricsMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { } // Create migrator. - m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) + m := gormigrate.New(tx, gormigrate.DefaultOptions, migrations) // Set init function. - m.InitSchema(initMetricsSchema) + m.InitSchema(initSchema(tx, dbIdentifier, logger)) // Perform migrations. if err := m.Migrate(); err != nil { diff --git a/stores/migrations_utils.go b/stores/migrations_utils.go new file mode 100644 index 0000000000..46d7f3dc42 --- /dev/null +++ b/stores/migrations_utils.go @@ -0,0 +1,57 @@ +package stores + +import ( + "fmt" + + gormigrate "github.com/go-gormigrate/gormigrate/v2" + "go.uber.org/zap" + "gorm.io/gorm" +) + +// initSchema is executed only on a clean database. Otherwise the individual +// migrations are executed. +func initSchema(db *gorm.DB, name string, logger *zap.SugaredLogger) gormigrate.InitSchemaFunc { + return func(tx *gorm.DB) error { + logger.Infof("initializing '%s' schema", name) + + // init schema + err := execSQLFile(tx, name, "schema") + if err != nil { + return fmt.Errorf("failed to init schema: %w", err) + } + + logger.Info("initialization complete") + return nil + } +} + +func performMigration(db *gorm.DB, kind, migration string, logger *zap.SugaredLogger) error { + logger.Infof("performing %s migration '%s'", kind, migration) + + // execute migration + err := execSQLFile(db, kind, fmt.Sprintf("migration_%s", migration)) + if err != nil { + return fmt.Errorf("migration '%s' failed: %w", migration, err) + } + + logger.Infof("migration '%s' complete", migration) + return nil +} + +func execSQLFile(db *gorm.DB, folder, filename string) error { + // build path + protocol := "mysql" + if isSQLite(db) { + protocol = "sqlite" + } + path := fmt.Sprintf("migrations/%s/%s/%s.sql", protocol, folder, filename) + + // read file + file, err := migrations.ReadFile(path) + if err != nil { + return err + } + + // execute it + return db.Exec(string(file)).Error +} diff --git a/stores/multipart.go b/stores/multipart.go index 18706ed0c0..3a5bcd54a4 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -295,6 +295,10 @@ func (s *SQLStore) AbortMultipartUpload(ctx context.Context, bucket, path string if err != nil { return fmt.Errorf("failed to delete multipart upload: %w", err) } + // Prune the slabs. + if err := pruneSlabs(tx); err != nil { + return fmt.Errorf("failed to prune slabs: %w", err) + } return nil }) } @@ -435,6 +439,11 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str if err := tx.Delete(&mu).Error; err != nil { return fmt.Errorf("failed to delete multipart upload: %w", err) } + + // Prune the slabs. 
+ if err := pruneSlabs(tx); err != nil { + return fmt.Errorf("failed to prune slabs: %w", err) + } return nil }) if err != nil { diff --git a/stores/sql_test.go b/stores/sql_test.go index 3a51161aeb..776e3e10ef 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -48,6 +48,9 @@ type testSQLStore struct { } type testSQLStoreConfig struct { + dbURI string + dbUser string + dbPassword string dbName string dbMetricsName string dir string @@ -65,9 +68,26 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { if dir == "" { dir = t.TempDir() } - dbName := cfg.dbName + + dbURI, dbUser, dbPassword, dbName := DBConfigFromEnv() + if dbURI == "" { + dbURI = cfg.dbURI + } + if cfg.persistent && dbURI != "" { + t.Fatal("invalid store config, can't use both persistent and dbURI") + } + if dbUser == "" { + dbUser = cfg.dbUser + } + if dbPassword == "" { + dbPassword = cfg.dbPassword + } if dbName == "" { - dbName = hex.EncodeToString(frand.Bytes(32)) // random name for db + if cfg.dbName != "" { + dbName = cfg.dbName + } else { + dbName = hex.EncodeToString(frand.Bytes(32)) // random name for db + } } dbMetricsName := cfg.dbMetricsName if dbMetricsName == "" { @@ -75,7 +95,18 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { } var conn, connMetrics gorm.Dialector - if cfg.persistent { + if dbURI != "" { + if tmpDB, err := gorm.Open(NewMySQLConnection(dbUser, dbPassword, dbURI, "")); err != nil { + t.Fatal(err) + } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", dbName)).Error; err != nil { + t.Fatal(err) + } else if err := tmpDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", dbMetricsName)).Error; err != nil { + t.Fatal(err) + } + + conn = NewMySQLConnection(dbUser, dbPassword, dbURI, dbName) + connMetrics = NewMySQLConnection(dbUser, dbPassword, dbURI, dbMetricsName) + } else if cfg.persistent { conn = NewSQLiteConnection(filepath.Join(cfg.dir, "db.sqlite")) connMetrics = NewSQLiteConnection(filepath.Join(cfg.dir, "metrics.sqlite")) } else { @@ -125,6 +156,18 @@ func (s *testSQLStore) Close() error { return nil } +func (s *testSQLStore) DefaultBucketID() uint { + var b dbBucket + if err := s.db. + Model(&dbBucket{}). + Where("name = ?", api.DefaultBucketName). + Take(&b). 
+ Error; err != nil { + s.t.Fatal(err) + } + return b.ID +} + func (s *testSQLStore) Reopen() *testSQLStore { s.t.Helper() cfg := defaultTestSQLStoreConfig @@ -217,11 +260,13 @@ func (s *SQLStore) contractsCount() (cnt int64, err error) { func (s *SQLStore) overrideSlabHealth(objectID string, health float64) (err error) { err = s.db.Exec(fmt.Sprintf(` UPDATE slabs SET health = %v WHERE id IN ( - SELECT sla.id - FROM objects o - INNER JOIN slices sli ON o.id = sli.db_object_id - INNER JOIN slabs sla ON sli.db_slab_id = sla.id - WHERE o.object_id = "%s" + SELECT * FROM ( + SELECT sla.id + FROM objects o + INNER JOIN slices sli ON o.id = sli.db_object_id + INNER JOIN slabs sla ON sli.db_slab_id = sla.id + WHERE o.object_id = "%s" + ) AS sub )`, health, objectID)).Error return } @@ -283,11 +328,24 @@ func TestConsensusReset(t *testing.T) { } } -type queryPlanExplain struct { - ID int `json:"id"` - Parent int `json:"parent"` - NotUsed bool `json:"notused"` - Detail string `json:"detail"` +type sqliteQueryPlan struct { + Detail string `json:"detail"` +} + +func (p sqliteQueryPlan) usesIndex() bool { + d := strings.ToLower(p.Detail) + return strings.Contains(d, "using index") || strings.Contains(d, "using covering index") +} + +//nolint:tagliatelle +type mysqlQueryPlan struct { + Extra string `json:"Extra"` + PossibleKeys string `json:"possible_keys"` +} + +func (p mysqlQueryPlan) usesIndex() bool { + d := strings.ToLower(p.Extra) + return strings.Contains(d, "using index") || strings.Contains(p.PossibleKeys, "idx_") } func TestQueryPlan(t *testing.T) { @@ -323,14 +381,20 @@ func TestQueryPlan(t *testing.T) { } for _, query := range queries { - var explain queryPlanExplain - err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error - if err != nil { - t.Fatal(err) - } - if !(strings.Contains(explain.Detail, "USING INDEX") || - strings.Contains(explain.Detail, "USING COVERING INDEX")) { - t.Fatalf("query '%s' should use an index, instead the plan was '%s'", query, explain.Detail) + if isSQLite(ss.db) { + var explain sqliteQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN QUERY PLAN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } + } else { + var explain mysqlQueryPlan + if err := ss.db.Raw(fmt.Sprintf("EXPLAIN %s;", query)).Scan(&explain).Error; err != nil { + t.Fatal(err) + } else if !explain.usesIndex() { + t.Fatalf("query '%s' should use an index, instead the plan was %+v", query, explain) + } } } } diff --git a/stores/types.go b/stores/types.go index 6b74f7563e..42a8d29e44 100644 --- a/stores/types.go +++ b/stores/types.go @@ -2,6 +2,7 @@ package stores import ( "database/sql/driver" + "encoding/binary" "encoding/json" "errors" "fmt" @@ -25,6 +26,7 @@ type ( unixTimeMS time.Time datetime time.Time currency types.Currency + bCurrency types.Currency fileContractID types.FileContractID hash256 types.Hash256 publicKey types.PublicKey @@ -338,3 +340,29 @@ func (u *unsigned64) Scan(value interface{}) error { func (u unsigned64) Value() (driver.Value, error) { return int64(u), nil } + +func (bCurrency) GormDataType() string { + return "bytes" +} + +// Scan implements the sql.Scanner interface. 
+func (sc *bCurrency) Scan(src any) error { + buf, ok := src.([]byte) + if !ok { + return fmt.Errorf("cannot scan %T to Currency", src) + } else if len(buf) != 16 { + return fmt.Errorf("cannot scan %d bytes to Currency", len(buf)) + } + + sc.Hi = binary.BigEndian.Uint64(buf[:8]) + sc.Lo = binary.BigEndian.Uint64(buf[8:]) + return nil +} + +// Value implements the driver.Valuer interface. +func (sc bCurrency) Value() (driver.Value, error) { + buf := make([]byte, 16) + binary.BigEndian.PutUint64(buf[:8], sc.Hi) + binary.BigEndian.PutUint64(buf[8:], sc.Lo) + return buf, nil +} diff --git a/stores/wallet.go b/stores/wallet.go index 679e96074e..d9bf51c395 100644 --- a/stores/wallet.go +++ b/stores/wallet.go @@ -130,7 +130,7 @@ func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { // Add/Remove siacoin outputs. for _, diff := range cc.SiacoinOutputDiffs { var sco types.SiacoinOutput - convertToCore(diff.SiacoinOutput, &sco) + convertToCore(diff.SiacoinOutput, (*types.V1SiacoinOutput)(&sco)) if sco.Address != s.walletAddress { continue } @@ -166,7 +166,7 @@ func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { continue } var sco types.SiacoinOutput - convertToCore(dsco.SiacoinOutput, &sco) + convertToCore(dsco.SiacoinOutput, (*types.V1SiacoinOutput)(&sco)) s.unappliedTxnChanges = append(s.unappliedTxnChanges, txnChange{ addition: true, txnID: hash256(dsco.ID), // use output id as txn id @@ -213,7 +213,7 @@ func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { for _, diff := range appliedDiff.SiacoinOutputDiffs { if diff.Direction == modules.DiffRevert { var so types.SiacoinOutput - convertToCore(diff.SiacoinOutput, &so) + convertToCore(diff.SiacoinOutput, (*types.V1SiacoinOutput)(&so)) spentOutputs[types.SiacoinOutputID(diff.ID)] = so } } diff --git a/worker/bench_test.go b/worker/bench_test.go new file mode 100644 index 0000000000..552eca17c6 --- /dev/null +++ b/worker/bench_test.go @@ -0,0 +1,101 @@ +package worker + +import ( + "bytes" + "context" + "io" + "testing" + + rhpv2 "go.sia.tech/core/rhp/v2" + "go.sia.tech/renterd/api" + "lukechampine.com/frand" +) + +// zeroReader is a reader that leaves the buffer unchanged and returns no error. +// It's useful for benchmarks that need to produce data for uploading and should +// be used together with an io.LimitReader. +type zeroReader struct{} + +func (z *zeroReader) Read(p []byte) (n int, err error) { + return len(p), nil +} + +// BenchmarkDownloaderSingleObject benchmarks downloading a single, slab-sized +// object. +// 1036.74 MB/s | M2 Pro | c9dc1b6 +func BenchmarkDownloaderSingleObject(b *testing.B) { + w := newMockWorker() + + up := testParameters(b.TempDir()) + up.rs.MinShards = 10 + up.rs.TotalShards = 30 + up.packing = false + w.addHosts(up.rs.TotalShards) + + data := bytes.NewReader(frand.Bytes(int(up.rs.SlabSizeNoRedundancy()))) + _, _, err := w.ul.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) + if err != nil { + b.Fatal(err) + } + o, err := w.os.Object(context.Background(), testBucket, up.path, api.GetObjectOptions{}) + if err != nil { + b.Fatal(err) + } + + b.SetBytes(o.Object.Size) + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = w.dl.DownloadObject(context.Background(), io.Discard, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + if err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkUploaderSingleObject benchmarks uploading a single object. 
+// +// Speed | CPU | Commit +// 433.86 MB/s | M2 Pro | bae6e77 +func BenchmarkUploaderSingleObject(b *testing.B) { + w := newMockWorker() + + up := testParameters(b.TempDir()) + up.rs.MinShards = 10 + up.rs.TotalShards = 30 + up.packing = false + w.addHosts(up.rs.TotalShards) + + data := io.LimitReader(&zeroReader{}, int64(b.N*rhpv2.SectorSize*up.rs.MinShards)) + b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) + b.ResetTimer() + + _, _, err := w.ul.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) + if err != nil { + b.Fatal(err) + } +} + +// BenchmarkUploaderMultiObject benchmarks uploading one object per slab. +// +// Speed | CPU | Commit +// 282.47 MB/s | M2 Pro | bae6e77 +func BenchmarkUploaderMultiObject(b *testing.B) { + w := newMockWorker() + + up := testParameters(b.TempDir()) + up.rs.MinShards = 10 + up.rs.TotalShards = 30 + up.packing = false + w.addHosts(up.rs.TotalShards) + + b.SetBytes(int64(rhpv2.SectorSize * up.rs.MinShards)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + data := io.LimitReader(&zeroReader{}, int64(rhpv2.SectorSize*up.rs.MinShards)) + _, _, err := w.ul.Upload(context.Background(), data, w.contracts(), up, lockingPriorityUpload) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/worker/download.go b/worker/download.go index 19c0f33946..427a38c30a 100644 --- a/worker/download.go +++ b/worker/download.go @@ -25,7 +25,7 @@ const ( ) var ( - errDownloadInterrupted = errors.New("download was interrupted") + errDownloadCancelled = errors.New("download was cancelled") errDownloadNotEnoughHosts = errors.New("not enough hosts available to download the slab") errHostNoLongerUsed = errors.New("host no longer used") ) @@ -209,12 +209,13 @@ func (mgr *downloadManager) DownloadObject(ctx context.Context, w io.Writer, o o hosts[c.HostKey] = struct{}{} } - // buffer the writer - bw := bufio.NewWriter(w) - defer bw.Flush() - // create the cipher writer - cw := o.Key.Decrypt(bw, offset) + cw := o.Key.Decrypt(w, offset) + + // buffer the writer we recover to, making sure that we don't hammer the + // response writer with tiny writes + bw := bufio.NewWriter(cw) + defer bw.Flush() // create response chan and ensure it's closed properly var wg sync.WaitGroup @@ -318,7 +319,7 @@ outer: var resp *slabDownloadResponse select { case <-ctx.Done(): - return errDownloadInterrupted + return errDownloadCancelled case resp = <-responseChan: } @@ -341,7 +342,7 @@ outer: s := slabs[respIndex] if s.PartialSlab { // Partial slab. - _, err = cw.Write(s.Data) + _, err = bw.Write(s.Data) if err != nil { mgr.logger.Errorf("failed to send partial slab", respIndex, err) return err @@ -349,7 +350,7 @@ outer: } else { // Regular slab. 
slabs[respIndex].Decrypt(next.shards) - err := slabs[respIndex].Recover(cw, next.shards) + err := slabs[respIndex].Recover(bw, next.shards) if err != nil { mgr.logger.Errorf("failed to recover slab %v: %v", respIndex, err) return err @@ -834,8 +835,6 @@ loop: if isSectorNotFound(resp.err) { if err := s.mgr.os.DeleteHostSector(ctx, resp.req.host.PublicKey(), resp.req.root); err != nil { s.mgr.logger.Errorw("failed to mark sector as lost", "hk", resp.req.host.PublicKey(), "root", resp.req.root, zap.Error(err)) - } else { - s.mgr.logger.Infow("successfully marked sector as lost", "hk", resp.req.host.PublicKey(), "root", resp.req.root) } } else if isPriceTableGouging(resp.err) && s.overpay && !resp.req.overpay { resp.req.overpay = true // ensures we don't retry the same request over and over again diff --git a/worker/downloader_test.go b/worker/downloader_test.go index e6b066102d..d1fc28081a 100644 --- a/worker/downloader_test.go +++ b/worker/downloader_test.go @@ -16,7 +16,7 @@ func TestDownloaderStopped(t *testing.T) { req := sectorDownloadReq{ resps: §orResponses{ - c: make(chan struct{}), + c: make(chan struct{}, 1), }, } dl.enqueue(&req) diff --git a/worker/host.go b/worker/host.go index 86e92ce27e..a7932e04f5 100644 --- a/worker/host.go +++ b/worker/host.go @@ -21,7 +21,7 @@ type ( PublicKey() types.PublicKey DownloadSector(ctx context.Context, w io.Writer, root types.Hash256, offset, length uint32, overpay bool) error - UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (types.Hash256, error) + UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error FetchPriceTable(ctx context.Context, rev *types.FileContractRevision) (hpt hostdb.HostPriceTable, err error) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (types.FileContractRevision, error) @@ -52,7 +52,6 @@ type ( acc *account bus Bus contractSpendingRecorder ContractSpendingRecorder - interactionRecorder HostInteractionRecorder logger *zap.SugaredLogger transportPool *transportPoolV3 priceTables *priceTables @@ -70,7 +69,6 @@ func (w *worker) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr acc: w.accounts.ForHost(hk), bus: w.bus, contractSpendingRecorder: w.contractSpendingRecorder, - interactionRecorder: w.hostInteractionRecorder, logger: w.logger.Named(hk.String()[:4]), fcid: fcid, siamuxAddr: siamuxAddr, @@ -123,11 +121,11 @@ func (h *host) DownloadSector(ctx context.Context, w io.Writer, root types.Hash2 }) } -func (h *host) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (root types.Hash256, err error) { +func (h *host) UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (err error) { // fetch price table pt, err := h.priceTable(ctx, nil) if err != nil { - return types.Hash256{}, err + return err } // prepare payment @@ -136,28 +134,28 @@ func (h *host) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, // insufficient balance error expectedCost, _, _, err := uploadSectorCost(pt, rev.WindowEnd) if err != nil { - return types.Hash256{}, err + return err } if rev.RevisionNumber == math.MaxUint64 { - return types.Hash256{}, fmt.Errorf("revision number has reached max, fcid %v", rev.ParentID) + return fmt.Errorf("revision number has reached max, fcid %v", rev.ParentID) } payment, ok := rhpv3.PayByContract(&rev, expectedCost, h.acc.id, h.renterKey) if 
!ok { - return types.Hash256{}, errors.New("failed to create payment") + return errors.New("failed to create payment") } var cost types.Currency err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) error { - root, cost, err = RPCAppendSector(ctx, t, h.renterKey, pt, &rev, &payment, sector) + cost, err = RPCAppendSector(ctx, t, h.renterKey, pt, &rev, &payment, sectorRoot, sector) return err }) if err != nil { - return types.Hash256{}, err + return err } // record spending h.contractSpendingRecorder.Record(rev, api.ContractSpending{Uploads: cost}) - return root, nil + return nil } func (h *host) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _ types.Currency, err error) { @@ -198,11 +196,13 @@ func (h *host) FetchPriceTable(ctx context.Context, rev *types.FileContractRevis fetchPT := func(paymentFn PriceTablePaymentFunc) (hpt hostdb.HostPriceTable, err error) { err = h.transportPool.withTransportV3(ctx, h.hk, h.siamuxAddr, func(ctx context.Context, t *transportV3) (err error) { hpt, err = RPCPriceTable(ctx, t, paymentFn) - h.interactionRecorder.RecordPriceTableUpdate(hostdb.PriceTableUpdate{ - HostKey: h.hk, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - PriceTable: hpt, + h.bus.RecordPriceTables(ctx, []hostdb.PriceTableUpdate{ + { + HostKey: h.hk, + Success: isSuccessfulInteraction(err), + Timestamp: time.Now(), + PriceTable: hpt, + }, }) return }) @@ -322,3 +322,18 @@ func (h *host) preparePriceTableContractPayment(rev *types.FileContractRevision) return &payment, nil } } + +func isSuccessfulInteraction(err error) bool { + // No error always means success. + if err == nil { + return true + } + // List of errors that are considered successful interactions. 
+ if isInsufficientFunds(err) { + return true + } + if isBalanceInsufficient(err) { + return true + } + return false +} diff --git a/worker/host_test.go b/worker/host_test.go index 87d35fb364..78ce6b74e5 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -16,11 +16,9 @@ func TestHost(t *testing.T) { sector, root := newMockSector() // upload the sector - uploaded, err := h.UploadSector(context.Background(), sector, types.FileContractRevision{}) + err := h.UploadSector(context.Background(), rhpv2.SectorRoot(sector), sector, types.FileContractRevision{}) if err != nil { t.Fatal(err) - } else if uploaded != root { - t.Fatal("root mismatch") } // download entire sector diff --git a/worker/interactions.go b/worker/interactions.go deleted file mode 100644 index 4527a12203..0000000000 --- a/worker/interactions.go +++ /dev/null @@ -1,147 +0,0 @@ -package worker - -import ( - "context" - "fmt" - "sync" - "time" - - "go.sia.tech/renterd/hostdb" - "go.uber.org/zap" -) - -const ( - keyInteractionRecorder contextKey = "InteractionRecorder" -) - -type ( - HostInteractionRecorder interface { - RecordHostScan(...hostdb.HostScan) - RecordPriceTableUpdate(...hostdb.PriceTableUpdate) - Stop(context.Context) - } - - hostInteractionRecorder struct { - flushInterval time.Duration - - bus Bus - logger *zap.SugaredLogger - - mu sync.Mutex - hostScans []hostdb.HostScan - priceTableUpdates []hostdb.PriceTableUpdate - - flushCtx context.Context - flushTimer *time.Timer - } -) - -var ( - _ HostInteractionRecorder = (*hostInteractionRecorder)(nil) -) - -func (w *worker) initHostInteractionRecorder(flushInterval time.Duration) { - if w.hostInteractionRecorder != nil { - panic("HostInteractionRecorder already initialized") // developer error - } - w.hostInteractionRecorder = &hostInteractionRecorder{ - bus: w.bus, - logger: w.logger, - - flushCtx: w.shutdownCtx, - flushInterval: flushInterval, - - hostScans: make([]hostdb.HostScan, 0), - priceTableUpdates: make([]hostdb.PriceTableUpdate, 0), - } -} - -func (r *hostInteractionRecorder) RecordHostScan(scans ...hostdb.HostScan) { - r.mu.Lock() - defer r.mu.Unlock() - r.hostScans = append(r.hostScans, scans...) - r.tryFlushInteractionsBuffer() -} - -func (r *hostInteractionRecorder) RecordPriceTableUpdate(ptUpdates ...hostdb.PriceTableUpdate) { - r.mu.Lock() - defer r.mu.Unlock() - r.priceTableUpdates = append(r.priceTableUpdates, ptUpdates...) 
- r.tryFlushInteractionsBuffer() -} - -func (r *hostInteractionRecorder) Stop(ctx context.Context) { - // stop the flush timer - r.mu.Lock() - if r.flushTimer != nil { - r.flushTimer.Stop() - } - r.flushCtx = ctx - r.mu.Unlock() - - // flush all interactions - r.flush() - - // log if we weren't able to flush them - r.mu.Lock() - if len(r.hostScans) > 0 { - r.logger.Error("not all host scans were recorded during shutdown") - } - if len(r.priceTableUpdates) > 0 { - r.logger.Error("not all price table updates were recorded during shutdown") - } - r.mu.Unlock() -} - -func (r *hostInteractionRecorder) flush() { - r.mu.Lock() - defer r.mu.Unlock() - - // reset timer - r.flushTimer = nil - - // NOTE: don't bother flushing if the context is cancelled, we can safely - // ignore the buffered scans and price tables since we'll flush on shutdown - // and log in case we weren't able to flush all interactions to the bus - select { - case <-r.flushCtx.Done(): - return - default: - } - - if len(r.hostScans) > 0 { - if err := r.bus.RecordHostScans(r.flushCtx, r.hostScans); err != nil { - r.logger.Errorw(fmt.Sprintf("failed to record scans: %v", err)) - } else { - r.hostScans = nil - } - } - if len(r.priceTableUpdates) > 0 { - if err := r.bus.RecordPriceTables(r.flushCtx, r.priceTableUpdates); err != nil { - r.logger.Errorw(fmt.Sprintf("failed to record price table updates: %v", err)) - } else { - r.priceTableUpdates = nil - } - } -} - -func (r *hostInteractionRecorder) tryFlushInteractionsBuffer() { - if r.flushTimer == nil { - r.flushTimer = time.AfterFunc(r.flushInterval, r.flush) - } -} - -func isSuccessfulInteraction(err error) bool { - // No error always means success. - if err == nil { - return true - } - // List of errors that are considered successful interactions. 
- if isInsufficientFunds(err) { - return true - } - if isBalanceInsufficient(err) { - return true - } - return false -} diff --git a/worker/mocks_test.go b/worker/mocks_test.go index e6fd62d8ea..a28e9256ca 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -274,7 +274,7 @@ func (os *mockObjectStore) Object(ctx context.Context, bucket, path string, opts return api.ObjectsResponse{Object: &api.Object{ ObjectMetadata: api.ObjectMetadata{Name: path, Size: o.TotalSize()}, - Object: o, + Object: &o, }}, nil } @@ -396,8 +396,9 @@ func (h *mockHost) DownloadSector(ctx context.Context, w io.Writer, root types.H return err } -func (h *mockHost) UploadSector(ctx context.Context, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) (types.Hash256, error) { - return h.contract().addSector(sector), nil +func (h *mockHost) UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error { + h.contract().addSector(sectorRoot, sector) + return nil } func (h *mockHost) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (rev types.FileContractRevision, _ error) { @@ -448,12 +449,10 @@ func newMockContract(hk types.PublicKey, fcid types.FileContractID) *mockContrac } } -func (c *mockContract) addSector(sector *[rhpv2.SectorSize]byte) (root types.Hash256) { - root = rhpv2.SectorRoot(sector) +func (c *mockContract) addSector(sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte) { c.mu.Lock() - c.sectors[root] = sector + c.sectors[sectorRoot] = sector c.mu.Unlock() - return } func (c *mockContract) sector(root types.Hash256) (sector *[rhpv2.SectorSize]byte, found bool) { diff --git a/worker/rhpv3.go b/worker/rhpv3.go index 5fbcd3ad6d..45a3610e24 100644 --- a/worker/rhpv3.go +++ b/worker/rhpv3.go @@ -385,19 +385,14 @@ func withAccountLock(ctx context.Context, as AccountStore, id rhpv3.Account, hk if err != nil { return err } + err = fn(acc) - defer func() { - select { - case <-ctx.Done(): - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(context.Background(), time.Minute) - defer cancel() - default: - } - as.UnlockAccount(ctx, acc.ID, lockID) - }() + // unlock account + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + _ = as.UnlockAccount(ctx, acc.ID, lockID) // ignore error + cancel() - return fn(acc) + return err } // Balance returns the account balance. @@ -450,38 +445,25 @@ func (a *account) WithWithdrawal(ctx context.Context, amtFn func() (types.Curren // execute amtFn amt, err := amtFn() + + // in case of an insufficient balance, we schedule a sync if isBalanceInsufficient(err) { - // in case of an insufficient balance, we schedule a sync - if scheduleErr := a.scheduleSync(); scheduleErr != nil { - err = fmt.Errorf("%w; failed to set requiresSync flag on bus, error: %v", err, scheduleErr) - } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err = errors.Join(err, a.as.ScheduleSync(ctx, a.id, a.host)) + cancel() } - // if an amount was returned, we withdraw it. 
- if withdrawErr := a.withdrawFromBalance(amt); withdrawErr != nil { - err = fmt.Errorf("%w; failed to withdraw from account, error: %v", err, withdrawErr) + // if an amount was returned, we withdraw it + if !amt.IsZero() { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err = errors.Join(err, a.as.AddBalance(ctx, a.id, a.host, new(big.Int).Neg(amt.Big()))) + cancel() } return err }) } -func (a *account) withdrawFromBalance(amt types.Currency) error { - if amt.IsZero() { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - return a.as.AddBalance(ctx, a.id, a.host, new(big.Int).Neg(amt.Big())) -} - -func (a *account) scheduleSync() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - return a.as.ScheduleSync(ctx, a.id, a.host) -} - // deriveAccountKey derives an account plus key for a given host and worker. // Each worker has its own account for a given host. That makes concurrency // around keeping track of an accounts balance and refilling it a lot easier in @@ -782,17 +764,17 @@ func RPCReadSector(ctx context.Context, t *transportV3, w io.Writer, pt rhpv3.Ho return } -func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.PrivateKey, pt rhpv3.HostPriceTable, rev *types.FileContractRevision, payment rhpv3.PaymentMethod, sector *[rhpv2.SectorSize]byte) (sectorRoot types.Hash256, cost types.Currency, err error) { +func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.PrivateKey, pt rhpv3.HostPriceTable, rev *types.FileContractRevision, payment rhpv3.PaymentMethod, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte) (cost types.Currency, err error) { defer wrapErr(&err, "AppendSector") // sanity check revision first if rev.RevisionNumber == math.MaxUint64 { - return types.Hash256{}, types.ZeroCurrency, errMaxRevisionReached + return types.ZeroCurrency, errMaxRevisionReached } s, err := t.DialStream(ctx) if err != nil { - return types.Hash256{}, types.ZeroCurrency, err + return types.ZeroCurrency, err } defer s.Close() @@ -822,7 +804,7 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat // compute expected collateral and refund expectedCost, expectedCollateral, expectedRefund, err := uploadSectorCost(pt, rev.WindowEnd) if err != nil { - return types.Hash256{}, types.ZeroCurrency, err + return types.ZeroCurrency, err } // apply leeways. @@ -833,13 +815,13 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat // check if the cost, collateral and refund match our expectation. 
if executeResp.TotalCost.Cmp(expectedCost) > 0 { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("cost exceeds expectation: %v > %v", executeResp.TotalCost.String(), expectedCost.String()) + return types.ZeroCurrency, fmt.Errorf("cost exceeds expectation: %v > %v", executeResp.TotalCost.String(), expectedCost.String()) } if executeResp.FailureRefund.Cmp(expectedRefund) < 0 { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("insufficient refund: %v < %v", executeResp.FailureRefund.String(), expectedRefund.String()) + return types.ZeroCurrency, fmt.Errorf("insufficient refund: %v < %v", executeResp.FailureRefund.String(), expectedRefund.String()) } if executeResp.AdditionalCollateral.Cmp(expectedCollateral) < 0 { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("insufficient collateral: %v < %v", executeResp.AdditionalCollateral.String(), expectedCollateral.String()) + return types.ZeroCurrency, fmt.Errorf("insufficient collateral: %v < %v", executeResp.AdditionalCollateral.String(), expectedCollateral.String()) } // set the cost and refund @@ -863,18 +845,17 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat collateral := executeResp.AdditionalCollateral.Add(executeResp.FailureRefund) // check proof - sectorRoot = rhpv2.SectorRoot(sector) if rev.Filesize == 0 { // For the first upload to a contract we don't get a proof. So we just // assert that the new contract root matches the root of the sector. if rev.Filesize == 0 && executeResp.NewMerkleRoot != sectorRoot { - return types.Hash256{}, types.ZeroCurrency, fmt.Errorf("merkle root doesn't match the sector root upon first upload to contract: %v != %v", executeResp.NewMerkleRoot, sectorRoot) + return types.ZeroCurrency, fmt.Errorf("merkle root doesn't match the sector root upon first upload to contract: %v != %v", executeResp.NewMerkleRoot, sectorRoot) } } else { // Otherwise we make sure the proof was transmitted and verify it. 
actions := []rhpv2.RPCWriteAction{{Type: rhpv2.RPCWriteActionAppend}} // TODO: change once rhpv3 support is available if !rhpv2.VerifyDiffProof(actions, rev.Filesize/rhpv2.SectorSize, executeResp.Proof, []types.Hash256{}, rev.FileMerkleRoot, executeResp.NewMerkleRoot, []types.Hash256{sectorRoot}) { - return types.Hash256{}, types.ZeroCurrency, errors.New("proof verification failed") + return types.ZeroCurrency, errors.New("proof verification failed") } } @@ -882,7 +863,7 @@ func RPCAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat newRevision := *rev newValid, newMissed, err := updateRevisionOutputs(&newRevision, types.ZeroCurrency, collateral) if err != nil { - return types.Hash256{}, types.ZeroCurrency, err + return types.ZeroCurrency, err } newRevision.Filesize += rhpv2.SectorSize newRevision.RevisionNumber++ diff --git a/worker/upload.go b/worker/upload.go index acfbfe3891..9a79347bbf 100644 --- a/worker/upload.go +++ b/worker/upload.go @@ -139,9 +139,8 @@ type ( } sectorUploadResp struct { - req *sectorUploadReq - root types.Hash256 - err error + req *sectorUploadReq + err error } ) @@ -197,7 +196,7 @@ func (w *worker) upload(ctx context.Context, r io.Reader, contracts []api.Contra // try and upload one slab synchronously if bufferSizeLimitReached { - mem := w.uploadManager.mm.AcquireMemory(ctx, up.rs.SlabSizeWithRedundancy()) + mem := w.uploadManager.mm.AcquireMemory(ctx, up.rs.SlabSize()) if mem != nil { _, err := w.tryUploadPackedSlab(ctx, mem, defaultPackedSlabsLockDuration, up.rs, up.contractSet, lockingPriorityBlockedUpload) if err != nil { @@ -242,7 +241,7 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe var wg sync.WaitGroup for { // block until we have memory - mem := w.uploadManager.mm.AcquireMemory(interruptCtx, rs.SlabSizeWithRedundancy()) + mem := w.uploadManager.mm.AcquireMemory(interruptCtx, rs.SlabSize()) if mem == nil { break // interrupted } @@ -258,9 +257,6 @@ func (w *worker) threadedUploadPackedSlabs(rs api.RedundancySettings, contractSe ctx, cancel := context.WithTimeout(context.Background(), defaultPackedSlabsUploadTimeout) defer cancel() - // attach interaction recorder to the context - ctx = context.WithValue(ctx, keyInteractionRecorder, w) - // try to upload a packed slab, if there were no packed slabs left to upload ok is false ok, err := w.tryUploadPackedSlab(ctx, mem, defaultPackedSlabsLockDuration, rs, contractSet, lockPriority) if err != nil { @@ -433,11 +429,8 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a // create the object o := object.NewObject(up.ec) - // create the hash reader - hr := newHashReader(r) - // create the cipher reader - cr, err := o.Encrypt(hr, up.encryptionOffset) + cr, err := o.Encrypt(r, up.encryptionOffset) if err != nil { return false, "", err } @@ -462,7 +455,7 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a // prepare slab sizes slabSize := up.rs.SlabSize() - slabSizeWithRedundancy := up.rs.SlabSizeWithRedundancy() + slabSizeNoRedundancy := up.rs.SlabSizeNoRedundancy() var partialSlab []byte // launch uploads in a separate goroutine @@ -476,7 +469,7 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a } // acquire memory - mem := mgr.mm.AcquireMemory(ctx, slabSizeWithRedundancy) + mem := mgr.mm.AcquireMemory(ctx, slabSize) if mem == nil { return // interrupted } @@ -497,8 +490,8 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a } // 
read next slab's data - data := make([]byte, slabSize) - length, err := io.ReadFull(io.LimitReader(cr, int64(slabSize)), data) + data := make([]byte, slabSizeNoRedundancy) + length, err := io.ReadFull(io.LimitReader(cr, int64(slabSizeNoRedundancy)), data) if err == io.EOF { mem.Release() @@ -569,8 +562,8 @@ func (mgr *uploadManager) Upload(ctx context.Context, r io.Reader, contracts []a o.Slabs = append(o.Slabs, resp.slab) } - // calculate the eTag - eTag = hr.Hash() + // compute etag + eTag = o.ComputeETag() // add partial slabs if len(partialSlab) > 0 { @@ -822,20 +815,32 @@ func (u *upload) newSlabUpload(ctx context.Context, shards [][]byte, uploaders [ responseChan := make(chan sectorUploadResp) // prepare sectors + var wg sync.WaitGroup sectors := make([]*sectorUpload, len(shards)) - for sI, shard := range shards { - // create the ctx - sCtx, sCancel := context.WithCancel(ctx) - - // create the sector - sectors[sI] = §orUpload{ - data: (*[rhpv2.SectorSize]byte)(shard), - index: sI, - root: rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(shard)), - ctx: sCtx, - cancel: sCancel, - } + for sI := range shards { + wg.Add(1) + go func(idx int) { + // create the ctx + sCtx, sCancel := context.WithCancel(ctx) + + // create the sector + // NOTE: we are computing the sector root here and pass it all the + // way down to the RPC to avoid having to recompute it for the proof + // verification. This is necessary because we need it ahead of time + // for the call to AddUploadingSector in uploader.go + // Once we upload to temp storage we don't need AddUploadingSector + // anymore and can move it back to the RPC. + sectors[idx] = §orUpload{ + data: (*[rhpv2.SectorSize]byte)(shards[idx]), + index: idx, + root: rhpv2.SectorRoot((*[rhpv2.SectorSize]byte)(shards[idx])), + ctx: sCtx, + cancel: sCancel, + } + wg.Done() + }(sI) } + wg.Wait() // prepare candidates candidates := make([]*candidate, len(uploaders)) @@ -890,8 +895,6 @@ func (u *upload) uploadSlab(ctx context.Context, rs api.RedundancySettings, data } func (u *upload) uploadShards(ctx context.Context, shards [][]byte, candidates []*uploader, mem Memory, maxOverdrive uint64, overdriveTimeout time.Duration) (sectors []object.Sector, uploadSpeed int64, overdrivePct float64, err error) { - start := time.Now() - // ensure inflight uploads get cancelled ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -928,6 +931,10 @@ func (u *upload) uploadShards(ctx context.Context, shards [][]byte, candidates [ // create a request buffer var buffer []*sectorUploadReq + // start the timer after the upload has started + // newSlabUpload is quite slow due to computing the sector roots + start := time.Now() + // collect responses var used bool var done bool @@ -985,6 +992,9 @@ loop: // calculate the upload speed bytes := slab.numUploaded * rhpv2.SectorSize ms := time.Since(start).Milliseconds() + if ms == 0 { + ms = 1 + } uploadSpeed = int64(bytes) / ms // calculate overdrive pct @@ -1112,12 +1122,6 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { return false, false } - // sanity check we receive the expected root - if resp.root != req.sector.root { - s.errs[req.hk] = fmt.Errorf("root mismatch, %v != %v", resp.root, req.sector.root) - return false, false - } - // redundant sectors can't complete the upload if sector.uploaded.Root != (types.Hash256{}) { return false, false @@ -1127,7 +1131,7 @@ func (s *slabUpload) receive(resp sectorUploadResp) (bool, bool) { sector.finish(object.Sector{ Contracts: 
map[types.PublicKey][]types.FileContractID{req.hk: {req.fcid}}, LatestHost: req.hk, - Root: resp.root, + Root: req.sector.root, }) // update uploaded sectors @@ -1174,7 +1178,7 @@ func (req *sectorUploadReq) done() bool { } } -func (req *sectorUploadReq) fail(err error) { +func (req *sectorUploadReq) finish(err error) { select { case <-req.sector.ctx.Done(): case req.responseChan <- sectorUploadResp{ @@ -1183,13 +1187,3 @@ func (req *sectorUploadReq) fail(err error) { }: } } - -func (req *sectorUploadReq) succeed(root types.Hash256) { - select { - case <-req.sector.ctx.Done(): - case req.responseChan <- sectorUploadResp{ - req: req, - root: root, - }: - } -} diff --git a/worker/upload_test.go b/worker/upload_test.go index 9a285efa5c..a311fa058c 100644 --- a/worker/upload_test.go +++ b/worker/upload_test.go @@ -64,7 +64,7 @@ func TestUpload(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -90,7 +90,7 @@ func TestUpload(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -107,7 +107,7 @@ func TestUpload(t *testing.T) { // download the data again and assert it fails buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), filtered) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), filtered) if !errors.Is(err, errDownloadNotEnoughHosts) { t.Fatal("expected not enough hosts error", err) } @@ -170,7 +170,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -185,7 +185,7 @@ func TestUploadPackedSlab(t *testing.T) { t.Fatal("expected 1 packed slab") } ps := pss[0] - mem := mm.AcquireMemory(context.Background(), params.rs.SlabSizeWithRedundancy()) + mem := mm.AcquireMemory(context.Background(), params.rs.SlabSize()) // upload the packed slab err = ul.UploadPackedSlab(context.Background(), params.rs, ps, mem, w.contracts(), 0, lockingPriorityUpload) @@ -206,7 +206,7 @@ func TestUploadPackedSlab(t *testing.T) { // download the data again and assert it matches buf.Reset() - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -325,7 +325,7 @@ func TestUploadShards(t *testing.T) { // download the data and assert it matches var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), contracts) + err = 
dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), contracts) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { @@ -485,7 +485,7 @@ func TestUploadRegression(t *testing.T) { // download data for good measure var buf bytes.Buffer - err = dl.DownloadObject(context.Background(), &buf, o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) + err = dl.DownloadObject(context.Background(), &buf, *o.Object.Object, 0, uint64(o.Object.Size), w.contracts()) if err != nil { t.Fatal(err) } else if !bytes.Equal(data, buf.Bytes()) { diff --git a/worker/upload_utils.go b/worker/upload_utils.go index 4b5241b4d4..306e1774fb 100644 --- a/worker/upload_utils.go +++ b/worker/upload_utils.go @@ -2,11 +2,9 @@ package worker import ( "bytes" - "encoding/hex" "io" "github.com/gabriel-vasile/mimetype" - "go.sia.tech/core/types" "go.sia.tech/renterd/object" ) @@ -28,28 +26,3 @@ func newMimeReader(r io.Reader) (mimeType string, recycled io.Reader, err error) recycled = io.MultiReader(buf, r) return mtype.String(), recycled, err } - -type hashReader struct { - r io.Reader - h *types.Hasher -} - -func newHashReader(r io.Reader) *hashReader { - return &hashReader{ - r: r, - h: types.NewHasher(), - } -} - -func (e *hashReader) Read(p []byte) (int, error) { - n, err := e.r.Read(p) - if _, wErr := e.h.E.Write(p[:n]); wErr != nil { - return 0, wErr - } - return n, err -} - -func (e *hashReader) Hash() string { - sum := e.h.Sum() - return hex.EncodeToString(sum[:]) -} diff --git a/worker/uploader.go b/worker/uploader.go index da3c7caf70..2cf090e730 100644 --- a/worker/uploader.go +++ b/worker/uploader.go @@ -107,7 +107,7 @@ outer: } // execute it - root, elapsed, err := u.execute(req) + elapsed, err := u.execute(req) // the uploader's contract got renewed, requeue the request if errors.Is(err, errMaxRevisionReached) { @@ -118,10 +118,12 @@ outer: } // send the response - if err != nil { - req.fail(err) - } else { - req.succeed(root) + select { + case <-req.sector.ctx.Done(): + case req.responseChan <- sectorUploadResp{ + req: req, + err: err, + }: } // track the error, ignore gracefully closed streams and canceled overdrives @@ -144,7 +146,7 @@ func (u *uploader) Stop(err error) { break } if !upload.done() { - upload.fail(err) + upload.finish(err) } } } @@ -154,7 +156,7 @@ func (u *uploader) enqueue(req *sectorUploadReq) { // check for stopped if u.stopped { u.mu.Unlock() - go req.fail(errUploaderStopped) // don't block the caller + go req.finish(errUploaderStopped) // don't block the caller return } @@ -185,7 +187,7 @@ func (u *uploader) estimate() float64 { return numSectors * estimateP90 } -func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, time.Duration, error) { +func (u *uploader) execute(req *sectorUploadReq) (time.Duration, error) { // grab fields u.mu.Lock() host := u.host @@ -195,7 +197,7 @@ func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, time.Duration, // acquire contract lock lockID, err := u.cs.AcquireContract(req.sector.ctx, fcid, req.contractLockPriority, req.contractLockDuration) if err != nil { - return types.Hash256{}, 0, err + return 0, err } // defer the release @@ -214,26 +216,26 @@ func (u *uploader) execute(req *sectorUploadReq) (types.Hash256, time.Duration, // fetch the revision rev, err := host.FetchRevision(ctx, defaultRevisionFetchTimeout) if err != nil { - return types.Hash256{}, 0, err + return 0, err } else if rev.RevisionNumber == math.MaxUint64 { - return types.Hash256{}, 0, errMaxRevisionReached + 
return 0, errMaxRevisionReached } // update the bus if err := u.os.AddUploadingSector(ctx, req.uploadID, fcid, req.sector.root); err != nil { - return types.Hash256{}, 0, fmt.Errorf("failed to add uploading sector to contract %v, err: %v", fcid, err) + return 0, fmt.Errorf("failed to add uploading sector to contract %v, err: %v", fcid, err) } // upload the sector start := time.Now() - root, err := host.UploadSector(ctx, req.sector.sectorData(), rev) + err = host.UploadSector(ctx, req.sector.root, req.sector.sectorData(), rev) if err != nil { - return types.Hash256{}, 0, fmt.Errorf("failed to upload sector to contract %v, err: %v", fcid, err) + return 0, fmt.Errorf("failed to upload sector to contract %v, err: %v", fcid, err) } // calculate elapsed time elapsed := time.Since(start) - return root, elapsed, nil + return elapsed, nil } func (u *uploader) pop() *sectorUploadReq { diff --git a/worker/worker.go b/worker/worker.go index b2e3297a88..00090fb6e3 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -201,7 +201,6 @@ type worker struct { uploadsMu sync.Mutex uploadingPackedSlabs map[string]bool - hostInteractionRecorder HostInteractionRecorder contractSpendingRecorder ContractSpendingRecorder contractLockingDuration time.Duration @@ -346,11 +345,13 @@ func (w *worker) rhpPriceTableHandler(jc jape.Context) { var err error var hpt hostdb.HostPriceTable defer func() { - w.hostInteractionRecorder.RecordPriceTableUpdate(hostdb.PriceTableUpdate{ - HostKey: rptr.HostKey, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - PriceTable: hpt, + w.bus.RecordPriceTables(ctx, []hostdb.PriceTableUpdate{ + { + HostKey: rptr.HostKey, + Success: isSuccessfulInteraction(err), + Timestamp: time.Now(), + PriceTable: hpt, + }, }) }() @@ -923,10 +924,12 @@ func (w *worker) objectsHandlerGET(jc jape.Context) { // create a download function downloadFn := func(wr io.Writer, offset, length int64) (err error) { ctx = WithGougingChecker(ctx, w.bus, gp) - err = w.downloadManager.DownloadObject(ctx, wr, res.Object.Object, uint64(offset), uint64(length), contracts) + err = w.downloadManager.DownloadObject(ctx, wr, *res.Object.Object, uint64(offset), uint64(length), contracts) if err != nil { w.logger.Error(err) - if !errors.Is(err, ErrShuttingDown) { + if !errors.Is(err, ErrShuttingDown) && + !errors.Is(err, errDownloadCancelled) && + !errors.Is(err, io.ErrClosedPipe) { w.registerAlert(newDownloadFailedAlert(bucket, path, prefix, marker, offset, length, int64(len(contracts)), err)) } } @@ -1130,15 +1133,7 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { return } - // make sure only one of the following is set - var disablePreshardingEncryption bool - if jc.DecodeForm("disablepreshardingencryption", &disablePreshardingEncryption) != nil { - return - } - if !disablePreshardingEncryption && jc.Request.FormValue("offset") == "" { - jc.Error(errors.New("if presharding encryption isn't disabled, the offset needs to be set"), http.StatusBadRequest) - return - } + // get the offset var offset int if jc.DecodeForm("offset", &offset) != nil { return @@ -1147,23 +1142,30 @@ func (w *worker) multipartUploadHandlerPUT(jc jape.Context) { return } + // fetch upload from bus + upload, err := w.bus.MultipartUpload(ctx, uploadID) + if isError(err, api.ErrMultipartUploadNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if jc.Check("failed to fetch multipart upload", err) != nil { + return + } + // built options opts := []UploadOption{ WithBlockHeight(up.CurrentHeight), 
WithContractSet(up.ContractSet), WithPacking(up.UploadPacking), WithRedundancySettings(up.RedundancySettings), + WithCustomKey(upload.Key), } - if disablePreshardingEncryption { - opts = append(opts, WithCustomKey(object.NoOpKey)) - } else { - upload, err := w.bus.MultipartUpload(ctx, uploadID) - if err != nil { - jc.Error(err, http.StatusBadRequest) - return - } + + // make sure only one of the following is set + if encryptionEnabled := !upload.Key.IsNoopKey(); encryptionEnabled && jc.Request.FormValue("offset") == "" { + jc.Error(errors.New("if object encryption (pre-erasure coding) wasn't disabled by creating the multipart upload with the no-op key, the offset needs to be set"), http.StatusBadRequest) + return + } else if encryptionEnabled { opts = append(opts, WithCustomEncryptionOffset(uint64(offset))) - opts = append(opts, WithCustomKey(upload.Key)) } // attach gouging checker to the context @@ -1296,6 +1298,7 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush return nil, errors.New("uploadMaxMemory cannot be 0") } + ctx, cancel := context.WithCancelCause(context.Background()) w := &worker{ alerts: alerts.WithOrigin(b, fmt.Sprintf("worker.%s", id)), allowPrivateIPs: allowPrivateIPs, @@ -1306,13 +1309,10 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush logger: l.Sugar().Named("worker").Named(id), startTime: time.Now(), uploadingPackedSlabs: make(map[string]bool), + shutdownCtx: ctx, + shutdownCtxCancel: cancel, } - ctx, cancel := context.WithCancelCause(context.Background()) - ctx = context.WithValue(ctx, keyInteractionRecorder, w) - w.shutdownCtx = ctx - w.shutdownCtxCancel = cancel - w.initAccounts(b) w.initPriceTables() w.initTransportPool() @@ -1321,7 +1321,6 @@ func New(masterKey [32]byte, id string, b Bus, contractLockingDuration, busFlush w.initUploadManager(uploadMaxMemory, uploadMaxOverdrive, uploadOverdriveTimeout, l.Sugar().Named("uploadmanager")) w.initContractSpendingRecorder(busFlushInterval) - w.initHostInteractionRecorder(busFlushInterval) return w, nil } @@ -1368,7 +1367,6 @@ func (w *worker) Shutdown(ctx context.Context) error { w.uploadManager.Stop(ctx) // stop recorders - w.hostInteractionRecorder.Stop(ctx) w.contractSpendingRecorder.Stop(ctx) // return error on timeout @@ -1452,14 +1450,23 @@ func (w *worker) scanHost(ctx context.Context, hostKey types.PublicKey, hostIP s default: } - // record host scan - w.hostInteractionRecorder.RecordHostScan(hostdb.HostScan{ - HostKey: hostKey, - Success: isSuccessfulInteraction(err), - Timestamp: time.Now(), - Settings: settings, - PriceTable: pt, + // record host scan - make sure this isn't interrupted by the same context + // used to time out the scan itself because otherwise we won't be able to + // record scans that timed out. + recordCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + scanErr := w.bus.RecordHostScans(recordCtx, []hostdb.HostScan{ + { + HostKey: hostKey, + Success: isSuccessfulInteraction(err), + Timestamp: time.Now(), + Settings: settings, + PriceTable: pt, + }, }) + if scanErr != nil { + w.logger.Errorf("failed to record host scan: %v", scanErr) + } return settings, pt, duration, err }
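Note (not part of the patch above): stores/types.go now persists the new bCurrency type as a fixed-width 16-byte big-endian blob, Hi word first and Lo word second. Because the encoding is fixed-width and big-endian, bytewise comparison of the stored blobs agrees with numeric comparison of the underlying 128-bit values, so database-level ordering on such a column behaves numerically. The following is a minimal round-trip sketch under these assumptions; the test name and the choice of types.NewCurrency for the sample value are illustrative and not part of the change.

package stores

import (
	"testing"

	"go.sia.tech/core/types"
)

// TestBCurrencyRoundTrip is a hypothetical sketch: it feeds the blob produced
// by bCurrency.Value back into bCurrency.Scan and checks that the 128-bit
// amount survives the round trip unchanged.
func TestBCurrencyRoundTrip(t *testing.T) {
	c := bCurrency(types.NewCurrency(12345, 67890)) // arbitrary Lo/Hi words
	v, err := c.Value()                             // 16-byte big-endian blob
	if err != nil {
		t.Fatal(err)
	}

	var decoded bCurrency
	if err := decoded.Scan(v); err != nil {
		t.Fatal(err)
	} else if types.Currency(decoded) != types.Currency(c) {
		t.Fatal("bCurrency round-trip mismatch")
	}
}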