diff --git a/api/wallet.go b/api/wallet.go index 576a2da99..f7cb24268 100644 --- a/api/wallet.go +++ b/api/wallet.go @@ -10,6 +10,26 @@ import ( "go.sia.tech/core/types" ) +type ( + // A SiacoinElement is a SiacoinOutput along with its ID. + SiacoinElement struct { + types.SiacoinOutput + ID types.Hash256 `json:"id"` + MaturityHeight uint64 `json:"maturityHeight"` + } + + // A Transaction is an on-chain transaction relevant to a particular wallet, + // paired with useful metadata. + Transaction struct { + Raw types.Transaction `json:"raw,omitempty"` + Index types.ChainIndex `json:"index"` + ID types.TransactionID `json:"id"` + Inflow types.Currency `json:"inflow"` + Outflow types.Currency `json:"outflow"` + Timestamp time.Time `json:"timestamp"` + } +) + type ( // WalletFundRequest is the request type for the /wallet/fund endpoint. WalletFundRequest struct { @@ -73,6 +93,7 @@ type ( Spendable types.Currency `json:"spendable"` Confirmed types.Currency `json:"confirmed"` Unconfirmed types.Currency `json:"unconfirmed"` + Immature types.Currency `json:"immature"` } // WalletSignRequest is the request type for the /wallet/sign endpoint. 
diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 7367003e0..e9b9e8a56 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -19,7 +19,6 @@ import ( "go.sia.tech/renterd/build" "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/object" - "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" "go.sia.tech/renterd/worker" "go.uber.org/zap" @@ -83,7 +82,7 @@ type Bus interface { // wallet Wallet(ctx context.Context) (api.WalletResponse, error) WalletDiscard(ctx context.Context, txn types.Transaction) error - WalletOutputs(ctx context.Context) (resp []wallet.SiacoinElement, err error) + WalletOutputs(ctx context.Context) (resp []api.SiacoinElement, err error) WalletPending(ctx context.Context) (resp []types.Transaction, err error) WalletRedistribute(ctx context.Context, outputs int, amount types.Currency) (ids []types.TransactionID, err error) } @@ -240,12 +239,18 @@ func (ap *Autopilot) Run() error { ap.workers.withWorker(func(w Worker) { defer ap.logger.Info("autopilot iteration ended") + // log worker id chosen for this maintenance iteration. + workerID, err := w.ID(ap.shutdownCtx) + if err != nil { + ap.logger.Warnf("failed to reach worker, err: %v", err) + } else { + ap.logger.Infof("using worker %s for iteration", workerID) + } + // initiate a host scan - no need to be synced or configured for scanning ap.s.tryUpdateTimeout() ap.s.tryPerformHostScan(ap.shutdownCtx, w, forceScan) - - // reset forceScan - forceScan = false + forceScan = false // reset forceScan // block until consensus is synced if synced, blocked, interrupted := ap.blockUntilSynced(ap.ticker.C); !synced { @@ -271,14 +276,6 @@ func (ap *Autopilot) Run() error { return } - // Log worker id chosen for this maintenance iteration. 
- workerID, err := w.ID(ap.shutdownCtx) - if err != nil { - ap.logger.Errorf("aborting maintenance, failed to fetch worker id, err: %v", err) - return - } - ap.logger.Infof("using worker %s for iteration", workerID) - // update the loop state // // NOTE: it is important this is the first action we perform in this diff --git a/autopilot/contractor.go b/autopilot/contractor.go index 4e5e8c842..6a92ba48d 100644 --- a/autopilot/contractor.go +++ b/autopilot/contractor.go @@ -14,9 +14,9 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" - "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/worker" "go.uber.org/zap" ) @@ -1425,7 +1425,7 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf "renterFunds", renterFunds, "expectedNewStorage", expectedNewStorage, ) - if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) { + if isErr(err, wallet.ErrNotEnoughFunds) { return api.ContractMetadata{}, false, err } return api.ContractMetadata{}, true, err @@ -1508,7 +1508,7 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI return api.ContractMetadata{}, true, err } c.logger.Errorw("refresh failed", zap.Error(err), "hk", hk, "fcid", fcid) - if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) { + if isErr(err, wallet.ErrNotEnoughFunds) { return api.ContractMetadata{}, false, err } return api.ContractMetadata{}, true, err @@ -1572,7 +1572,7 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos if err != nil { // TODO: keep track of consecutive failures and break at some point c.logger.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) - if strings.Contains(err.Error(), wallet.ErrInsufficientBalance.Error()) { + if isErr(err, wallet.ErrNotEnoughFunds) { return api.ContractMetadata{}, false, err } 
return api.ContractMetadata{}, true, err diff --git a/autopilot/migrator.go b/autopilot/migrator.go index 4a4e31de6..e6e79e0ef 100644 --- a/autopilot/migrator.go +++ b/autopilot/migrator.go @@ -144,7 +144,7 @@ func (m *migrator) performMigrations(p *workerPool) { // fetch worker id once id, err := w.ID(ctx) if err != nil { - m.logger.Errorf("failed to fetch worker id: %v", err) + m.logger.Errorf("failed to reach worker, err: %v", err) return } diff --git a/autopilot/scanner.go b/autopilot/scanner.go index e512d1f87..4c15f0bbe 100644 --- a/autopilot/scanner.go +++ b/autopilot/scanner.go @@ -193,13 +193,13 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo s.logger.Infof("%s started", scanType) s.wg.Add(1) + s.ap.wg.Add(1) go func(st string) { defer s.wg.Done() + defer s.ap.wg.Done() - var interrupted bool for resp := range s.launchScanWorkers(ctx, w, s.launchHostScans()) { if s.isInterrupted() || s.ap.isStopped() { - interrupted = true break } if resp.err != nil && !strings.Contains(resp.err.Error(), "connection refused") { @@ -212,8 +212,7 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo hostCfg := s.ap.State().cfg.Hosts maxDowntime := time.Duration(hostCfg.MaxDowntimeHours) * time.Hour minRecentScanFailures := hostCfg.MinRecentScanFailures - - if !interrupted && maxDowntime > 0 { + if !s.ap.isStopped() && maxDowntime > 0 { s.logger.Debugf("removing hosts that have been offline for more than %v and have failed at least %d scans", maxDowntime, minRecentScanFailures) removed, err := s.bus.RemoveOfflineHosts(ctx, minRecentScanFailures, maxDowntime) if err != nil { @@ -253,10 +252,7 @@ func (s *scanner) tryUpdateTimeout() { func (s *scanner) launchHostScans() chan scanReq { reqChan := make(chan scanReq, s.scanBatchSize) - - s.ap.wg.Add(1) go func() { - defer s.ap.wg.Done() defer close(reqChan) var offset int diff --git a/autopilot/scanner_test.go b/autopilot/scanner_test.go index d5833d1fb..6214ec4a1 
100644 --- a/autopilot/scanner_test.go +++ b/autopilot/scanner_test.go @@ -87,7 +87,7 @@ func TestScanner(t *testing.T) { // init new scanner b := &mockBus{hosts: hosts} w := &mockWorker{blockChan: make(chan struct{})} - s := newTestScanner(b, w) + s := newTestScanner(b) // assert it started a host scan s.tryPerformHostScan(context.Background(), w, false) @@ -139,7 +139,7 @@ func (s *scanner) isScanning() bool { return s.scanning } -func newTestScanner(b *mockBus, w *mockWorker) *scanner { +func newTestScanner(b *mockBus) *scanner { ap := &Autopilot{} ap.shutdownCtx, ap.shutdownCtxCancel = context.WithCancel(context.Background()) return &scanner{ diff --git a/bus/bus.go b/bus/bus.go index 76dab7cc5..b64c598f9 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -14,9 +14,12 @@ import ( "time" "go.sia.tech/core/consensus" + "go.sia.tech/core/gateway" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + "go.sia.tech/coreutils/syncer" + "go.sia.tech/coreutils/wallet" "go.sia.tech/gofakes3" "go.sia.tech/jape" "go.sia.tech/renterd/alerts" @@ -25,7 +28,6 @@ import ( "go.sia.tech/renterd/bus/client" "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/object" - "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" "go.sia.tech/siad/modules" "go.uber.org/zap" @@ -47,23 +49,17 @@ func NewClient(addr, password string) *Client { } type ( - // A ChainManager manages blockchain state. + // ChainManager tracks multiple blockchains and identifies the best valid + // chain. 
ChainManager interface { - AcceptBlock(types.Block) error - BlockAtHeight(height uint64) (types.Block, bool) - IndexAtHeight(height uint64) (types.ChainIndex, error) - LastBlockTime() time.Time - Subscribe(s modules.ConsensusSetSubscriber, ccID modules.ConsensusChangeID, cancel <-chan struct{}) error - Synced() bool + AddBlocks(blocks []types.Block) error + AddPoolTransactions(txns []types.Transaction) (bool, error) + Block(id types.BlockID) (types.Block, bool) + PoolTransaction(txid types.TransactionID) (types.Transaction, bool) + PoolTransactions() []types.Transaction + RecommendedFee() types.Currency TipState() consensus.State - } - - // A Syncer can connect to other peers and synchronize the blockchain. - Syncer interface { - BroadcastTransaction(txn types.Transaction, dependsOn []types.Transaction) - Connect(addr string) error - Peers() []string - SyncerAddress(ctx context.Context) (string, error) + UnconfirmedParents(txn types.Transaction) []types.Transaction } // A TransactionPool can validate and relay unconfirmed transactions. @@ -76,19 +72,6 @@ type ( UnconfirmedParents(txn types.Transaction) ([]types.Transaction, error) } - // A Wallet can spend and receive siacoins. - Wallet interface { - Address() types.Address - Balance() (spendable, confirmed, unconfirmed types.Currency, _ error) - FundTransaction(cs consensus.State, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, error) - Redistribute(cs consensus.State, outputs int, amount, feePerByte types.Currency, pool []types.Transaction) ([]types.Transaction, []types.Hash256, error) - ReleaseInputs(txn ...types.Transaction) - SignTransaction(cs consensus.State, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error - Tip() (types.ChainIndex, error) - Transactions(offset, limit int) ([]wallet.Transaction, error) - UnspentOutputs() ([]wallet.SiacoinElement, error) - } - // A HostDB stores information about hosts. 
HostDB interface { Host(ctx context.Context, hostKey types.PublicKey) (hostdb.HostInfo, error) @@ -207,14 +190,48 @@ type ( WalletMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.WalletMetricsQueryOpts) ([]api.WalletMetric, error) } + + Syncer interface { + Addr() string + BroadcastHeader(h gateway.BlockHeader) + BroadcastTransactionSet([]types.Transaction) + Connect(ctx context.Context, addr string) (*syncer.Peer, error) + Peers() []*syncer.Peer + } + + Wallet interface { + Address() types.Address + Balance() (wallet.Balance, error) + Close() error + FundTransaction(txn *types.Transaction, amount types.Currency, useUnconfirmed bool) ([]types.Hash256, error) + Redistribute(outputs int, amount, feePerByte types.Currency) (txns []types.Transaction, toSign []types.Hash256, err error) + ReleaseInputs(txns ...types.Transaction) + SignTransaction(txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) + SpendableOutputs() ([]wallet.SiacoinElement, error) + Tip() (types.ChainIndex, error) + UnconfirmedTransactions() ([]wallet.Event, error) + Events(offset, limit int) ([]wallet.Event, error) + } + + WebhookManager interface { + webhooks.Broadcaster + Close() error + Delete(webhooks.Webhook) error + Info() ([]webhooks.Webhook, []webhooks.WebhookQueueInfo) + Register(webhooks.Webhook) error + } ) type bus struct { startTime time.Time + alerts alerts.Alerter + alertMgr *alerts.Manager + webhooks WebhookManager + cm ChainManager s Syncer - tp TransactionPool + w Wallet as AutopilotStore eas EphemeralAccountStore @@ -222,16 +239,12 @@ type bus struct { ms MetadataStore ss SettingStore mtrcs MetricsStore - w Wallet accounts *accounts contractLocks *contractLocks uploadingSectors *uploadingSectorsCache - alerts alerts.Alerter - alertMgr *alerts.Manager - hooks *webhooks.Manager - logger *zap.SugaredLogger + logger *zap.SugaredLogger } // Handler returns an HTTP handler that serves the bus API. 
@@ -376,7 +389,7 @@ func (b *bus) Handler() http.Handler { // Shutdown shuts down the bus. func (b *bus) Shutdown(ctx context.Context) error { - b.hooks.Close() + b.webhooks.Close() accounts := b.accounts.ToPersist() err := b.eas.SaveAccounts(ctx, accounts) if err != nil { @@ -401,27 +414,38 @@ func (b *bus) consensusAcceptBlock(jc jape.Context) { if jc.Decode(&block) != nil { return } - if jc.Check("failed to accept block", b.cm.AcceptBlock(block)) != nil { + + if jc.Check("failed to accept block", b.cm.AddBlocks([]types.Block{block})) != nil { return } + + if block.V2 == nil { + b.s.BroadcastHeader(gateway.BlockHeader{ + ParentID: block.ParentID, + Nonce: block.Nonce, + Timestamp: block.Timestamp, + MerkleRoot: block.MerkleRoot(), + }) + } } func (b *bus) syncerAddrHandler(jc jape.Context) { - addr, err := b.s.SyncerAddress(jc.Request.Context()) - if jc.Check("failed to fetch syncer's address", err) != nil { - return - } - jc.Encode(addr) + jc.Encode(b.s.Addr()) } func (b *bus) syncerPeersHandler(jc jape.Context) { - jc.Encode(b.s.Peers()) + var peers []string + for _, p := range b.s.Peers() { + peers = append(peers, p.String()) + } + jc.Encode(peers) } func (b *bus) syncerConnectHandler(jc jape.Context) { var addr string if jc.Decode(&addr) == nil { - jc.Check("couldn't connect to peer", b.s.Connect(addr)) + _, err := b.s.Connect(jc.Request.Context(), addr) + jc.Check("couldn't connect to peer", err) } } @@ -436,19 +460,25 @@ func (b *bus) consensusNetworkHandler(jc jape.Context) { } func (b *bus) txpoolFeeHandler(jc jape.Context) { - fee := b.tp.RecommendedFee() - jc.Encode(fee) + jc.Encode(b.cm.RecommendedFee()) } func (b *bus) txpoolTransactionsHandler(jc jape.Context) { - jc.Encode(b.tp.Transactions()) + jc.Encode(b.cm.PoolTransactions()) } func (b *bus) txpoolBroadcastHandler(jc jape.Context) { var txnSet []types.Transaction - if jc.Decode(&txnSet) == nil { - jc.Check("couldn't broadcast transaction set", b.tp.AcceptTransactionSet(txnSet)) + if 
jc.Decode(&txnSet) != nil { + return + } + + _, err := b.cm.AddPoolTransactions(txnSet) + if jc.Check("couldn't broadcast transaction set", err) != nil { + return } + + b.s.BroadcastTransactionSet(txnSet) } func (b *bus) bucketsHandlerGET(jc jape.Context) { @@ -515,7 +545,7 @@ func (b *bus) bucketHandlerGET(jc jape.Context) { func (b *bus) walletHandler(jc jape.Context) { address := b.w.Address() - spendable, confirmed, unconfirmed, err := b.w.Balance() + balance, err := b.w.Balance() if jc.Check("couldn't fetch wallet balance", err) != nil { return } @@ -528,9 +558,10 @@ func (b *bus) walletHandler(jc jape.Context) { jc.Encode(api.WalletResponse{ ScanHeight: tip.Height, Address: address, - Confirmed: confirmed, - Spendable: spendable, - Unconfirmed: unconfirmed, + Confirmed: balance.Confirmed, + Spendable: balance.Spendable, + Unconfirmed: balance.Unconfirmed, + Immature: balance.Immature, }) } @@ -549,40 +580,68 @@ func (b *bus) walletTransactionsHandler(jc jape.Context) { return } + // convertToTransactions converts wallet events to API transactions. 
+ convertToTransactions := func(events []wallet.Event) []api.Transaction { + transactions := make([]api.Transaction, len(events)) + for i, e := range events { + transactions[i] = api.Transaction{ + Raw: e.Transaction, + Index: e.Index, + ID: types.TransactionID(e.ID), + Inflow: e.Inflow, + Outflow: e.Outflow, + Timestamp: e.Timestamp, + } + } + return transactions + } + if before.IsZero() && since.IsZero() { - txns, err := b.w.Transactions(offset, limit) + events, err := b.w.Events(offset, limit) if jc.Check("couldn't load transactions", err) == nil { - jc.Encode(txns) + jc.Encode(convertToTransactions(events)) } return } // TODO: remove this when 'before' and 'since' are deprecated, until then we // fetch all transactions and paginate manually if either is specified - txns, err := b.w.Transactions(0, -1) + events, err := b.w.Events(0, -1) if jc.Check("couldn't load transactions", err) != nil { return } - filtered := txns[:0] - for _, txn := range txns { + filtered := events[:0] + for _, txn := range events { if (before.IsZero() || txn.Timestamp.Before(before)) && (since.IsZero() || txn.Timestamp.After(since)) { filtered = append(filtered, txn) } } - txns = filtered + events = filtered if limit == 0 || limit == -1 { - jc.Encode(txns[offset:]) + jc.Encode(convertToTransactions(events[offset:])) } else { - jc.Encode(txns[offset : offset+limit]) + jc.Encode(convertToTransactions(events[offset : offset+limit])) } return } func (b *bus) walletOutputsHandler(jc jape.Context) { - utxos, err := b.w.UnspentOutputs() + utxos, err := b.w.SpendableOutputs() if jc.Check("couldn't load outputs", err) == nil { - jc.Encode(utxos) + // convert to siacoin elements + elements := make([]api.SiacoinElement, len(utxos)) + for i, sce := range utxos { + elements[i] = api.SiacoinElement{ + ID: sce.StateElement.ID, + SiacoinOutput: types.SiacoinOutput{ + Value: sce.SiacoinOutput.Value, + Address: sce.SiacoinOutput.Address, + }, + MaturityHeight: sce.MaturityHeight, + } + } + 
jc.Encode(elements) } } @@ -592,24 +651,22 @@ func (b *bus) walletFundHandler(jc jape.Context) { return } txn := wfr.Transaction + if len(txn.MinerFees) == 0 { // if no fees are specified, we add some - fee := b.tp.RecommendedFee().Mul64(b.cm.TipState().TransactionWeight(txn)) + fee := b.cm.RecommendedFee().Mul64(b.cm.TipState().TransactionWeight(txn)) txn.MinerFees = []types.Currency{fee} } - toSign, err := b.w.FundTransaction(b.cm.TipState(), &txn, wfr.Amount.Add(txn.MinerFees[0]), wfr.UseUnconfirmedTxns) + + toSign, err := b.w.FundTransaction(&txn, wfr.Amount.Add(txn.MinerFees[0]), wfr.UseUnconfirmedTxns) if jc.Check("couldn't fund transaction", err) != nil { return } - parents, err := b.tp.UnconfirmedParents(txn) - if jc.Check("couldn't load transaction dependencies", err) != nil { - b.w.ReleaseInputs(txn) - return - } + jc.Encode(api.WalletFundResponse{ Transaction: txn, ToSign: toSign, - DependsOn: parents, + DependsOn: b.cm.UnconfirmedParents(txn), }) } @@ -618,10 +675,8 @@ func (b *bus) walletSignHandler(jc jape.Context) { if jc.Decode(&wsr) != nil { return } - err := b.w.SignTransaction(b.cm.TipState(), &wsr.Transaction, wsr.ToSign, wsr.CoveredFields) - if jc.Check("couldn't sign transaction", err) == nil { - jc.Encode(wsr.Transaction) - } + b.w.SignTransaction(&wsr.Transaction, wsr.ToSign, wsr.CoveredFields) + jc.Encode(wsr.Transaction) } func (b *bus) walletRedistributeHandler(jc jape.Context) { @@ -634,8 +689,7 @@ func (b *bus) walletRedistributeHandler(jc jape.Context) { return } - cs := b.cm.TipState() - txns, toSign, err := b.w.Redistribute(cs, wfr.Outputs, wfr.Amount, b.tp.RecommendedFee(), b.tp.Transactions()) + txns, toSign, err := b.w.Redistribute(wfr.Outputs, wfr.Amount, b.cm.RecommendedFee()) if jc.Check("couldn't redistribute money in the wallet into the desired outputs", err) != nil { return } @@ -647,15 +701,12 @@ func (b *bus) walletRedistributeHandler(jc jape.Context) { } for i := 0; i < len(txns); i++ { - err = b.w.SignTransaction(cs, 
&txns[i], toSign, types.CoveredFields{WholeTransaction: true}) - if jc.Check("couldn't sign the transaction", err) != nil { - b.w.ReleaseInputs(txns...) - return - } + b.w.SignTransaction(&txns[i], toSign, types.CoveredFields{WholeTransaction: true}) ids = append(ids, txns[i].ID()) } - if jc.Check("couldn't broadcast the transaction", b.tp.AcceptTransactionSet(txns)) != nil { + _, err = b.cm.AddPoolTransactions(txns) + if jc.Check("couldn't broadcast the transaction", err) != nil { b.w.ReleaseInputs(txns...) return } @@ -690,23 +741,15 @@ func (b *bus) walletPrepareFormHandler(jc jape.Context) { txn := types.Transaction{ FileContracts: []types.FileContract{fc}, } - txn.MinerFees = []types.Currency{b.tp.RecommendedFee().Mul64(cs.TransactionWeight(txn))} - toSign, err := b.w.FundTransaction(cs, &txn, cost.Add(txn.MinerFees[0]), true) + txn.MinerFees = []types.Currency{b.cm.RecommendedFee().Mul64(cs.TransactionWeight(txn))} + toSign, err := b.w.FundTransaction(&txn, cost.Add(txn.MinerFees[0]), true) if jc.Check("couldn't fund transaction", err) != nil { return } - cf := wallet.ExplicitCoveredFields(txn) - err = b.w.SignTransaction(cs, &txn, toSign, cf) - if jc.Check("couldn't sign transaction", err) != nil { - b.w.ReleaseInputs(txn) - return - } - parents, err := b.tp.UnconfirmedParents(txn) - if jc.Check("couldn't load transaction dependencies", err) != nil { - b.w.ReleaseInputs(txn) - return - } - jc.Encode(append(parents, txn)) + + b.w.SignTransaction(&txn, toSign, wallet.ExplicitCoveredFields(txn)) + + jc.Encode(append(b.cm.UnconfirmedParents(txn), txn)) } func (b *bus) walletPrepareRenewHandler(jc jape.Context) { @@ -747,20 +790,14 @@ func (b *bus) walletPrepareRenewHandler(jc jape.Context) { // Fund the txn. We are not signing it yet since it's not complete. The host // still needs to complete it and the revision + contract are signed with // the renter key by the worker. 
- toSign, err := b.w.FundTransaction(cs, &txn, cost, true) + toSign, err := b.w.FundTransaction(&txn, cost, true) if jc.Check("couldn't fund transaction", err) != nil { return } - // Add any required parents. - parents, err := b.tp.UnconfirmedParents(txn) - if jc.Check("couldn't load transaction dependencies", err) != nil { - b.w.ReleaseInputs(txn) - return - } jc.Encode(api.WalletPrepareRenewResponse{ ToSign: toSign, - TransactionSet: append(parents, txn), + TransactionSet: append(b.cm.UnconfirmedParents(txn), txn), }) } @@ -780,7 +817,7 @@ func (b *bus) walletPendingHandler(jc jape.Context) { return false } - txns := b.tp.Transactions() + txns := b.cm.PoolTransactions() relevant := txns[:0] for _, txn := range txns { if isRelevant(txn) { @@ -1727,10 +1764,17 @@ func (b *bus) paramsHandlerUploadGET(jc jape.Context) { } func (b *bus) consensusState() api.ConsensusState { + cs := b.cm.TipState() + + var synced bool + if block, ok := b.cm.Block(cs.Index.ID); ok && time.Since(block.Timestamp) < 2*cs.BlockInterval() { + synced = true + } + return api.ConsensusState{ - BlockHeight: b.cm.TipState().Index.Height, - LastBlockTime: api.TimeRFC3339(b.cm.LastBlockTime()), - Synced: b.cm.Synced(), + BlockHeight: cs.Index.Height, + LastBlockTime: api.TimeRFC3339(cs.PrevTimestamps[0]), + Synced: synced, } } @@ -1763,7 +1807,7 @@ func (b *bus) gougingParams(ctx context.Context) (api.GougingParams, error) { ConsensusState: cs, GougingSettings: gs, RedundancySettings: rs, - TransactionFee: b.tp.RecommendedFee(), + TransactionFee: b.cm.RecommendedFee(), }, nil } @@ -2049,7 +2093,7 @@ func (b *bus) webhookActionHandlerPost(jc jape.Context) { if jc.Check("failed to decode action", jc.Decode(&action)) != nil { return } - b.hooks.BroadcastAction(jc.Request.Context(), action) + b.webhooks.BroadcastAction(jc.Request.Context(), action) } func (b *bus) webhookHandlerDelete(jc jape.Context) { @@ -2057,7 +2101,7 @@ func (b *bus) webhookHandlerDelete(jc jape.Context) { if jc.Decode(&wh) != nil 
{ return } - err := b.hooks.Delete(wh) + err := b.webhooks.Delete(wh) if errors.Is(err, webhooks.ErrWebhookNotFound) { jc.Error(fmt.Errorf("webhook for URL %v and event %v.%v not found", wh.URL, wh.Module, wh.Event), http.StatusNotFound) return @@ -2067,7 +2111,7 @@ func (b *bus) webhookHandlerDelete(jc jape.Context) { } func (b *bus) webhookHandlerGet(jc jape.Context) { - webhooks, queueInfos := b.hooks.Info() + webhooks, queueInfos := b.webhooks.Info() jc.Encode(api.WebHookResponse{ Queues: queueInfos, Webhooks: webhooks, @@ -2079,7 +2123,7 @@ func (b *bus) webhookHandlerPost(jc jape.Context) { if jc.Decode(&req) != nil { return } - err := b.hooks.Register(webhooks.Webhook{ + err := b.webhooks.Register(webhooks.Webhook{ Event: req.Event, Module: req.Module, URL: req.URL, @@ -2345,14 +2389,13 @@ func (b *bus) multipartHandlerListPartsPOST(jc jape.Context) { } // New returns a new Bus. -func New(s Syncer, am *alerts.Manager, hm *webhooks.Manager, cm ChainManager, tp TransactionPool, w Wallet, hdb HostDB, as AutopilotStore, ms MetadataStore, ss SettingStore, eas EphemeralAccountStore, mtrcs MetricsStore, l *zap.Logger) (*bus, error) { +func New(am *alerts.Manager, hm WebhookManager, cm ChainManager, s Syncer, w Wallet, hdb HostDB, as AutopilotStore, ms MetadataStore, ss SettingStore, eas EphemeralAccountStore, mtrcs MetricsStore, l *zap.Logger) (*bus, error) { b := &bus{ alerts: alerts.WithOrigin(am, "bus"), alertMgr: am, - hooks: hm, - s: s, + webhooks: hm, cm: cm, - tp: tp, + s: s, w: w, hdb: hdb, as: as, @@ -2448,5 +2491,6 @@ func New(s Syncer, am *alerts.Manager, hm *webhooks.Manager, cm ChainManager, tp if err := eas.SetUncleanShutdown(); err != nil { return nil, fmt.Errorf("failed to mark account shutdown as unclean: %w", err) } + return b, nil } diff --git a/bus/client/client_test.go b/bus/client/client_test.go index 9cd1e80e7..ce84c8986 100644 --- a/bus/client/client_test.go +++ b/bus/client/client_test.go @@ -68,9 +68,9 @@ func newTestClient(dir string) 
(*client.Client, func() error, func(context.Conte return nil, nil, nil, err } - // create client - client := client.New("http://"+l.Addr().String(), "test") - b, cleanup, err := node.NewBus(node.BusConfig{ + // create bus + network, genesis := build.Network() + b, shutdown, _, err := node.NewBus(node.BusConfig{ Bus: config.Bus{ AnnouncementMaxAgeHours: 24 * 7 * 52, // 1 year Bootstrap: false, @@ -78,7 +78,8 @@ func newTestClient(dir string) (*client.Client, func() error, func(context.Conte UsedUTXOExpiry: time.Minute, SlabBufferCompletionThreshold: 0, }, - Miner: node.NewMiner(client), + Network: network, + Genesis: genesis, SlabPruningInterval: time.Minute, SlabPruningCooldown: time.Minute, }, filepath.Join(dir, "bus"), types.GeneratePrivateKey(), zap.New(zapcore.NewNopCore())) @@ -86,6 +87,9 @@ func newTestClient(dir string) (*client.Client, func() error, func(context.Conte return nil, nil, nil, err } + // create client + client := client.New("http://"+l.Addr().String(), "test") + // create server server := http.Server{Handler: jape.BasicAuth("test")(b)} @@ -99,7 +103,7 @@ func newTestClient(dir string) (*client.Client, func() error, func(context.Conte shutdownFn := func(ctx context.Context) error { server.Shutdown(ctx) - return cleanup(ctx) + return shutdown(ctx) } return client, serveFn, shutdownFn, nil } diff --git a/bus/client/wallet.go b/bus/client/wallet.go index 0d4761e51..55105b728 100644 --- a/bus/client/wallet.go +++ b/bus/client/wallet.go @@ -10,7 +10,6 @@ import ( rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - "go.sia.tech/renterd/wallet" ) // SendSiacoins is a helper method that sends siacoins to the given outputs. @@ -67,7 +66,7 @@ func (c *Client) WalletFund(ctx context.Context, txn *types.Transaction, amount } // WalletOutputs returns the set of unspent outputs controlled by the wallet. 
-func (c *Client) WalletOutputs(ctx context.Context) (resp []wallet.SiacoinElement, err error) { +func (c *Client) WalletOutputs(ctx context.Context) (resp []api.SiacoinElement, err error) { err = c.c.WithContext(ctx).GET("/wallet/outputs", &resp) return } @@ -137,7 +136,7 @@ func (c *Client) WalletSign(ctx context.Context, txn *types.Transaction, toSign } // WalletTransactions returns all transactions relevant to the wallet. -func (c *Client) WalletTransactions(ctx context.Context, opts ...api.WalletTransactionsOption) (resp []wallet.Transaction, err error) { +func (c *Client) WalletTransactions(ctx context.Context, opts ...api.WalletTransactionsOption) (resp []api.Transaction, err error) { c.c.Custom("GET", "/wallet/transactions", nil, &resp) values := url.Values{} diff --git a/cmd/renterd/main.go b/cmd/renterd/main.go index 98e075d92..41c910784 100644 --- a/cmd/renterd/main.go +++ b/cmd/renterd/main.go @@ -386,10 +386,11 @@ func main() { mustParseWorkers(depWorkerRemoteAddrsStr, depWorkerRemotePassStr) } - network, _ := build.Network() + network, genesis := build.Network() busCfg := node.BusConfig{ Bus: cfg.Bus, Network: network, + Genesis: genesis, SlabPruningInterval: time.Hour, SlabPruningCooldown: 30 * time.Second, } @@ -483,13 +484,13 @@ func main() { busAddr, busPassword := cfg.Bus.RemoteAddr, cfg.Bus.RemotePassword if cfg.Bus.RemoteAddr == "" { - b, fn, err := node.NewBus(busCfg, cfg.Directory, getSeed(), logger) + b, shutdown, _, err := node.NewBus(busCfg, cfg.Directory, getSeed(), logger) if err != nil { logger.Fatal("failed to create bus, err: " + err.Error()) } shutdownFns = append(shutdownFns, shutdownFn{ name: "Bus", - fn: fn, + fn: shutdown, }) mux.sub["/api/bus"] = treeMux{h: auth(b)} diff --git a/go.mod b/go.mod index b366e69e1..f7ea56fae 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/montanaflynn/stats v0.7.1 gitlab.com/NebulousLabs/encoding v0.0.0-20200604091946-456c3dc907fe go.sia.tech/core v0.2.1 - 
go.sia.tech/coreutils v0.0.3 + go.sia.tech/coreutils v0.0.4-0.20240313143809-01b5d444a630 go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 go.sia.tech/hostd v1.0.2 go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 @@ -74,6 +74,7 @@ require ( gitlab.com/NebulousLabs/ratelimit v0.0.0-20200811080431-99b8f0768b2e // indirect gitlab.com/NebulousLabs/siamux v0.0.2-0.20220630142132-142a1443a259 // indirect gitlab.com/NebulousLabs/threadgroup v0.0.0-20200608151952-38921fbef213 // indirect + go.etcd.io/bbolt v1.3.9 // indirect go.sia.tech/web v0.0.0-20231213145933-3f175a86abff // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.21.0 // indirect diff --git a/go.sum b/go.sum index 2a3b756ab..ee1e0ba9d 100644 --- a/go.sum +++ b/go.sum @@ -13,6 +13,9 @@ github.com/aws/aws-sdk-go v1.50.1 h1:AwnLUM7TcH9vMZqA4TcDKmGfLmDW5VXwT5tPH6kXylo github.com/aws/aws-sdk-go v1.50.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/br0xen/boltbrowser v0.0.0-20230531143731-fcc13603daaf h1:NyqdH+vWNYPwQIK9jNv7sdIVbRGclwIdFhQk3+qlNEs= +github.com/br0xen/boltbrowser v0.0.0-20230531143731-fcc13603daaf/go.mod h1:uhjRwoqgy4g6fCwo7OJHjCxDOmx/YSCz2rnAYb63ZhY= +github.com/br0xen/termbox-util v0.0.0-20170904143325-de1d4c83380e/go.mod h1:x9wJlgOj74OFTOBwXOuO8pBguW37EgYNx51Dbjkfzo4= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.86.0 h1:jEKN5VHNYNYtfDL2lUFLTRo+nOVNPFxpXTstVx0rqHI= @@ -149,6 +152,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= github.com/montanaflynn/stats 
v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nsf/termbox-go v1.1.1/go.mod h1:T0cTdVuOwf7pHQNtfhnEbzHbcNyCEcVU4YPpouCbVxo= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -189,8 +193,14 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -236,12 +246,26 @@ gitlab.com/NebulousLabs/threadgroup 
v0.0.0-20200608151952-38921fbef213/go.mod h1 gitlab.com/NebulousLabs/writeaheadlog v0.0.0-20200618142844-c59a90f49130/go.mod h1:SxigdS5Q1ui+OMgGAXt1E/Fg3RB6PvKXMov2O3gvIzs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= +go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= go.sia.tech/core v0.2.1 h1:CqmMd+T5rAhC+Py3NxfvGtvsj/GgwIqQHHVrdts/LqY= go.sia.tech/core v0.2.1/go.mod h1:3EoY+rR78w1/uGoXXVqcYdwSjSJKuEMI5bL7WROA27Q= go.sia.tech/coreutils v0.0.3 h1:ZxuzovRpQMvfy/pCOV4om1cPF6sE15GyJyK36kIrF1Y= go.sia.tech/coreutils v0.0.3/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= +go.sia.tech/coreutils v0.0.4-0.20240306153355-9185ee5bb346 h1:HeZRhx0JEWLYZ9TZMAjcWzC/3P+GYeNuB5bkRE0NAkQ= +go.sia.tech/coreutils v0.0.4-0.20240306153355-9185ee5bb346/go.mod h1:UBFc77wXiE//eyilO5HLOncIEj7F69j0Nv2OkFujtP0= +go.sia.tech/coreutils v0.0.4-0.20240307153935-66de052e7ef7 h1:XXIMhtB9mcR1PlwdkPT78gWaCMSTJ/xDwrOm+qJJBY4= +go.sia.tech/coreutils v0.0.4-0.20240307153935-66de052e7ef7/go.mod h1:OTMMLucKVcpMDCIwGQlvbi4QNgc3O2Y291xMheYrpOQ= +go.sia.tech/coreutils v0.0.4-0.20240313143809-01b5d444a630 h1:KpVSI9ijpyyjwXvxV0tSWK9ukFyTupibg9OrlvjiKDk= +go.sia.tech/coreutils v0.0.4-0.20240313143809-01b5d444a630/go.mod h1:QvsXghS4wqhJosQq3AkMjA2mJ6pbDB7PgG+w5b09/z0= go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2 h1:ulzfJNjxN5DjXHClkW2pTiDk+eJ+0NQhX87lFDZ03t0= go.sia.tech/gofakes3 v0.0.0-20231109151325-e0d47c10dce2/go.mod h1:PlsiVCn6+wssrR7bsOIlZm0DahsVrDydrlbjY4F14sg= 
+go.sia.tech/hostd v1.0.2-beta.2.0.20240131203318-9d84aad6ef13 h1:JcyVUtJfzeMh+zJAW20BMVhBYekg+h0T8dMeF7GzAFs= +go.sia.tech/hostd v1.0.2-beta.2.0.20240131203318-9d84aad6ef13/go.mod h1:axfDFNGPnVrGMf2nrX6sDNYJrft87kTD3XpzOyT+Wi8= go.sia.tech/hostd v1.0.2 h1:GjzNIAlwg3/dViF6258Xn5DI3+otQLRqmkoPDugP+9Y= go.sia.tech/hostd v1.0.2/go.mod h1:zGw+AGVmazAp4ydvo7bZLNKTy1J51RI6Mp/oxRtYT6c= go.sia.tech/jape v0.11.2-0.20240124024603-93559895d640 h1:mSaJ622P7T/M97dAK8iPV+IRIC9M5vV28NHeceoWO3M= @@ -274,6 +298,8 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -326,6 +352,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= diff --git a/internal/node/miner.go b/internal/node/miner.go deleted file mode 100644 index 9043196b4..000000000 --- a/internal/node/miner.go +++ /dev/null @@ -1,150 +0,0 @@ -// TODO: remove this file when we can import it from hostd -package node - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "sync" - - "go.sia.tech/core/types" - "go.sia.tech/siad/crypto" - "go.sia.tech/siad/modules" - stypes "go.sia.tech/siad/types" - "lukechampine.com/frand" -) - -const solveAttempts = 1e4 - -type ( - // Consensus defines a minimal interface needed by the miner to interact - // with the consensus set - Consensus interface { - AcceptBlock(context.Context, types.Block) error - } - - // A Miner is a CPU miner that can mine blocks, sending the reward to a - // specified address. - Miner struct { - consensus Consensus - - mu sync.Mutex - height stypes.BlockHeight - target stypes.Target - currentBlockID stypes.BlockID - txnsets map[modules.TransactionSetID][]stypes.TransactionID - transactions []stypes.Transaction - } -) - -var errFailedToSolve = errors.New("failed to solve block") - -// ProcessConsensusChange implements modules.ConsensusSetSubscriber. 
-func (m *Miner) ProcessConsensusChange(cc modules.ConsensusChange) { - m.mu.Lock() - defer m.mu.Unlock() - m.target = cc.ChildTarget - m.currentBlockID = cc.AppliedBlocks[len(cc.AppliedBlocks)-1].ID() - m.height = cc.BlockHeight -} - -// ReceiveUpdatedUnconfirmedTransactions implements modules.TransactionPoolSubscriber -func (m *Miner) ReceiveUpdatedUnconfirmedTransactions(diff *modules.TransactionPoolDiff) { - m.mu.Lock() - defer m.mu.Unlock() - - reverted := make(map[stypes.TransactionID]bool) - for _, setID := range diff.RevertedTransactions { - for _, txnID := range m.txnsets[setID] { - reverted[txnID] = true - } - } - - filtered := m.transactions[:0] - for _, txn := range m.transactions { - if reverted[txn.ID()] { - continue - } - filtered = append(filtered, txn) - } - - for _, txnset := range diff.AppliedTransactions { - m.txnsets[txnset.ID] = txnset.IDs - filtered = append(filtered, txnset.Transactions...) - } - m.transactions = filtered -} - -// mineBlock attempts to mine a block and add it to the consensus set. -func (m *Miner) mineBlock(addr stypes.UnlockHash) error { - m.mu.Lock() - block := stypes.Block{ - ParentID: m.currentBlockID, - Timestamp: stypes.CurrentTimestamp(), - } - - randBytes := frand.Bytes(stypes.SpecifierLen) - randTxn := stypes.Transaction{ - ArbitraryData: [][]byte{append(modules.PrefixNonSia[:], randBytes...)}, - } - block.Transactions = append([]stypes.Transaction{randTxn}, m.transactions...) 
- block.MinerPayouts = append(block.MinerPayouts, stypes.SiacoinOutput{ - Value: block.CalculateSubsidy(m.height + 1), - UnlockHash: addr, - }) - target := m.target - m.mu.Unlock() - - merkleRoot := block.MerkleRoot() - header := make([]byte, 80) - copy(header, block.ParentID[:]) - binary.LittleEndian.PutUint64(header[40:48], uint64(block.Timestamp)) - copy(header[48:], merkleRoot[:]) - - var nonce uint64 - var solved bool - for i := 0; i < solveAttempts; i++ { - id := crypto.HashBytes(header) - if bytes.Compare(target[:], id[:]) >= 0 { - block.Nonce = *(*stypes.BlockNonce)(header[32:40]) - solved = true - break - } - binary.LittleEndian.PutUint64(header[32:], nonce) - nonce += stypes.ASICHardforkFactor - } - if !solved { - return errFailedToSolve - } - - var b types.Block - convertToCore(&block, (*types.V1Block)(&b)) - if err := m.consensus.AcceptBlock(context.Background(), types.Block(b)); err != nil { - return fmt.Errorf("failed to get block accepted: %w", err) - } - return nil -} - -// Mine mines n blocks, sending the reward to addr -func (m *Miner) Mine(addr types.Address, n int) error { - var err error - for mined := 1; mined <= n; { - // return the error only if the miner failed to solve the block, - // ignore any consensus related errors - if err = m.mineBlock(stypes.UnlockHash(addr)); errors.Is(err, errFailedToSolve) { - return fmt.Errorf("failed to mine block %v: %w", mined, errFailedToSolve) - } - mined++ - } - return nil -} - -// NewMiner initializes a new CPU miner -func NewMiner(consensus Consensus) *Miner { - return &Miner{ - consensus: consensus, - txnsets: make(map[modules.TransactionSetID][]stypes.TransactionID), - } -} diff --git a/internal/node/node.go b/internal/node/node.go index e94cfbb4d..2896dbd8a 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -4,37 +4,40 @@ import ( "context" "errors" "fmt" - "log" + "net" "net/http" "os" "path/filepath" "time" "go.sia.tech/core/consensus" + "go.sia.tech/core/gateway" 
"go.sia.tech/core/types" + "go.sia.tech/coreutils" + "go.sia.tech/coreutils/chain" + "go.sia.tech/coreutils/syncer" + "go.sia.tech/coreutils/wallet" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/autopilot" "go.sia.tech/renterd/bus" "go.sia.tech/renterd/config" "go.sia.tech/renterd/stores" - "go.sia.tech/renterd/wallet" "go.sia.tech/renterd/webhooks" "go.sia.tech/renterd/worker" - "go.sia.tech/siad/modules" - mconsensus "go.sia.tech/siad/modules/consensus" - "go.sia.tech/siad/modules/gateway" - "go.sia.tech/siad/modules/transactionpool" - "go.sia.tech/siad/sync" "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/crypto/blake2b" "gorm.io/gorm" ) +// TODOs: +// - add wallet metrics +// - add UPNP support + type BusConfig struct { config.Bus Network *consensus.Network - Miner *Miner + Genesis types.Block DBLoggerConfig stores.LoggerConfig DBDialector gorm.Dialector DBMetricsDialector gorm.Dialector @@ -52,47 +55,13 @@ type ( ShutdownFn = func(context.Context) error ) -func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (http.Handler, ShutdownFn, error) { - gatewayDir := filepath.Join(dir, "gateway") - if err := os.MkdirAll(gatewayDir, 0700); err != nil { - return nil, nil, err - } - g, err := gateway.New(cfg.GatewayAddr, cfg.Bootstrap, gatewayDir) - if err != nil { - return nil, nil, err - } - consensusDir := filepath.Join(dir, "consensus") - if err := os.MkdirAll(consensusDir, 0700); err != nil { - return nil, nil, err - } - cs, errCh := mconsensus.New(g, cfg.Bootstrap, consensusDir) - select { - case err := <-errCh: - if err != nil { - return nil, nil, err - } - default: - go func() { - if err := <-errCh; err != nil { - log.Println("WARNING: consensus initialization returned an error:", err) - } - }() - } - tpoolDir := filepath.Join(dir, "transactionpool") - if err := os.MkdirAll(tpoolDir, 0700); err != nil { - return nil, nil, err - } - tp, err := transactionpool.New(cs, g, tpoolDir) - if err != nil { - return nil, nil, err - } - 
+func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, logger *zap.Logger) (http.Handler, ShutdownFn, *chain.Manager, error) { // If no DB dialector was provided, use SQLite. dbConn := cfg.DBDialector if dbConn == nil { dbDir := filepath.Join(dir, "db") if err := os.MkdirAll(dbDir, 0700); err != nil { - return nil, nil, err + return nil, nil, nil, err } dbConn = stores.NewSQLiteConnection(filepath.Join(dbDir, "db.sqlite")) } @@ -100,17 +69,26 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht if dbMetricsConn == nil { dbDir := filepath.Join(dir, "db") if err := os.MkdirAll(dbDir, 0700); err != nil { - return nil, nil, err + return nil, nil, nil, err } dbMetricsConn = stores.NewSQLiteConnection(filepath.Join(dbDir, "metrics.sqlite")) } + consensusDir := filepath.Join(dir, "consensus") + if err := os.MkdirAll(consensusDir, 0700); err != nil { + return nil, nil, nil, err + } + bdb, err := coreutils.OpenBoltChainDB(filepath.Join(dir, "chain.db")) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to open chain database: %w", err) + } + alertsMgr := alerts.NewManager() - sqlLogger := stores.NewSQLLogger(l.Named("db"), cfg.DBLoggerConfig) - walletAddr := wallet.StandardAddress(seed.PublicKey()) + sqlLogger := stores.NewSQLLogger(logger.Named("db"), cfg.DBLoggerConfig) + walletAddr := types.StandardUnlockHash(seed.PublicKey()) sqlStoreDir := filepath.Join(dir, "partial_slabs") announcementMaxAge := time.Duration(cfg.AnnouncementMaxAgeHours) * time.Hour - sqlStore, ccid, err := stores.NewSQLStore(stores.Config{ + sqlStore, err := stores.NewSQLStore(stores.Config{ Conn: dbConn, ConnMetrics: dbMetricsConn, Alerts: alerts.WithOrigin(alertsMgr, "bus"), @@ -120,76 +98,110 @@ func NewBus(cfg BusConfig, dir string, seed types.PrivateKey, l *zap.Logger) (ht PersistInterval: cfg.PersistInterval, WalletAddress: walletAddr, SlabBufferCompletionThreshold: cfg.SlabBufferCompletionThreshold, - Logger: l.Sugar(), + Logger: 
logger.Sugar(), GormLogger: sqlLogger, RetryTransactionIntervals: []time.Duration{200 * time.Millisecond, 500 * time.Millisecond, time.Second, 3 * time.Second, 10 * time.Second, 10 * time.Second}, }) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - hooksMgr, err := webhooks.NewManager(l.Named("webhooks").Sugar(), sqlStore) + wh, err := webhooks.NewManager(logger.Named("webhooks").Sugar(), sqlStore) if err != nil { - return nil, nil, err + return nil, nil, nil, err } // Hook up webhooks to alerts. - alertsMgr.RegisterWebhookBroadcaster(hooksMgr) - - cancelSubscribe := make(chan struct{}) - go func() { - subscribeErr := cs.ConsensusSetSubscribe(sqlStore, ccid, cancelSubscribe) - if errors.Is(subscribeErr, modules.ErrInvalidConsensusChangeID) { - l.Warn("Invalid consensus change ID detected - resyncing consensus") - // Reset the consensus state within the database and rescan. - if err := sqlStore.ResetConsensusSubscription(); err != nil { - l.Fatal(fmt.Sprintf("Failed to reset consensus subscription of SQLStore: %v", err)) - return - } - // Subscribe from the beginning. 
- subscribeErr = cs.ConsensusSetSubscribe(sqlStore, modules.ConsensusChangeBeginning, cancelSubscribe) - } - if subscribeErr != nil && !errors.Is(subscribeErr, sync.ErrStopped) { - l.Fatal(fmt.Sprintf("ConsensusSetSubscribe returned an error: %v", err)) - } - }() + alertsMgr.RegisterWebhookBroadcaster(wh) - w := wallet.NewSingleAddressWallet(seed, sqlStore, cfg.UsedUTXOExpiry, zap.NewNop().Sugar()) - tp.TransactionPoolSubscribe(w) - if err := cs.ConsensusSetSubscribe(w, modules.ConsensusChangeRecent, nil); err != nil { - return nil, nil, err + // create chain manager + store, state, err := chain.NewDBStore(bdb, cfg.Network, cfg.Genesis) + if err != nil { + return nil, nil, nil, err + } + cm := chain.NewManager(store, state) + + // create wallet + w, err := wallet.NewSingleAddressWallet(seed, cm, sqlStore, wallet.WithReservationDuration(cfg.UsedUTXOExpiry)) + if err != nil { + return nil, nil, nil, err + } + + // create syncer + l, err := net.Listen("tcp", cfg.GatewayAddr) + if err != nil { + return nil, nil, nil, err + } + syncerAddr := l.Addr().String() + + // peers will reject us if our hostname is empty or unspecified, so use loopback + host, port, _ := net.SplitHostPort(syncerAddr) + if ip := net.ParseIP(host); ip == nil || ip.IsUnspecified() { + syncerAddr = net.JoinHostPort("127.0.0.1", port) } - if m := cfg.Miner; m != nil { - if err := cs.ConsensusSetSubscribe(m, ccid, nil); err != nil { - return nil, nil, err + header := gateway.Header{ + GenesisID: cfg.Genesis.ID(), + UniqueID: gateway.GenerateUniqueID(), + NetAddress: syncerAddr, + } + s := syncer.New(l, cm, sqlStore, header, syncer.WithSyncInterval(100*time.Millisecond), syncer.WithLogger(logger.Named("syncer"))) + + b, err := bus.New(alertsMgr, wh, cm, s, w, sqlStore, sqlStore, sqlStore, sqlStore, sqlStore, sqlStore, logger) + if err != nil { + return nil, nil, nil, err + } + + // bootstrap the syncer + if cfg.Bootstrap { + if cfg.Network == nil { + return nil, nil, nil, errors.New("cannot bootstrap 
without a network") + } + + var bootstrapPeers []string + switch cfg.Network.Name { + case "mainnet": + bootstrapPeers = syncer.MainnetBootstrapPeers + case "zen": + bootstrapPeers = syncer.ZenBootstrapPeers + case "anagami": + bootstrapPeers = syncer.AnagamiBootstrapPeers + default: + return nil, nil, nil, fmt.Errorf("no available bootstrap peers for unknown network '%s'", cfg.Network.Name) + } + + for _, addr := range bootstrapPeers { + if err := sqlStore.AddPeer(addr); err != nil { + return nil, nil, nil, fmt.Errorf("%w: failed to add bootstrap peer '%s'", err, addr) + } } - tp.TransactionPoolSubscribe(m) } - cm, err := NewChainManager(cs, cfg.Network) + // start the syncer + go s.Run() + + // fetch chain index + ci, err := sqlStore.ChainIndex() if err != nil { - return nil, nil, err + return nil, nil, nil, fmt.Errorf("%w: failed to fetch chain index", err) } - b, err := bus.New(syncer{g, tp}, alertsMgr, hooksMgr, cm, NewTransactionPool(tp), w, sqlStore, sqlStore, sqlStore, sqlStore, sqlStore, sqlStore, l) + // subscribe the store to the chain manager + err = cm.AddSubscriber(sqlStore, ci) if err != nil { - return nil, nil, err + return nil, nil, nil, err } shutdownFn := func(ctx context.Context) error { return errors.Join( - func() error { - close(cancelSubscribe) - return nil - }(), - g.Close(), - cs.Close(), - tp.Close(), + l.Close(), + w.Close(), b.Shutdown(ctx), sqlStore.Close(), + store.Close(), + bdb.Close(), ) } - return b.Handler(), shutdownFn, nil + return b.Handler(), shutdownFn, cm, nil } func NewWorker(cfg config.Worker, b worker.Bus, seed types.PrivateKey, l *zap.Logger) (http.Handler, ShutdownFn, error) { diff --git a/internal/node/syncer.go b/internal/node/syncer.go deleted file mode 100644 index 6a4e80c98..000000000 --- a/internal/node/syncer.go +++ /dev/null @@ -1,43 +0,0 @@ -package node - -import ( - "context" - - "go.sia.tech/core/types" - "go.sia.tech/siad/modules" - stypes "go.sia.tech/siad/types" -) - -type syncer struct { - g 
modules.Gateway - tp modules.TransactionPool -} - -func (s syncer) Addr() string { - return string(s.g.Address()) -} - -func (s syncer) Peers() []string { - var peers []string - for _, p := range s.g.Peers() { - peers = append(peers, string(p.NetAddress)) - } - return peers -} - -func (s syncer) Connect(addr string) error { - return s.g.Connect(modules.NetAddress(addr)) -} - -func (s syncer) BroadcastTransaction(txn types.Transaction, dependsOn []types.Transaction) { - txnSet := make([]stypes.Transaction, len(dependsOn)+1) - for i, txn := range dependsOn { - convertToSiad(txn, &txnSet[i]) - } - convertToSiad(txn, &txnSet[len(txnSet)-1]) - s.tp.Broadcast(txnSet) -} - -func (s syncer) SyncerAddress(ctx context.Context) (string, error) { - return string(s.g.Address()), nil -} diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 16b3acbfd..b2032d828 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -1,6 +1,7 @@ package e2e import ( + "bytes" "context" "encoding/hex" "errors" @@ -14,8 +15,11 @@ import ( "time" "github.com/minio/minio-go/v7" + "gitlab.com/NebulousLabs/encoding" "go.sia.tech/core/consensus" "go.sia.tech/core/types" + "go.sia.tech/coreutils" + "go.sia.tech/coreutils/chain" "go.sia.tech/jape" "go.sia.tech/renterd/api" "go.sia.tech/renterd/autopilot" @@ -31,12 +35,12 @@ import ( "lukechampine.com/frand" "go.sia.tech/renterd/worker" + stypes "go.sia.tech/siad/types" ) const ( testBusFlushInterval = 100 * time.Millisecond testBusPersistInterval = 2 * time.Second - latestHardforkHeight = 50 // foundation hardfork height in testing ) var ( @@ -61,7 +65,7 @@ type TestCluster struct { s3ShutdownFns []func(context.Context) error network *consensus.Network - miner *node.Miner + cm *chain.Manager apID string dbName string dir string @@ -186,8 +190,6 @@ func newTestLoggerCustom(level zapcore.Level) *zap.Logger { // newTestCluster creates a new cluster without hosts with a funded bus. 
func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { - t.Helper() - // Skip any test that requires a cluster when running short tests. if testing.Short() { t.SkipNow() @@ -298,11 +300,8 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { }) tt.OK(err) - // Create miner. - busCfg.Miner = node.NewMiner(busClient) - // Create bus. - b, bStopFn, err := node.NewBus(busCfg, busDir, wk, logger) + b, bShutdownFn, cm, err := node.NewBus(busCfg, busDir, wk, logger) tt.OK(err) busAuth := jape.BasicAuth(busPassword) @@ -312,7 +311,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { var busShutdownFns []func(context.Context) error busShutdownFns = append(busShutdownFns, busServer.Shutdown) - busShutdownFns = append(busShutdownFns, bStopFn) + busShutdownFns = append(busShutdownFns, bShutdownFn) // Create worker. w, wShutdownFn, err := node.NewWorker(workerCfg, busClient, wk, logger) @@ -357,7 +356,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { dbName: dbName, logger: logger, network: busCfg.Network, - miner: busCfg.Miner, + cm: cm, tt: tt, wk: wk, @@ -425,25 +424,21 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { // Fund the bus. 
if funding { - cluster.MineBlocks(latestHardforkHeight) - tt.Retry(1000, 100*time.Millisecond, func() error { - resp, err := busClient.ConsensusState(ctx) - if err != nil { + cluster.MineBlocks(busCfg.Network.HardforkFoundation.Height + blocksPerDay) // mine until the first block reward matures + tt.Retry(100, 100*time.Millisecond, func() error { + if cs, err := busClient.ConsensusState(ctx); err != nil { return err + } else if !cs.Synced { + return fmt.Errorf("chain not synced: %v", cs.Synced) } - if !resp.Synced || resp.BlockHeight < latestHardforkHeight { - return fmt.Errorf("chain not synced: %v %v", resp.Synced, resp.BlockHeight < latestHardforkHeight) - } - res, err := cluster.Bus.Wallet(ctx) - if err != nil { + if res, err := cluster.Bus.Wallet(ctx); err != nil { return err + } else if res.Confirmed.IsZero() { + return fmt.Errorf("wallet not funded: %+v", res) + } else { + return nil } - - if res.Confirmed.IsZero() { - tt.Fatal("wallet not funded") - } - return nil }) } @@ -451,7 +446,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { cluster.AddHostsBlocking(nHosts) cluster.WaitForContracts() cluster.WaitForContractSet(test.ContractSet, nHosts) - _ = cluster.WaitForAccounts() + cluster.WaitForAccounts() } return cluster @@ -502,7 +497,7 @@ func (c *TestCluster) MineToRenewWindow() { if cs.BlockHeight >= renewWindowStart { c.tt.Fatalf("already in renew window: bh: %v, currentPeriod: %v, periodLength: %v, renewWindow: %v", cs.BlockHeight, ap.CurrentPeriod, ap.Config.Contracts.Period, renewWindowStart) } - c.MineBlocks(int(renewWindowStart - cs.BlockHeight)) + c.MineBlocks(renewWindowStart - cs.BlockHeight) c.Sync() } @@ -541,24 +536,26 @@ func (c *TestCluster) synced(hosts []*Host) (bool, error) { } // MineBlocks uses the bus' miner to mine n blocks. 
-func (c *TestCluster) MineBlocks(n int) { +func (c *TestCluster) MineBlocks(n uint64) { c.tt.Helper() wallet, err := c.Bus.Wallet(context.Background()) c.tt.OK(err) // If we don't have any hosts in the cluster mine all blocks right away. if len(c.hosts) == 0 { - c.tt.OK(c.miner.Mine(wallet.Address, n)) + c.tt.OK(c.mineBlocks(wallet.Address, n)) c.Sync() + return } + // Otherwise mine blocks in batches of 3 to avoid going out of sync with // hosts by too many blocks. - for mined := 0; mined < n; { + for mined := uint64(0); mined < n; { toMine := n - mined if toMine > 10 { toMine = 10 } - c.tt.OK(c.miner.Mine(wallet.Address, toMine)) + c.tt.OK(c.mineBlocks(wallet.Address, toMine)) c.Sync() mined += toMine } @@ -584,6 +581,7 @@ func (c *TestCluster) WaitForAccounts() []api.Account { func (c *TestCluster) WaitForContracts() []api.Contract { c.tt.Helper() + // build hosts map hostsMap := make(map[types.PublicKey]struct{}) for _, host := range c.hosts { @@ -677,7 +675,11 @@ func (c *TestCluster) AddHost(h *Host) { res, err := c.Bus.Wallet(context.Background()) c.tt.OK(err) - fundAmt := res.Confirmed.Div64(2).Div64(uint64(len(c.hosts))) // 50% of bus balance + // Fund host with one blockreward + fundAmt := c.cm.TipState().BlockReward() + for fundAmt.Cmp(res.Confirmed) > 0 { + c.tt.Fatal("not enough funds to fund host") + } var scos []types.SiacoinOutput for i := 0; i < 10; i++ { scos = append(scos, types.SiacoinOutput{ @@ -833,43 +835,56 @@ func (c *TestCluster) waitForHostContracts(hosts map[types.PublicKey]struct{}) { }) } -// testNetwork returns a custom network for testing which matches the -// configuration of siad consensus in testing. 
-func testNetwork() *consensus.Network { - n := &consensus.Network{ - InitialCoinbase: types.Siacoins(300000), - MinimumCoinbase: types.Siacoins(299990), - InitialTarget: types.BlockID{4: 32}, +func (c *TestCluster) mineBlocks(addr types.Address, n uint64) error { + for i := uint64(0); i < n; i++ { + if block, found := coreutils.MineBlock(c.cm, addr, time.Second); !found { + return errors.New("failed to find block") + } else if err := c.Bus.AcceptBlock(context.Background(), block); err != nil { + return err + } } + return nil +} - n.HardforkDevAddr.Height = 3 - n.HardforkDevAddr.OldAddress = types.Address{} - n.HardforkDevAddr.NewAddress = types.Address{} - - n.HardforkTax.Height = 10 - - n.HardforkStorageProof.Height = 10 - - n.HardforkOak.Height = 20 - n.HardforkOak.FixHeight = 23 - n.HardforkOak.GenesisTimestamp = time.Now().Add(-1e6 * time.Second) +func convertToCore(siad encoding.SiaMarshaler, core types.DecoderFrom) { + var buf bytes.Buffer + siad.MarshalSia(&buf) + d := types.NewBufDecoder(buf.Bytes()) + core.DecodeFrom(d) + if d.Err() != nil { + panic(d.Err()) + } +} - n.HardforkASIC.Height = 5 - n.HardforkASIC.OakTime = 10000 * time.Second - n.HardforkASIC.OakTarget = types.BlockID{255, 255} +// testNetwork returns a modified version of Zen used for testing +func testNetwork() (*consensus.Network, types.Block) { + // use a modified version of Zen + n, genesis := chain.TestnetZen() - n.HardforkFoundation.Height = 50 - n.HardforkFoundation.PrimaryAddress = types.StandardUnlockHash(types.GeneratePrivateKey().PublicKey()) - n.HardforkFoundation.FailsafeAddress = types.StandardUnlockHash(types.GeneratePrivateKey().PublicKey()) + // we have to set the initial target to 128 to ensure blocks we mine match + // the PoW testnet in siad testnet consensu + n.InitialTarget = types.BlockID{0x80} - // make it difficult to reach v2 in most tests + // we have to make minimum coinbase get hit after 10 blocks to ensure we + // match the siad test network settings, otherwise 
the blocksubsidy is + // considered invalid after 10 blocks + n.MinimumCoinbase = types.Siacoins(299990) + n.HardforkDevAddr.Height = 1 + n.HardforkTax.Height = 1 + n.HardforkStorageProof.Height = 1 + n.HardforkOak.Height = 1 + n.HardforkASIC.Height = 1 + n.HardforkFoundation.Height = 1 n.HardforkV2.AllowHeight = 1000 n.HardforkV2.RequireHeight = 1020 - return n + // TODO: remove once we got rid of all siad dependencies + convertToCore(stypes.GenesisBlock, (*types.V1Block)(&genesis)) + return n, genesis } func testBusCfg() node.BusConfig { + network, genesis := testNetwork() return node.BusConfig{ Bus: config.Bus{ AnnouncementMaxAgeHours: 24 * 7 * 52, // 1 year @@ -879,7 +894,8 @@ func testBusCfg() node.BusConfig { UsedUTXOExpiry: time.Minute, SlabBufferCompletionThreshold: 0, }, - Network: testNetwork(), + Network: network, + Genesis: genesis, SlabPruningInterval: time.Second, SlabPruningCooldown: 10 * time.Millisecond, } diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 8d51a8e9c..644ac7ad7 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -21,12 +21,12 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" + "go.sia.tech/coreutils/wallet" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/test" "go.sia.tech/renterd/object" - "go.sia.tech/renterd/wallet" "go.uber.org/zap" "go.uber.org/zap/zapcore" "lukechampine.com/frand" @@ -102,7 +102,7 @@ func TestNewTestCluster(t *testing.T) { // revision first. 
cs, err := cluster.Bus.ConsensusState(context.Background()) tt.OK(err) - cluster.MineBlocks(int(contract.WindowStart - cs.BlockHeight - 4)) + cluster.MineBlocks(contract.WindowStart - cs.BlockHeight - 4) cluster.Sync() if cs.LastBlockTime.IsZero() { t.Fatal("last block time not set") @@ -590,38 +590,6 @@ func TestUploadDownloadBasic(t *testing.T) { t.Fatalf("mismatch for offset %v", offset) } } - - // fetch the contracts. - contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{}) - tt.OK(err) - - // broadcast the revision for each contract and assert the revision height - // is 0. - for _, c := range contracts { - if c.RevisionHeight != 0 { - t.Fatal("revision height should be 0") - } - tt.OK(w.RHPBroadcast(context.Background(), c.ID)) - } - - // mine a block to get the revisions mined. - cluster.MineBlocks(1) - - // check the revision height was updated. - tt.Retry(100, 100*time.Millisecond, func() error { - // fetch the contracts. - contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{}) - if err != nil { - return err - } - // assert the revision height was updated. 
- for _, c := range contracts { - if c.RevisionHeight == 0 { - return errors.New("revision height should be > 0") - } - } - return nil - }) } // TestUploadDownloadExtended is an integration test that verifies objects can @@ -943,17 +911,60 @@ func TestUploadDownloadSpending(t *testing.T) { tt.OK(err) } +func TestContractApplyChainUpdates(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + // create a test cluster without autopilot + cluster := newTestCluster(t, testClusterOptions{skipRunningAutopilot: true}) + defer cluster.Shutdown() + + // convenience variables + w := cluster.Worker + b := cluster.Bus + tt := cluster.tt + + // add a host + hosts := cluster.AddHosts(1) + h, err := b.Host(context.Background(), hosts[0].PublicKey()) + tt.OK(err) + + // manually form a contract with the host + cs, _ := b.ConsensusState(context.Background()) + wallet, _ := b.Wallet(context.Background()) + rev, _, _ := w.RHPForm(context.Background(), cs.BlockHeight+test.AutopilotConfig.Contracts.Period+test.AutopilotConfig.Contracts.RenewWindow, h.PublicKey, h.NetAddress, wallet.Address, types.Siacoins(1), types.Siacoins(1)) + contract, err := b.AddContract(context.Background(), rev, rev.Revision.MissedHostPayout().Sub(types.Siacoins(1)), types.Siacoins(1), cs.BlockHeight, api.ContractStatePending) + tt.OK(err) + + // assert revision height is 0 + if contract.RevisionHeight != 0 { + t.Fatalf("expected revision height to be 0, got %v", contract.RevisionHeight) + } + + // broadcast the revision for each contract + fcid := contract.ID + tt.OK(w.RHPBroadcast(context.Background(), fcid)) + cluster.MineBlocks(1) + + // check the revision height was updated. + tt.Retry(100, 100*time.Millisecond, func() error { + c, err := cluster.Bus.Contract(context.Background(), fcid) + tt.OK(err) + if c.RevisionHeight == 0 { + return fmt.Errorf("contract %v should have been revised", c.ID) + } + return nil + }) +} + // TestEphemeralAccounts tests the use of ephemeral accounts. 
func TestEphemeralAccounts(t *testing.T) { if testing.Short() { t.SkipNow() } - dir := t.TempDir() - cluster := newTestCluster(t, testClusterOptions{ - dir: dir, - logger: zap.NewNop(), - }) + cluster := newTestCluster(t, testClusterOptions{}) defer cluster.Shutdown() tt := cluster.tt @@ -1350,7 +1361,7 @@ func TestContractArchival(t *testing.T) { endHeight := contracts[0].WindowEnd cs, err := cluster.Bus.ConsensusState(context.Background()) tt.OK(err) - cluster.MineBlocks(int(endHeight - cs.BlockHeight + 1)) + cluster.MineBlocks(endHeight - cs.BlockHeight + 1) // check that we have 0 contracts tt.Retry(100, 100*time.Millisecond, func() error { @@ -1693,8 +1704,6 @@ func TestUploadPacking(t *testing.T) { } func TestWallet(t *testing.T) { - t.Skip("TODO: re-enable after our subscriber processes blocks properly") - if testing.Short() { t.SkipNow() } @@ -2197,7 +2206,7 @@ func TestWalletSendUnconfirmed(t *testing.T) { Value: toSend, }, }, false) - tt.AssertIs(err, wallet.ErrInsufficientBalance) + tt.AssertIs(err, wallet.ErrNotEnoughFunds) // try again - this time using unconfirmed transactions tt.OK(b.SendSiacoins(context.Background(), []types.SiacoinOutput{ @@ -2232,46 +2241,48 @@ func TestWalletSendUnconfirmed(t *testing.T) { } func TestWalletFormUnconfirmed(t *testing.T) { - // New cluster with autopilot disabled + // create cluster without autopilot cfg := clusterOptsDefault cfg.skipSettingAutopilot = true cluster := newTestCluster(t, cfg) defer cluster.Shutdown() + + // convenience variables b := cluster.Bus tt := cluster.tt - // Add a host. + // add a host (non-blocking) cluster.AddHosts(1) - // Send the full balance back to the wallet to make sure it's all - // unconfirmed. 
+ // send all money to ourselves, making sure it's unconfirmed + feeReserve := types.Siacoins(1).Div64(100) wr, err := b.Wallet(context.Background()) tt.OK(err) tt.OK(b.SendSiacoins(context.Background(), []types.SiacoinOutput{ { Address: wr.Address, - Value: wr.Confirmed.Sub(types.Siacoins(1).Div64(100)), // leave some for the fee + Value: wr.Confirmed.Sub(feeReserve), // leave some for the fee }, }, false)) - // There should be hardly any money in the wallet. + // check wallet only has the reserve in the confirmed balance wr, err = b.Wallet(context.Background()) tt.OK(err) - if wr.Confirmed.Sub(wr.Unconfirmed).Cmp(types.Siacoins(1).Div64(100)) > 0 { + if wr.Confirmed.Sub(wr.Unconfirmed).Cmp(feeReserve) > 0 { t.Fatal("wallet should have hardly any confirmed balance") } - // There shouldn't be any contracts at this point. + // there shouldn't be any contracts yet contracts, err := b.Contracts(context.Background(), api.ContractsOpts{}) tt.OK(err) if len(contracts) != 0 { t.Fatal("expected 0 contracts", len(contracts)) } - // Enable autopilot by setting it. + // enable the autopilot by configuring it cluster.UpdateAutopilotConfig(context.Background(), test.AutopilotConfig) - // Wait for a contract to form. 
+ // wait for a contract to form contractsFormed := cluster.WaitForContracts() if len(contractsFormed) != 1 { t.Fatal("expected 1 contract", len(contracts)) diff --git a/internal/test/e2e/host.go b/internal/test/e2e/host.go index 6100adad5..fcba48d61 100644 --- a/internal/test/e2e/host.go +++ b/internal/test/e2e/host.go @@ -32,7 +32,10 @@ import ( "go.uber.org/zap" ) -const blocksPerMonth = 144 * 30 +const ( + blocksPerDay = 144 + blocksPerMonth = blocksPerDay * 30 +) type stubMetricReporter struct{} diff --git a/internal/test/e2e/metrics_test.go b/internal/test/e2e/metrics_test.go index aaa139102..eb40c787b 100644 --- a/internal/test/e2e/metrics_test.go +++ b/internal/test/e2e/metrics_test.go @@ -80,12 +80,12 @@ func TestMetrics(t *testing.T) { } // check wallet metrics + t.Skip("TODO: check wallet metrics") wm, err := b.WalletMetrics(context.Background(), start, 10, time.Minute, api.WalletMetricsQueryOpts{}) tt.OK(err) if len(wm) == 0 { return errors.New("no wallet metrics") } - return nil }) } diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index de948c970..cc8231011 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -13,6 +13,7 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" "go.sia.tech/renterd/internal/test" + "go.uber.org/zap/zapcore" ) func TestHostPruning(t *testing.T) { @@ -21,8 +22,12 @@ func TestHostPruning(t *testing.T) { } // create a new test cluster - cluster := newTestCluster(t, clusterOptsDefault) + opts := clusterOptsDefault + opts.logger = newTestLoggerCustom(zapcore.DebugLevel) + cluster := newTestCluster(t, opts) defer cluster.Shutdown() + + // convenience variables b := cluster.Bus w := cluster.Worker a := cluster.Autopilot @@ -68,20 +73,8 @@ func TestHostPruning(t *testing.T) { // wait for the autopilot loop to finish at least once recordFailedInteractions(9, h1.PublicKey()) - // trigger the autopilot loop twice, failing to trigger it twice shouldn't 
- // fail the test, this avoids an NDF on windows - remaining := 2 - for i := 1; i < 100; i++ { - triggered, err := a.Trigger(false) - tt.OK(err) - if triggered { - remaining-- - if remaining == 0 { - break - } - } - time.Sleep(50 * time.Millisecond) - } + // trigger the autopilot + tt.OKAll(a.Trigger(true)) // assert the host was not pruned hostss, err := b.Hosts(context.Background(), api.GetHostsOptions{}) @@ -99,6 +92,7 @@ func TestHostPruning(t *testing.T) { hostss, err = b.Hosts(context.Background(), api.GetHostsOptions{}) tt.OK(err) if len(hostss) != 0 { + a.Trigger(true) // trigger autopilot return fmt.Errorf("host was not pruned, %+v", hostss[0].Interactions) } return nil diff --git a/stores/hostdb.go b/stores/hostdb.go index 65363c060..f13320db9 100644 --- a/stores/hostdb.go +++ b/stores/hostdb.go @@ -15,7 +15,6 @@ import ( "go.sia.tech/coreutils/chain" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" - "go.sia.tech/siad/modules" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -109,7 +108,6 @@ type ( dbConsensusInfo struct { Model - CCID []byte Height uint64 BlockID hash256 } @@ -906,39 +904,6 @@ func (ss *SQLStore) RecordPriceTables(ctx context.Context, priceTableUpdate []ho }) } -func (ss *SQLStore) processConsensusChangeHostDB(cc modules.ConsensusChange) { - height := uint64(cc.InitialHeight()) - for range cc.RevertedBlocks { - height-- - } - - var newAnnouncements []announcement - for _, sb := range cc.AppliedBlocks { - var b types.Block - convertToCore(sb, (*types.V1Block)(&b)) - - // Process announcements, but only if they are not too old. 
- if b.Timestamp.After(time.Now().Add(-ss.announcementMaxAge)) { - chain.ForEachHostAnnouncement(types.Block(b), func(hk types.PublicKey, ha chain.HostAnnouncement) { - if ha.NetAddress == "" { - return - } - newAnnouncements = append(newAnnouncements, announcement{ - HostAnnouncement: ha, - blockHeight: height, - blockID: b.ID(), - hk: hk, - timestamp: b.Timestamp, - }) - ss.unappliedHostKeys[hk] = struct{}{} - }) - } - height++ - } - - ss.unappliedAnnouncements = append(ss.unappliedAnnouncements, newAnnouncements...) -} - // excludeBlocked can be used as a scope for a db transaction to exclude blocked // hosts. func (ss *SQLStore) excludeBlocked(db *gorm.DB) *gorm.DB { @@ -987,18 +952,6 @@ func (ss *SQLStore) isBlocked(h dbHost) (blocked bool) { return } -func updateCCID(tx *gorm.DB, newCCID modules.ConsensusChangeID, newTip types.ChainIndex) error { - return tx.Model(&dbConsensusInfo{}).Where(&dbConsensusInfo{ - Model: Model{ - ID: consensusInfoID, - }, - }).Updates(map[string]interface{}{ - "CCID": newCCID[:], - "height": newTip.Height, - "block_id": hash256(newTip.ID), - }).Error -} - func updateChainIndex(tx *gorm.DB, newTip types.ChainIndex) error { return tx.Model(&dbConsensusInfo{}).Where(&dbConsensusInfo{ Model: Model{ diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index cbbc4428a..3f007088a 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -14,8 +14,6 @@ import ( "go.sia.tech/coreutils/chain" "go.sia.tech/renterd/api" "go.sia.tech/renterd/hostdb" - "go.sia.tech/siad/modules" - stypes "go.sia.tech/siad/types" "gorm.io/gorm" ) @@ -27,9 +25,6 @@ func (s *SQLStore) insertTestAnnouncement(a announcement) error { // SQLite DB. func TestSQLHostDB(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - if ss.ccid != modules.ConsensusChangeBeginning { - t.Fatal("wrong ccid", ss.ccid, modules.ConsensusChangeBeginning) - } // Try to fetch a random host. Should fail. 
ctx := context.Background() @@ -117,28 +112,6 @@ func TestSQLHostDB(t *testing.T) { if h3.KnownSince.IsZero() { t.Fatal("known since not set") } - - // Wait for the persist interval to pass to make sure an empty consensus - // change triggers a persist. - time.Sleep(testPersistInterval) - - // Apply a consensus change. - ccid2 := modules.ConsensusChangeID{1, 2, 3} - ss.ProcessConsensusChange(modules.ConsensusChange{ - ID: ccid2, - AppliedBlocks: []stypes.Block{{}}, - AppliedDiffs: []modules.ConsensusChangeDiffs{{}}, - }) - - // Connect to the same DB again. - hdb2 := ss.Reopen() - if hdb2.ccid != ccid2 { - t.Fatal("ccid wasn't updated", hdb2.ccid, ccid2) - } - _, err = hdb2.Host(ctx, hk) - if err != nil { - t.Fatal(err) - } } func (s *SQLStore) addTestScan(hk types.PublicKey, t time.Time, err error, settings rhpv2.HostSettings) error { @@ -1021,30 +994,32 @@ func TestAnnouncementMaxAge(t *testing.T) { db := newTestSQLStore(t, defaultTestSQLStoreConfig) defer db.Close() - if len(db.unappliedAnnouncements) != 0 { + // assert we don't have any announcements + if len(db.cs.announcements) != 0 { t.Fatal("expected 0 announcements") } - db.processConsensusChangeHostDB( - modules.ConsensusChange{ - ID: modules.ConsensusChangeID{1}, - BlockHeight: 1, - AppliedBlocks: []stypes.Block{ - { - Timestamp: stypes.Timestamp(time.Now().Add(-time.Hour).Add(-time.Minute).Unix()), - Transactions: []stypes.Transaction{newTestTransaction(newTestHostAnnouncement("foo.com:1000"))}, - }, - { - Timestamp: stypes.Timestamp(time.Now().Add(-time.Hour).Add(time.Minute).Unix()), - Transactions: []stypes.Transaction{newTestTransaction(newTestHostAnnouncement("foo.com:1001"))}, - }, - }, - }, - ) + // fabricate two blocks with announcements, one before the cutoff and one after + b1 := types.Block{ + Transactions: []types.Transaction{newTestTransaction(newTestHostAnnouncement("foo.com:1000"))}, + Timestamp: time.Now().Add(-db.cs.announcementMaxAge).Add(-time.Second), + } + b2 := types.Block{ + 
Transactions: []types.Transaction{newTestTransaction(newTestHostAnnouncement("foo.com:1001"))}, + Timestamp: time.Now().Add(-db.cs.announcementMaxAge).Add(time.Second), + } - if len(db.unappliedAnnouncements) != 1 { + // process b1, expect no announcements + db.cs.processChainApplyUpdateHostDB(&chain.ApplyUpdate{Block: b1}) + if len(db.cs.announcements) != 0 { + t.Fatal("expected 0 announcements") + } + + // process b2, expect 1 announcement + db.cs.processChainApplyUpdateHostDB(&chain.ApplyUpdate{Block: b2}) + if len(db.cs.announcements) != 1 { t.Fatal("expected 1 announcement") - } else if db.unappliedAnnouncements[0].NetAddress != "foo.com:1001" { + } else if db.cs.announcements[0].HostAnnouncement.NetAddress != "foo.com:1001" { t.Fatal("unexpected announcement") } } @@ -1072,10 +1047,15 @@ func (s *SQLStore) addTestHost(hk types.PublicKey) error { // addCustomTestHost ensures a host with given hostkey and net address exists. func (s *SQLStore) addCustomTestHost(hk types.PublicKey, na string) error { - s.unappliedHostKeys[hk] = struct{}{} - s.unappliedAnnouncements = append(s.unappliedAnnouncements, newTestAnnouncement(hk, na)) - s.lastSave = time.Now().Add(s.persistInterval * -2) - return s.applyUpdates(false) + // NOTE: insert through subscriber to ensure allowlist/blocklist get updated + s.cs.announcements = append(s.cs.announcements, announcement{ + blockHeight: s.cs.tip.Height, + blockID: s.cs.tip.ID, + hk: hk, + timestamp: time.Now().UTC().Round(time.Second), + HostAnnouncement: chain.HostAnnouncement{NetAddress: na}, + }) + return s.cs.commit() } // hosts returns all hosts in the db. 
Only used in testing since preloading all @@ -1132,6 +1112,6 @@ func newTestHostAnnouncement(na string) (chain.HostAnnouncement, types.PrivateKe return a, sk } -func newTestTransaction(ha chain.HostAnnouncement, sk types.PrivateKey) stypes.Transaction { - return stypes.Transaction{ArbitraryData: [][]byte{ha.ToArbitraryData(sk)}} +func newTestTransaction(ha chain.HostAnnouncement, sk types.PrivateKey) types.Transaction { + return types.Transaction{ArbitraryData: [][]byte{ha.ToArbitraryData(sk)}} } diff --git a/stores/metadata.go b/stores/metadata.go index 529d7ec89..8f44a782c 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -15,7 +15,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/object" - "go.sia.tech/siad/modules" "go.uber.org/zap" "gorm.io/gorm" "gorm.io/gorm/clause" @@ -714,7 +713,7 @@ func (s *SQLStore) AddContract(ctx context.Context, c rhpv2.ContractRevision, co return } - s.addKnownContract(types.FileContractID(added.FCID)) + s.cs.addKnownContract(types.FileContractID(added.FCID)) return added.convert(), nil } @@ -834,7 +833,7 @@ func (s *SQLStore) AddRenewedContract(ctx context.Context, c rhpv2.ContractRevis return err } - s.addKnownContract(c.ID()) + s.cs.addKnownContract(c.ID()) renewed = newContract return nil }); err != nil { @@ -914,10 +913,6 @@ func (s *SQLStore) Contract(ctx context.Context, id types.FileContractID) (api.C } func (s *SQLStore) ContractRoots(ctx context.Context, id types.FileContractID) (roots []types.Hash256, err error) { - if !s.isKnownContract(id) { - return nil, api.ErrContractNotFound - } - var dbRoots []hash256 if err = s.db. Raw(` @@ -993,10 +988,6 @@ SELECT c.fcid, MAX(c.size) as contract_size, COUNT(cs.db_sector_id) * ? 
as secto } func (s *SQLStore) ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) { - if !s.isKnownContract(id) { - return api.ContractSize{}, api.ErrContractNotFound - } - var size struct { Size uint64 `json:"size"` Prunable uint64 `json:"prunable"` @@ -1428,19 +1419,6 @@ func (s *SQLStore) RecordContractSpending(ctx context.Context, records []api.Con return nil } -func (s *SQLStore) addKnownContract(fcid types.FileContractID) { - s.mu.Lock() - defer s.mu.Unlock() - s.knownContracts[fcid] = struct{}{} -} - -func (s *SQLStore) isKnownContract(fcid types.FileContractID) bool { - s.mu.Lock() - defer s.mu.Unlock() - _, found := s.knownContracts[fcid] - return found -} - func fetchUsedContracts(tx *gorm.DB, usedContracts map[types.PublicKey]map[types.FileContractID]struct{}) (map[types.FileContractID]dbContract, error) { fcids := make([]fileContractID, 0, len(usedContracts)) for _, hostFCIDs := range usedContracts { @@ -1870,7 +1848,7 @@ func (ss *SQLStore) UpdateSlab(ctx context.Context, s object.Slab, contractSet s Preload("Shards"). Take(&slab). Error; err == gorm.ErrRecordNotFound { - return fmt.Errorf("slab with key '%s' not found: %w", string(key), err) + return fmt.Errorf("slab with key '%s' not found: %w", s.Key.String(), err) } else if err != nil { return err } @@ -2905,95 +2883,6 @@ func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, sortBy, sort }, nil } -func (ss *SQLStore) processConsensusChangeContracts(cc modules.ConsensusChange) { - height := uint64(cc.InitialHeight()) - for _, sb := range cc.RevertedBlocks { - var b types.Block - convertToCore(sb, (*types.V1Block)(&b)) - - // revert contracts that got reorged to "pending". 
- for _, txn := range b.Transactions { - // handle contracts - for i := range txn.FileContracts { - fcid := txn.FileContractID(i) - if ss.isKnownContract(fcid) { - ss.unappliedContractState[fcid] = contractStatePending // revert from 'active' to 'pending' - ss.logger.Infow("contract state changed: active -> pending", - "fcid", fcid, - "reason", "contract reverted") - } - } - // handle contract revision - for _, rev := range txn.FileContractRevisions { - if ss.isKnownContract(rev.ParentID) { - if rev.RevisionNumber == math.MaxUint64 && rev.Filesize == 0 { - ss.unappliedContractState[rev.ParentID] = contractStateActive // revert from 'complete' to 'active' - ss.logger.Infow("contract state changed: complete -> active", - "fcid", rev.ParentID, - "reason", "final revision reverted") - } - } - } - // handle storage proof - for _, sp := range txn.StorageProofs { - if ss.isKnownContract(sp.ParentID) { - ss.unappliedContractState[sp.ParentID] = contractStateActive // revert from 'complete' to 'active' - ss.logger.Infow("contract state changed: complete -> active", - "fcid", sp.ParentID, - "reason", "storage proof reverted") - } - } - } - height-- - } - - for _, sb := range cc.AppliedBlocks { - var b types.Block - convertToCore(sb, (*types.V1Block)(&b)) - - // Update RevisionHeight and RevisionNumber for our contracts. 
- for _, txn := range b.Transactions { - // handle contracts - for i := range txn.FileContracts { - fcid := txn.FileContractID(i) - if ss.isKnownContract(fcid) { - ss.unappliedContractState[fcid] = contractStateActive // 'pending' -> 'active' - ss.logger.Infow("contract state changed: pending -> active", - "fcid", fcid, - "reason", "contract confirmed") - } - } - // handle contract revision - for _, rev := range txn.FileContractRevisions { - if ss.isKnownContract(rev.ParentID) { - ss.unappliedRevisions[types.FileContractID(rev.ParentID)] = revisionUpdate{ - height: height, - number: rev.RevisionNumber, - size: rev.Filesize, - } - if rev.RevisionNumber == math.MaxUint64 && rev.Filesize == 0 { - ss.unappliedContractState[rev.ParentID] = contractStateComplete // renewed: 'active' -> 'complete' - ss.logger.Infow("contract state changed: active -> complete", - "fcid", rev.ParentID, - "reason", "final revision confirmed") - } - } - } - // handle storage proof - for _, sp := range txn.StorageProofs { - if ss.isKnownContract(sp.ParentID) { - ss.unappliedProofs[sp.ParentID] = height - ss.unappliedContractState[sp.ParentID] = contractStateComplete // storage proof: 'active' -> 'complete' - ss.logger.Infow("contract state changed: active -> complete", - "fcid", sp.ParentID, - "reason", "storage proof confirmed") - } - } - } - height++ - } -} - func buildMarkerExpr(db *gorm.DB, bucket, prefix, marker, sortBy, sortDir string) (markerExpr clause.Expr, orderBy clause.OrderBy, err error) { // no marker if marker == "" { diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 99256ac6d..f5f870e84 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2993,12 +2993,6 @@ func TestContractSizes(t *testing.T) { if n := prunableData(nil); n != 0 { t.Fatal("expected no prunable data", n) } - - // assert passing a non-existent fcid returns an error - _, err = ss.ContractSize(context.Background(), types.FileContractID{9}) - if err != api.ErrContractNotFound { - 
t.Fatal(err) - } } // dbObject retrieves a dbObject from the store. diff --git a/stores/migrations.go b/stores/migrations.go index 51ca729de..00950cb88 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -62,6 +62,12 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { return performMigration(tx, dbIdentifier, "00006_peer_store", logger) }, }, + { + ID: "00007_coreutils_wallet", + Migrate: func(tx *gorm.DB) error { + return performMigration(tx, dbIdentifier, "00007_coreutils_wallet", logger) + }, + }, + } // Create migrator. diff --git a/stores/migrations/mysql/main/migration_00007_coreutils_wallet.sql b/stores/migrations/mysql/main/migration_00007_coreutils_wallet.sql new file mode 100644 index 000000000..144e9f738 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00007_coreutils_wallet.sql @@ -0,0 +1,45 @@ +-- drop tables +DROP TABLE IF EXISTS `siacoin_elements`; +DROP TABLE IF EXISTS `transactions`; + +-- drop column +ALTER TABLE `consensus_infos` DROP COLUMN `cc_id`; + +-- dbWalletEvent +CREATE TABLE `wallet_events` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `event_id` varbinary(32) NOT NULL, + `inflow` longtext, + `outflow` longtext, + `transaction` longtext, + `maturity_height` bigint unsigned DEFAULT NULL, + `source` longtext, + `timestamp` bigint DEFAULT NULL, + `height` bigint unsigned DEFAULT NULL, + `block_id` varbinary(32) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `event_id` (`event_id`), + KEY `idx_wallet_events_maturity_height` (`maturity_height`), + KEY `idx_wallet_events_source` (`source`(191)), -- 191 is the max length for utf8mb4 + KEY `idx_wallet_events_timestamp` (`timestamp`), + KEY `idx_wallet_events_height` (`height`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + +-- dbWalletOutput +CREATE TABLE `wallet_outputs` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `output_id` 
varbinary(32) NOT NULL, + `leaf_index` bigint, + `merkle_proof` blob NOT NULL, + `value` longtext, + `address` varbinary(32) DEFAULT NULL, + `maturity_height` bigint unsigned DEFAULT NULL, + `height` bigint unsigned DEFAULT NULL, + `block_id` varbinary(32) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `output_id` (`output_id`), + KEY `idx_wallet_outputs_maturity_height` (`maturity_height`), + KEY `idx_wallet_outputs_height` (`height`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 7a1f959a4..3a435b5bc 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -70,7 +70,6 @@ CREATE TABLE `buffered_slabs` ( CREATE TABLE `consensus_infos` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `created_at` datetime(3) DEFAULT NULL, - `cc_id` longblob, `height` bigint unsigned DEFAULT NULL, `block_id` longblob, PRIMARY KEY (`id`) @@ -345,20 +344,6 @@ CREATE TABLE `settings` ( KEY `idx_settings_key` (`key`) ) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; --- dbSiacoinElement -CREATE TABLE `siacoin_elements` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT, - `created_at` datetime(3) DEFAULT NULL, - `value` longtext, - `address` varbinary(32) DEFAULT NULL, - `output_id` varbinary(32) NOT NULL, - `maturity_height` bigint unsigned DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `output_id` (`output_id`), - KEY `idx_siacoin_elements_output_id` (`output_id`), - KEY `idx_siacoin_elements_maturity_height` (`maturity_height`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; - -- dbSlice CREATE TABLE `slices` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, @@ -379,23 +364,6 @@ CREATE TABLE `slices` ( CONSTRAINT `fk_slabs_slices` FOREIGN KEY (`db_slab_id`) REFERENCES `slabs` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; --- dbTransaction 
-CREATE TABLE `transactions` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT, - `created_at` datetime(3) DEFAULT NULL, - `raw` longtext, - `height` bigint unsigned DEFAULT NULL, - `block_id` varbinary(32) DEFAULT NULL, - `transaction_id` varbinary(32) NOT NULL, - `inflow` longtext, - `outflow` longtext, - `timestamp` bigint DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `transaction_id` (`transaction_id`), - KEY `idx_transactions_transaction_id` (`transaction_id`), - KEY `idx_transactions_timestamp` (`timestamp`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; - -- dbWebhook CREATE TABLE `webhooks` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, @@ -480,5 +448,44 @@ CREATE TABLE `syncer_bans` ( KEY `idx_syncer_bans_expiration` (`expiration`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; +-- dbWalletEvent +CREATE TABLE `wallet_events` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `event_id` varbinary(32) NOT NULL, + `inflow` longtext, + `outflow` longtext, + `transaction` longtext, + `maturity_height` bigint unsigned DEFAULT NULL, + `source` longtext, + `timestamp` bigint DEFAULT NULL, + `height` bigint unsigned DEFAULT NULL, + `block_id` varbinary(32) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `event_id` (`event_id`), + KEY `idx_wallet_events_maturity_height` (`maturity_height`), + KEY `idx_wallet_events_source` (`source`(191)), -- 191 is the max length for utf8mb4 + KEY `idx_wallet_events_timestamp` (`timestamp`), + KEY `idx_wallet_events_height` (`height`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + +-- dbWalletOutput +CREATE TABLE `wallet_outputs` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `output_id` varbinary(32) NOT NULL, + `leaf_index` bigint, + `merkle_proof` blob NOT NULL, + `value` longtext, + `address` varbinary(32) DEFAULT NULL, + `maturity_height` bigint unsigned DEFAULT NULL, + 
`height` bigint unsigned DEFAULT NULL, + `block_id` varbinary(32) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `output_id` (`output_id`), + KEY `idx_wallet_outputs_maturity_height` (`maturity_height`), + KEY `idx_wallet_outputs_height` (`height`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + -- create default bucket -INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); \ No newline at end of file +INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); diff --git a/stores/migrations/sqlite/main/migration_00007_coreutils_wallet.sql b/stores/migrations/sqlite/main/migration_00007_coreutils_wallet.sql new file mode 100644 index 000000000..7d67af025 --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00007_coreutils_wallet.sql @@ -0,0 +1,20 @@ +-- drop tables +DROP TABLE IF EXISTS `siacoin_elements`; +DROP TABLE IF EXISTS `transactions`; + +-- drop column +ALTER TABLE `consensus_infos` DROP COLUMN `cc_id`; + +-- dbWalletEvent +CREATE TABLE `wallet_events` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`event_id` blob NOT NULL,`inflow` text,`outflow` text,`transaction` text,`maturity_height` integer,`source` text,`timestamp` integer,`height` integer, `block_id` blob); +CREATE UNIQUE INDEX `idx_wallet_events_event_id` ON `wallet_events`(`event_id`); +CREATE INDEX `idx_wallet_events_maturity_height` ON `wallet_events`(`maturity_height`); +CREATE INDEX `idx_wallet_events_source` ON `wallet_events`(`source`); +CREATE INDEX `idx_wallet_events_timestamp` ON `wallet_events`(`timestamp`); +CREATE INDEX `idx_wallet_events_height` ON `wallet_events`(`height`); + +-- dbWalletOutput +CREATE TABLE `wallet_outputs` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`output_id` blob NOT NULL,`leaf_index` integer,`merkle_proof` blob NOT NULL,`value` text,`address` blob,`maturity_height` integer,`height` integer, `block_id` blob); +CREATE UNIQUE INDEX `idx_wallet_outputs_output_id` 
ON `wallet_outputs`(`output_id`); +CREATE INDEX `idx_wallet_outputs_maturity_height` ON `wallet_outputs`(`maturity_height`); +CREATE INDEX `idx_wallet_outputs_height` ON `wallet_outputs`(`height`); \ No newline at end of file diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 2e696954c..b2c7c90c6 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -101,7 +101,7 @@ CREATE INDEX `idx_slices_db_multipart_part_id` ON `slices`(`db_multipart_part_id CREATE TABLE `host_announcements` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`host_key` blob NOT NULL,`block_height` integer,`block_id` text,`net_address` text); -- dbConsensusInfo -CREATE TABLE `consensus_infos` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`cc_id` blob,`height` integer,`block_id` blob); +CREATE TABLE `consensus_infos` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`height` integer,`block_id` blob); -- dbBlocklistEntry CREATE TABLE `host_blocklist_entries` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`entry` text NOT NULL UNIQUE); @@ -119,16 +119,6 @@ CREATE INDEX `idx_host_allowlist_entries_entry` ON `host_allowlist_entries`(`ent CREATE TABLE `host_allowlist_entry_hosts` (`db_allowlist_entry_id` integer,`db_host_id` integer,PRIMARY KEY (`db_allowlist_entry_id`,`db_host_id`),CONSTRAINT `fk_host_allowlist_entry_hosts_db_allowlist_entry` FOREIGN KEY (`db_allowlist_entry_id`) REFERENCES `host_allowlist_entries`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_host_allowlist_entry_hosts_db_host` FOREIGN KEY (`db_host_id`) REFERENCES `hosts`(`id`) ON DELETE CASCADE); CREATE INDEX `idx_host_allowlist_entry_hosts_db_host_id` ON `host_allowlist_entry_hosts`(`db_host_id`); --- dbSiacoinElement -CREATE TABLE `siacoin_elements` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`value` text,`address` blob,`output_id` blob NOT NULL 
UNIQUE,`maturity_height` integer); -CREATE INDEX `idx_siacoin_elements_maturity_height` ON `siacoin_elements`(`maturity_height`); -CREATE INDEX `idx_siacoin_elements_output_id` ON `siacoin_elements`(`output_id`); - --- dbTransaction -CREATE TABLE `transactions` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`raw` text,`height` integer,`block_id` blob,`transaction_id` blob NOT NULL UNIQUE,`inflow` text,`outflow` text,`timestamp` integer); -CREATE INDEX `idx_transactions_timestamp` ON `transactions`(`timestamp`); -CREATE INDEX `idx_transactions_transaction_id` ON `transactions`(`transaction_id`); - -- dbSetting CREATE TABLE `settings` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`key` text NOT NULL UNIQUE,`value` text NOT NULL); CREATE INDEX `idx_settings_key` ON `settings`(`key`); @@ -195,5 +185,19 @@ CREATE TABLE `syncer_bans` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` CREATE UNIQUE INDEX `idx_syncer_bans_net_cidr` ON `syncer_bans`(`net_cidr`); CREATE INDEX `idx_syncer_bans_expiration` ON `syncer_bans`(`expiration`); +-- dbWalletEvent +CREATE TABLE `wallet_events` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`event_id` blob NOT NULL,`inflow` text,`outflow` text,`transaction` text,`maturity_height` integer,`source` text,`timestamp` integer,`height` integer, `block_id` blob); +CREATE UNIQUE INDEX `idx_wallet_events_event_id` ON `wallet_events`(`event_id`); +CREATE INDEX `idx_wallet_events_maturity_height` ON `wallet_events`(`maturity_height`); +CREATE INDEX `idx_wallet_events_source` ON `wallet_events`(`source`); +CREATE INDEX `idx_wallet_events_timestamp` ON `wallet_events`(`timestamp`); +CREATE INDEX `idx_wallet_events_height` ON `wallet_events`(`height`); + +-- dbWalletOutput +CREATE TABLE `wallet_outputs` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`output_id` blob NOT NULL,`leaf_index` integer,`merkle_proof` blob NOT NULL,`value` text,`address` blob,`maturity_height` integer,`height` 
integer, `block_id` blob); +CREATE UNIQUE INDEX `idx_wallet_outputs_output_id` ON `wallet_outputs`(`output_id`); +CREATE INDEX `idx_wallet_outputs_maturity_height` ON `wallet_outputs`(`maturity_height`); +CREATE INDEX `idx_wallet_outputs_height` ON `wallet_outputs`(`height`); + -- create default bucket INSERT INTO buckets (created_at, name) VALUES (CURRENT_TIMESTAMP, 'default'); diff --git a/stores/peers.go b/stores/peers.go index fd8554b4b..de0f8c008 100644 --- a/stores/peers.go +++ b/stores/peers.go @@ -33,11 +33,6 @@ type ( } ) -var ( - // TODO: use syncer.ErrPeerNotFound when added - ErrPeerNotFound = errors.New("peer not found") -) - var ( _ syncer.PeerStore = (*SQLStore)(nil) ) @@ -88,6 +83,22 @@ func (s *SQLStore) Peers() ([]syncer.PeerInfo, error) { return infos, nil } +// PeerInfo returns the metadata for the specified peer or ErrPeerNotFound +// if the peer wasn't found in the store. +func (s *SQLStore) PeerInfo(addr string) (syncer.PeerInfo, error) { + var peer dbSyncerPeer + err := s.db. + Where("address = ?", addr). + Take(&peer). + Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return syncer.PeerInfo{}, syncer.ErrPeerNotFound + } else if err != nil { + return syncer.PeerInfo{}, err + } + return peer.info(), nil +} + // UpdatePeerInfo updates the metadata for the specified peer. If the peer // is not found, the error should be ErrPeerNotFound. func (s *SQLStore) UpdatePeerInfo(addr string, fn func(*syncer.PeerInfo)) error { @@ -98,7 +109,7 @@ func (s *SQLStore) UpdatePeerInfo(addr string, fn func(*syncer.PeerInfo)) error Take(&peer). 
Error if errors.Is(err, gorm.ErrRecordNotFound) { - return ErrPeerNotFound + return syncer.ErrPeerNotFound } else if err != nil { return err } diff --git a/stores/peers_test.go b/stores/peers_test.go index d21c675e2..64ecf7132 100644 --- a/stores/peers_test.go +++ b/stores/peers_test.go @@ -17,7 +17,7 @@ func TestPeers(t *testing.T) { // assert ErrPeerNotFound before we add it err := ss.UpdatePeerInfo(testPeer, func(info *syncer.PeerInfo) {}) - if err != ErrPeerNotFound { + if err != syncer.ErrPeerNotFound { t.Fatal("expected peer not found") } diff --git a/stores/sql.go b/stores/sql.go index 2745ffbf9..9a1fc73a0 100644 --- a/stores/sql.go +++ b/stores/sql.go @@ -13,10 +13,10 @@ import ( "go.sia.tech/core/types" "go.sia.tech/coreutils/chain" + "go.sia.tech/coreutils/syncer" "go.sia.tech/coreutils/wallet" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" - "go.sia.tech/siad/modules" "go.uber.org/zap" "gorm.io/driver/mysql" "gorm.io/driver/sqlite" @@ -74,49 +74,22 @@ type ( dbMetrics *gorm.DB logger *zap.SugaredLogger + // ObjectDB related fields slabBufferMgr *SlabBufferManager - retryTransactionIntervals []time.Duration - - // Persistence buffer - related fields. - lastSave time.Time - persistInterval time.Duration - persistMu sync.Mutex - persistTimer *time.Timer - unappliedAnnouncements []announcement - unappliedContractState map[types.FileContractID]contractState - unappliedHostKeys map[types.PublicKey]struct{} - unappliedRevisions map[types.FileContractID]revisionUpdate - unappliedProofs map[types.FileContractID]uint64 - unappliedOutputChanges []outputChange - unappliedTxnChanges []txnChange - - // HostDB related fields - announcementMaxAge time.Duration - - // SettingsDB related fields. + // SettingsDB related fields settingsMu sync.Mutex settings map[string]string - // WalletDB related fields. - walletAddress types.Address - - // Consensus related fields. 
- ccid modules.ConsensusChangeID - chainIndex types.ChainIndex + retryTransactionIntervals []time.Duration shutdownCtx context.Context shutdownCtxCancel context.CancelFunc - slabPruneSigChan chan struct{} - - wg sync.WaitGroup mu sync.Mutex hasAllowlist bool hasBlocklist bool closed bool - - knownContracts map[types.FileContractID]struct{} } revisionUpdate struct { @@ -174,14 +147,14 @@ func DBConfigFromEnv() (uri, user, password, dbName string) { // NewSQLStore uses a given Dialector to connect to a SQL database. NOTE: Only // pass migrate=true for the first instance of SQLHostDB if you connect via the // same Dialector multiple times. -func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { +func NewSQLStore(cfg Config) (*SQLStore, error) { // Sanity check announcement max age. if cfg.AnnouncementMaxAge == 0 { - return nil, modules.ConsensusChangeID{}, errors.New("announcementMaxAge must be non-zero") + return nil, errors.New("announcementMaxAge must be non-zero") } if err := os.MkdirAll(cfg.PartialSlabDir, 0700); err != nil { - return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to create partial slab dir: %v", err) + return nil, fmt.Errorf("failed to create partial slab dir '%s': %v", cfg.PartialSlabDir, err) } db, err := gorm.Open(cfg.Conn, &gorm.Config{ Logger: cfg.GormLogger, // custom logger @@ -189,13 +162,13 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { DisableNestedTransaction: true, }) if err != nil { - return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to open SQL db") + return nil, fmt.Errorf("failed to open SQL db") } dbMetrics, err := gorm.Open(cfg.ConnMetrics, &gorm.Config{ Logger: cfg.GormLogger, // custom logger }) if err != nil { - return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to open metrics db") + return nil, fmt.Errorf("failed to open metrics db") } l := cfg.Logger.Named("sql") @@ -210,82 +183,39 @@ func NewSQLStore(cfg Config) (*SQLStore, 
modules.ConsensusChangeID, error) { dbName = "MySQL" } if err != nil { - return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to fetch db version: %v", err) + return nil, fmt.Errorf("failed to fetch db version: %v", err) } l.Infof("Using %s version %s", dbName, dbVersion) // Perform migrations. if cfg.Migrate { if err := performMigrations(db, l); err != nil { - return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to perform migrations: %v", err) + return nil, fmt.Errorf("failed to perform migrations: %v", err) } if err := performMetricsMigrations(dbMetrics, l); err != nil { - return nil, modules.ConsensusChangeID{}, fmt.Errorf("failed to perform migrations for metrics db: %v", err) + return nil, fmt.Errorf("failed to perform migrations for metrics db: %v", err) } } - // Get latest consensus change ID or init db. - ci, ccid, err := initConsensusInfo(db) - if err != nil { - return nil, modules.ConsensusChangeID{}, err - } - // Check allowlist and blocklist counts allowlistCnt, err := tableCount(db, &dbAllowlistEntry{}) if err != nil { - return nil, modules.ConsensusChangeID{}, err + return nil, err } blocklistCnt, err := tableCount(db, &dbBlocklistEntry{}) if err != nil { - return nil, modules.ConsensusChangeID{}, err - } - - // Fetch contract ids. - var activeFCIDs, archivedFCIDs []fileContractID - if err := db.Model(&dbContract{}). - Select("fcid"). - Find(&activeFCIDs).Error; err != nil { - return nil, modules.ConsensusChangeID{}, err - } - if err := db.Model(&dbArchivedContract{}). - Select("fcid"). - Find(&archivedFCIDs).Error; err != nil { - return nil, modules.ConsensusChangeID{}, err - } - isOurContract := make(map[types.FileContractID]struct{}) - for _, fcid := range append(activeFCIDs, archivedFCIDs...) 
{ - isOurContract[types.FileContractID(fcid)] = struct{}{} + return nil, err } - // Create chain subscriber - cs := NewChainSubscriber(db, cfg.Logger, cfg.RetryTransactionIntervals, cfg.PersistInterval, cfg.WalletAddress, cfg.AnnouncementMaxAge) - shutdownCtx, shutdownCtxCancel := context.WithCancel(context.Background()) ss := &SQLStore{ - alerts: cfg.Alerts, - cs: cs, - db: db, - dbMetrics: dbMetrics, - logger: l, - knownContracts: isOurContract, - lastSave: time.Now(), - persistInterval: cfg.PersistInterval, - hasAllowlist: allowlistCnt > 0, - hasBlocklist: blocklistCnt > 0, - settings: make(map[string]string), - slabPruneSigChan: make(chan struct{}, 1), - unappliedContractState: make(map[types.FileContractID]contractState), - unappliedHostKeys: make(map[types.PublicKey]struct{}), - unappliedRevisions: make(map[types.FileContractID]revisionUpdate), - unappliedProofs: make(map[types.FileContractID]uint64), - - announcementMaxAge: cfg.AnnouncementMaxAge, - - walletAddress: cfg.WalletAddress, - chainIndex: types.ChainIndex{ - Height: ci.Height, - ID: types.BlockID(ci.BlockID), - }, + alerts: cfg.Alerts, + db: db, + dbMetrics: dbMetrics, + logger: l, + hasAllowlist: allowlistCnt > 0, + hasBlocklist: blocklistCnt > 0, + settings: make(map[string]string), retryTransactionIntervals: cfg.RetryTransactionIntervals, @@ -293,11 +223,16 @@ func NewSQLStore(cfg Config) (*SQLStore, modules.ConsensusChangeID, error) { shutdownCtxCancel: shutdownCtxCancel, } + ss.cs, err = newChainSubscriber(ss, cfg.Logger, cfg.RetryTransactionIntervals, cfg.PersistInterval, cfg.WalletAddress, cfg.AnnouncementMaxAge) + if err != nil { + return nil, err + } + ss.slabBufferMgr, err = newSlabBufferManager(ss, cfg.SlabBufferCompletionThreshold, cfg.PartialSlabDir) if err != nil { - return nil, modules.ConsensusChangeID{}, err + return nil, err } - return ss, ccid, nil + return ss, nil } func isSQLite(db *gorm.DB) bool { @@ -351,27 +286,31 @@ func tableCount(db *gorm.DB, model interface{}) (cnt 
int64, err error) { // Close closes the underlying database connection of the store. func (s *SQLStore) Close() error { s.shutdownCtxCancel() - s.wg.Wait() - db, err := s.db.DB() + err := s.cs.Close() if err != nil { return err } - dbMetrics, err := s.dbMetrics.DB() + + err = s.slabBufferMgr.Close() if err != nil { return err } - err = db.Close() + db, err := s.db.DB() if err != nil { return err } - err = dbMetrics.Close() + dbMetrics, err := s.dbMetrics.DB() if err != nil { return err } - err = s.slabBufferMgr.Close() + err = db.Close() + if err != nil { + return err + } + err = dbMetrics.Close() if err != nil { return err } @@ -382,6 +321,21 @@ func (s *SQLStore) Close() error { return nil } +// ChainIndex returns the last stored chain index. +func (ss *SQLStore) ChainIndex() (types.ChainIndex, error) { + var ci dbConsensusInfo + if err := ss.db. + Where(&dbConsensusInfo{Model: Model{ID: consensusInfoID}}). + FirstOrCreate(&ci). + Error; err != nil { + return types.ChainIndex{}, err + } + return types.ChainIndex{ + Height: ci.Height, + ID: types.BlockID(ci.BlockID), + }, nil +} + // ProcessChainApplyUpdate implements chain.Subscriber. func (s *SQLStore) ProcessChainApplyUpdate(cau *chain.ApplyUpdate, mayCommit bool) error { return s.cs.ProcessChainApplyUpdate(cau, mayCommit) @@ -392,149 +346,6 @@ func (s *SQLStore) ProcessChainRevertUpdate(cru *chain.RevertUpdate) error { return s.cs.ProcessChainRevertUpdate(cru) } -// ProcessConsensusChange implements consensus.Subscriber. -func (ss *SQLStore) ProcessConsensusChange(cc modules.ConsensusChange) { - ss.persistMu.Lock() - defer ss.persistMu.Unlock() - - ss.processConsensusChangeHostDB(cc) - ss.processConsensusChangeContracts(cc) - ss.processConsensusChangeWallet(cc) - - // Update consensus fields. - ss.ccid = cc.ID - ss.chainIndex = types.ChainIndex{ - Height: uint64(cc.BlockHeight), - ID: types.BlockID(cc.AppliedBlocks[len(cc.AppliedBlocks)-1].ID()), - } - - // Try to apply the updates. 
- if err := ss.applyUpdates(false); err != nil { - ss.logger.Error(fmt.Sprintf("failed to apply updates, err: %v", err)) - } - - // Force a persist if no block has been received for some time. - if ss.persistTimer != nil { - ss.persistTimer.Stop() - select { - case <-ss.persistTimer.C: - default: - } - } - ss.persistTimer = time.AfterFunc(10*time.Second, func() { - ss.mu.Lock() - if ss.closed { - ss.mu.Unlock() - return - } - ss.mu.Unlock() - - ss.persistMu.Lock() - defer ss.persistMu.Unlock() - if err := ss.applyUpdates(true); err != nil { - ss.logger.Error(fmt.Sprintf("failed to apply updates, err: %v", err)) - } - }) -} - -// applyUpdates applies all unapplied updates to the database. -func (ss *SQLStore) applyUpdates(force bool) error { - // Check if we need to apply changes - persistIntervalPassed := time.Since(ss.lastSave) > ss.persistInterval // enough time has passed since last persist - softLimitReached := len(ss.unappliedAnnouncements) >= announcementBatchSoftLimit // enough announcements have accumulated - unappliedRevisionsOrProofs := len(ss.unappliedRevisions) > 0 || len(ss.unappliedProofs) > 0 // enough revisions/proofs have accumulated - unappliedOutputsOrTxns := len(ss.unappliedOutputChanges) > 0 || len(ss.unappliedTxnChanges) > 0 // enough outputs/txns have accumualted - unappliedContractState := len(ss.unappliedContractState) > 0 // the chain state of a contract changed - if !force && !persistIntervalPassed && !softLimitReached && !unappliedRevisionsOrProofs && !unappliedOutputsOrTxns && !unappliedContractState { - return nil - } - - // Fetch allowlist - var allowlist []dbAllowlistEntry - if err := ss.db. - Model(&dbAllowlistEntry{}). - Find(&allowlist). - Error; err != nil { - ss.logger.Error(fmt.Sprintf("failed to fetch allowlist, err: %v", err)) - } - - // Fetch blocklist - var blocklist []dbBlocklistEntry - if err := ss.db. - Model(&dbBlocklistEntry{}). - Find(&blocklist). 
- Error; err != nil { - ss.logger.Error(fmt.Sprintf("failed to fetch blocklist, err: %v", err)) - } - - err := ss.retryTransaction(func(tx *gorm.DB) (err error) { - if len(ss.unappliedAnnouncements) > 0 { - if err = insertAnnouncements(tx, ss.unappliedAnnouncements); err != nil { - return fmt.Errorf("%w; failed to insert %d announcements", err, len(ss.unappliedAnnouncements)) - } - } - if len(ss.unappliedHostKeys) > 0 && (len(allowlist)+len(blocklist)) > 0 { - for host := range ss.unappliedHostKeys { - if err := updateBlocklist(tx, host, allowlist, blocklist); err != nil { - ss.logger.Error(fmt.Sprintf("failed to update blocklist, err: %v", err)) - } - } - } - for fcid, rev := range ss.unappliedRevisions { - if err := applyRevisionUpdate(tx, types.FileContractID(fcid), rev); err != nil { - return fmt.Errorf("%w; failed to update revision number and height", err) - } - } - for fcid, proofHeight := range ss.unappliedProofs { - if err := updateProofHeight(tx, types.FileContractID(fcid), proofHeight); err != nil { - return fmt.Errorf("%w; failed to update proof height", err) - } - } - for _, oc := range ss.unappliedOutputChanges { - if oc.addition { - err = applyUnappliedOutputAdditions(tx, oc.sco) - } else { - err = applyUnappliedOutputRemovals(tx, oc.oid) - } - if err != nil { - return fmt.Errorf("%w; failed to apply unapplied output change", err) - } - } - for _, tc := range ss.unappliedTxnChanges { - if tc.addition { - err = applyUnappliedTxnAdditions(tx, tc.txn) - } else { - err = applyUnappliedTxnRemovals(tx, tc.txnID) - } - if err != nil { - return fmt.Errorf("%w; failed to apply unapplied txn change", err) - } - } - for fcid, cs := range ss.unappliedContractState { - if err := updateContractState(tx, fcid, cs); err != nil { - return fmt.Errorf("%w; failed to update chain state", err) - } - } - if err := markFailedContracts(tx, ss.chainIndex.Height); err != nil { - return err - } - return updateCCID(tx, ss.ccid, ss.chainIndex) - }) - if err != nil { - return 
fmt.Errorf("%w; failed to apply updates", err) - } - - ss.unappliedContractState = make(map[types.FileContractID]contractState) - ss.unappliedProofs = make(map[types.FileContractID]uint64) - ss.unappliedRevisions = make(map[types.FileContractID]revisionUpdate) - ss.unappliedHostKeys = make(map[types.PublicKey]struct{}) - ss.unappliedAnnouncements = ss.unappliedAnnouncements[:0] - ss.lastSave = time.Now() - ss.unappliedOutputChanges = nil - ss.unappliedTxnChanges = nil - return nil -} - func retryTransaction(db *gorm.DB, logger *zap.SugaredLogger, fc func(tx *gorm.DB) error, intervals []time.Duration, opts ...*sql.TxOptions) error { abortRetry := func(err error) bool { if err == nil || @@ -554,7 +365,7 @@ func retryTransaction(db *gorm.DB, logger *zap.SugaredLogger, fc func(tx *gorm.D strings.Contains(err.Error(), "Duplicate entry") || errors.Is(err, api.ErrPartNotFound) || errors.Is(err, api.ErrSlabNotFound) || - errors.Is(err, ErrPeerNotFound) { + errors.Is(err, syncer.ErrPeerNotFound) { return true } return false @@ -574,48 +385,3 @@ func retryTransaction(db *gorm.DB, logger *zap.SugaredLogger, fc func(tx *gorm.D func (s *SQLStore) retryTransaction(fc func(tx *gorm.DB) error, opts ...*sql.TxOptions) error { return retryTransaction(s.db, s.logger, fc, s.retryTransactionIntervals, opts...) } - -func initConsensusInfo(db *gorm.DB) (dbConsensusInfo, modules.ConsensusChangeID, error) { - var ci dbConsensusInfo - if err := db. - Where(&dbConsensusInfo{Model: Model{ID: consensusInfoID}}). - Attrs(dbConsensusInfo{ - Model: Model{ID: consensusInfoID}, - CCID: modules.ConsensusChangeBeginning[:], - }). - FirstOrCreate(&ci). 
- Error; err != nil { - return dbConsensusInfo{}, modules.ConsensusChangeID{}, err - } - var ccid modules.ConsensusChangeID - copy(ccid[:], ci.CCID) - return ci, ccid, nil -} - -func (s *SQLStore) ResetConsensusSubscription() error { - // empty tables and reinit consensus_infos - var ci dbConsensusInfo - err := s.retryTransaction(func(tx *gorm.DB) error { - if err := s.db.Exec("DELETE FROM consensus_infos").Error; err != nil { - return err - } else if err := s.db.Exec("DELETE FROM siacoin_elements").Error; err != nil { - return err - } else if err := s.db.Exec("DELETE FROM transactions").Error; err != nil { - return err - } else if ci, _, err = initConsensusInfo(tx); err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - // reset in-memory state. - s.persistMu.Lock() - s.chainIndex = types.ChainIndex{ - Height: ci.Height, - ID: types.BlockID(ci.BlockID), - } - s.persistMu.Unlock() - return nil -} diff --git a/stores/sql_test.go b/stores/sql_test.go index 776e3e10e..caf7720e6 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -1,7 +1,6 @@ package stores import ( - "bytes" "context" "encoding/hex" "fmt" @@ -15,7 +14,6 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/object" - "go.sia.tech/siad/modules" "go.uber.org/zap" "go.uber.org/zap/zapcore" "gorm.io/gorm" @@ -44,7 +42,6 @@ type testSQLStore struct { dbName string dbMetricsName string dir string - ccid modules.ConsensusChangeID } type testSQLStoreConfig struct { @@ -116,7 +113,7 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { walletAddrs := types.Address(frand.Entropy256()) alerts := alerts.WithOrigin(alerts.NewManager(), "test") - sqlStore, ccid, err := NewSQLStore(Config{ + sqlStore, err := NewSQLStore(Config{ Conn: conn, ConnMetrics: connMetrics, Alerts: alerts, @@ -144,7 +141,6 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { dbName: dbName, dbMetricsName: dbMetricsName, 
dir: dir, - ccid: ccid, t: t, } } @@ -271,61 +267,11 @@ func (s *SQLStore) overrideSlabHealth(objectID string, health float64) (err erro return } -// TestConsensusReset is a unit test for ResetConsensusSubscription. -func TestConsensusReset(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - if ss.ccid != modules.ConsensusChangeBeginning { - t.Fatal("wrong ccid", ss.ccid, modules.ConsensusChangeBeginning) - } - - // Manually insert into the consenus_infos, the transactions and siacoin_elements tables. - ccid2 := modules.ConsensusChangeID{1} - ss.db.Create(&dbConsensusInfo{ - CCID: ccid2[:], - }) - ss.db.Create(&dbSiacoinElement{ - OutputID: hash256{2}, - }) - ss.db.Create(&dbTransaction{ - TransactionID: hash256{3}, - }) - - // Reset the consensus. - if err := ss.ResetConsensusSubscription(); err != nil { - t.Fatal(err) - } - - // Reopen the SQLStore. - ss = ss.Reopen() - defer ss.Close() - - // Check tables. - var count int64 - if err := ss.db.Model(&dbConsensusInfo{}).Count(&count).Error; err != nil || count != 1 { - t.Fatal("table should have 1 entry", err, count) - } else if err = ss.db.Model(&dbTransaction{}).Count(&count).Error; err != nil || count > 0 { - t.Fatal("table not empty", err) - } else if err = ss.db.Model(&dbSiacoinElement{}).Count(&count).Error; err != nil || count > 0 { - t.Fatal("table not empty", err) - } - - // Check consensus info. - var ci dbConsensusInfo - if err := ss.db.Take(&ci).Error; err != nil { - t.Fatal(err) - } else if !bytes.Equal(ci.CCID, modules.ConsensusChangeBeginning[:]) { - t.Fatal("wrong ccid", ci.CCID, modules.ConsensusChangeBeginning) - } else if ci.Height != 0 { - t.Fatal("wrong height", ci.Height, 0) - } - - // Check SQLStore. 
- if ss.chainIndex.Height != 0 { - t.Fatal("wrong height", ss.chainIndex.Height, 0) - } else if ss.chainIndex.ID != (types.BlockID{}) { - t.Fatal("wrong id", ss.chainIndex.ID, types.BlockID{}) - } +type queryPlanExplain struct { + ID int `json:"id"` + Parent int `json:"parent"` + NotUsed bool `json:"notused"` + Detail string `json:"detail"` } type sqliteQueryPlan struct { @@ -398,25 +344,3 @@ func TestQueryPlan(t *testing.T) { } } } - -func TestApplyUpdatesErr(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - - before := ss.lastSave - - // drop consensus_infos table to cause update to fail - if err := ss.db.Exec("DROP TABLE consensus_infos").Error; err != nil { - t.Fatal(err) - } - - // call applyUpdates with 'force' set to true - if err := ss.applyUpdates(true); err == nil { - t.Fatal("expected error") - } - - // save shouldn't have happened - if ss.lastSave != before { - t.Fatal("lastSave should not have changed") - } -} diff --git a/stores/subscriber.go b/stores/subscriber.go index 85eeda4bf..c068b1459 100644 --- a/stores/subscriber.go +++ b/stores/subscriber.go @@ -42,31 +42,48 @@ type ( events []eventChange contractState map[types.Hash256]contractState - hosts map[types.PublicKey]struct{} mayCommit bool outputs map[types.Hash256]outputChange proofs map[types.Hash256]uint64 revisions map[types.Hash256]revisionUpdate - transactions []txnChange } ) -func NewChainSubscriber(db *gorm.DB, logger *zap.SugaredLogger, intvls []time.Duration, persistInterval time.Duration, addr types.Address, ancmtMaxAge time.Duration) *chainSubscriber { +func newChainSubscriber(sqlStore *SQLStore, logger *zap.SugaredLogger, intvls []time.Duration, persistInterval time.Duration, walletAddress types.Address, ancmtMaxAge time.Duration) (*chainSubscriber, error) { + // load known contracts + var activeFCIDs []fileContractID + if err := sqlStore.db.Model(&dbContract{}). + Select("fcid"). 
+ Find(&activeFCIDs).Error; err != nil { + return nil, err + } + var archivedFCIDs []fileContractID + if err := sqlStore.db.Model(&dbArchivedContract{}). + Select("fcid"). + Find(&archivedFCIDs).Error; err != nil { + return nil, err + } + knownContracts := make(map[types.FileContractID]struct{}) + for _, fcid := range append(activeFCIDs, archivedFCIDs...) { + knownContracts[types.FileContractID(fcid)] = struct{}{} + } + return &chainSubscriber{ announcementMaxAge: ancmtMaxAge, - db: db, + db: sqlStore.db, logger: logger, retryIntervals: intvls, - walletAddress: addr, - lastSave: time.Now(), - persistInterval: persistInterval, - - contractState: make(map[types.Hash256]contractState), - outputs: make(map[types.Hash256]outputChange), - hosts: make(map[types.PublicKey]struct{}), - proofs: make(map[types.Hash256]uint64), - revisions: make(map[types.Hash256]revisionUpdate), - } + + walletAddress: walletAddress, + lastSave: time.Now(), + persistInterval: persistInterval, + + contractState: make(map[types.Hash256]contractState), + outputs: make(map[types.Hash256]outputChange), + proofs: make(map[types.Hash256]uint64), + revisions: make(map[types.Hash256]revisionUpdate), + knownContracts: knownContracts, + }, nil } func (cs *chainSubscriber) Close() error { @@ -74,12 +91,13 @@ func (cs *chainSubscriber) Close() error { defer cs.mu.Unlock() cs.closed = true - cs.persistTimer.Stop() - select { - case <-cs.persistTimer.C: - default: + if cs.persistTimer != nil { + cs.persistTimer.Stop() + select { + case <-cs.persistTimer.C: + default: + } } - return nil } @@ -95,7 +113,9 @@ func (cs *chainSubscriber) ProcessChainApplyUpdate(cau *chain.ApplyUpdate, mayCo cs.processChainApplyUpdateHostDB(cau) cs.processChainApplyUpdateContracts(cau) - // TODO: handle wallet here + if err := cs.processChainApplyUpdateWallet(cau); err != nil { + return err + } cs.tip = cau.State.Index cs.mayCommit = mayCommit @@ -115,7 +135,9 @@ func (cs *chainSubscriber) ProcessChainRevertUpdate(cru 
*chain.RevertUpdate) err cs.processChainRevertUpdateHostDB(cru) cs.processChainRevertUpdateContracts(cru) - // TODO: handle wallet here + if err := cs.processChainRevertUpdateWallet(cru); err != nil { + return err + } cs.tip = cru.State.Index cs.mayCommit = true @@ -129,6 +151,12 @@ func (cs *chainSubscriber) Tip() types.ChainIndex { return cs.tip } +func (cs *chainSubscriber) addKnownContract(id types.FileContractID) { + cs.mu.Lock() + defer cs.mu.Unlock() + cs.knownContracts[id] = struct{}{} +} + func (cs *chainSubscriber) isKnownContract(id types.FileContractID) bool { _, ok := cs.knownContracts[id] return ok @@ -158,11 +186,15 @@ func (cs *chainSubscriber) commit() error { if err = insertAnnouncements(tx, cs.announcements); err != nil { return fmt.Errorf("%w; failed to insert %d announcements", err, len(cs.announcements)) } - } - if len(cs.hosts) > 0 && (len(allowlist)+len(blocklist)) > 0 { - for host := range cs.hosts { - if err := updateBlocklist(tx, host, allowlist, blocklist); err != nil { - cs.logger.Error(fmt.Sprintf("failed to update blocklist, err: %v", err)) + if len(allowlist)+len(blocklist) > 0 { + updated := make(map[types.PublicKey]struct{}) + for _, ann := range cs.announcements { + if _, seen := updated[ann.hk]; !seen { + updated[ann.hk] = struct{}{} + if err := updateBlocklist(tx, ann.hk, allowlist, blocklist); err != nil { + cs.logger.Error(fmt.Sprintf("failed to update blocklist, err: %v", err)) + } + } } } } @@ -178,22 +210,22 @@ func (cs *chainSubscriber) commit() error { } for _, oc := range cs.outputs { if oc.addition { - err = applyUnappliedOutputAdditions(tx, oc.sco) + err = applyUnappliedOutputAdditions(tx, oc.se) } else { - err = applyUnappliedOutputRemovals(tx, oc.oid) + err = applyUnappliedOutputRemovals(tx, oc.se.OutputID) } if err != nil { return fmt.Errorf("%w; failed to apply unapplied output change", err) } } - for _, tc := range cs.transactions { + for _, tc := range cs.events { if tc.addition { - err = 
applyUnappliedTxnAdditions(tx, tc.txn) + err = applyUnappliedEventAdditions(tx, tc.event) } else { - err = applyUnappliedTxnRemovals(tx, tc.txnID) + err = applyUnappliedEventRemovals(tx, tc.event.EventID) } if err != nil { - return fmt.Errorf("%w; failed to apply unapplied txn change", err) + return fmt.Errorf("%w; failed to apply unapplied event change", err) } } for fcid, cs := range cs.contractState { @@ -212,28 +244,24 @@ func (cs *chainSubscriber) commit() error { cs.announcements = nil cs.contractState = make(map[types.Hash256]contractState) - cs.hosts = make(map[types.PublicKey]struct{}) cs.mayCommit = false - cs.outputs = nil + cs.outputs = make(map[types.Hash256]outputChange) cs.proofs = make(map[types.Hash256]uint64) cs.revisions = make(map[types.Hash256]revisionUpdate) - cs.transactions = nil + cs.events = nil cs.lastSave = time.Now() return nil } // shouldCommit returns whether the subscriber should commit its buffered state. func (cs *chainSubscriber) shouldCommit() bool { - mayCommit := cs.mayCommit - persistIntervalPassed := time.Since(cs.lastSave) > cs.persistInterval - hasAnnouncements := len(cs.announcements) > 0 - hasRevisions := len(cs.revisions) > 0 - hasProofs := len(cs.proofs) > 0 - hasOutputChanges := len(cs.outputs) > 0 - hasTxnChanges := len(cs.transactions) > 0 - hasContractState := len(cs.contractState) > 0 - return mayCommit || persistIntervalPassed || hasAnnouncements || hasRevisions || - hasProofs || hasOutputChanges || hasTxnChanges || hasContractState + return cs.mayCommit && (time.Since(cs.lastSave) > cs.persistInterval || + len(cs.announcements) > 0 || + len(cs.revisions) > 0 || + len(cs.proofs) > 0 || + len(cs.outputs) > 0 || + len(cs.events) > 0 || + len(cs.contractState) > 0) } func (cs *chainSubscriber) tryCommit() error { @@ -242,9 +270,17 @@ func (cs *chainSubscriber) tryCommit() error { return nil } else if err := cs.commit(); err != nil { cs.logger.Errorw("failed to commit chain update", zap.Error(err)) + return err } // 
force a persist if no block has been received for some time + if cs.persistTimer != nil { + cs.persistTimer.Stop() + select { + case <-cs.persistTimer.C: + default: + } + } cs.persistTimer = time.AfterFunc(10*time.Second, func() { cs.mu.Lock() defer cs.mu.Unlock() @@ -267,12 +303,12 @@ func (cs *chainSubscriber) processChainApplyUpdateHostDB(cau *chain.ApplyUpdate) return // ignore } cs.announcements = append(cs.announcements, announcement{ - HostAnnouncement: ha, blockHeight: cau.State.Index.Height, blockID: b.ID(), + hk: hk, timestamp: b.Timestamp, + HostAnnouncement: ha, }) - cs.hosts[types.PublicKey(hk)] = struct{}{} }) } @@ -287,7 +323,7 @@ func (cs *chainSubscriber) processChainApplyUpdateContracts(cau *chain.ApplyUpda } // generic helper for processing v1 and v2 contracts - processContract := func(fcid types.Hash256, rev *revision, resolved, valid bool) { + processContract := func(fcid types.Hash256, rev revision, resolved, valid bool) { // ignore irrelevant contracts if !cs.isKnownContract(types.FileContractID(fcid)) { return @@ -302,18 +338,16 @@ func (cs *chainSubscriber) processChainApplyUpdateContracts(cau *chain.ApplyUpda } // renewed: 'active' -> 'complete' - if rev != nil { - cs.revisions[fcid] = revisionUpdate{ - height: cau.State.Index.Height, - number: rev.revisionNumber, - size: rev.fileSize, - } - if rev.revisionNumber == types.MaxRevisionNumber && rev.fileSize == 0 { - cs.contractState[fcid] = contractStateComplete // renewed: 'active' -> 'complete' - cs.logger.Infow("contract state changed: active -> complete", - "fcid", fcid, - "reason", "final revision confirmed") - } + if rev.revisionNumber == types.MaxRevisionNumber && rev.fileSize == 0 { + cs.contractState[fcid] = contractStateComplete // renewed: 'active' -> 'complete' + cs.logger.Infow("contract state changed: active -> complete", + "fcid", fcid, + "reason", "final revision confirmed") + } + cs.revisions[fcid] = revisionUpdate{ + height: cau.State.Index.Height, + number: 
rev.revisionNumber, + size: rev.fileSize, } // storage proof: 'active' -> 'complete/failed' @@ -335,27 +369,30 @@ func (cs *chainSubscriber) processChainApplyUpdateContracts(cau *chain.ApplyUpda // v1 contracts cau.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool) { - var r *revision + var r revision if rev != nil { - r = &revision{ - revisionNumber: rev.FileContract.RevisionNumber, - fileSize: rev.FileContract.Filesize, - } + r.revisionNumber = rev.FileContract.RevisionNumber + r.fileSize = rev.FileContract.Filesize + } else { + r.revisionNumber = fce.FileContract.RevisionNumber + r.fileSize = fce.FileContract.Filesize } processContract(fce.ID, r, resolved, valid) }) // v2 contracts cau.ForEachV2FileContractElement(func(fce types.V2FileContractElement, rev *types.V2FileContractElement, res types.V2FileContractResolutionType) { - var r *revision + var r revision if rev != nil { - r = &revision{ - revisionNumber: rev.V2FileContract.RevisionNumber, - fileSize: rev.V2FileContract.Filesize, - } + r.revisionNumber = rev.V2FileContract.RevisionNumber + r.fileSize = rev.V2FileContract.Filesize + } else { + r.revisionNumber = fce.V2FileContract.RevisionNumber + r.fileSize = fce.V2FileContract.Filesize } - resolved := res != nil - valid := false + + var valid bool + var resolved bool if res != nil { switch res.(type) { case *types.V2FileContractFinalization: @@ -367,6 +404,8 @@ func (cs *chainSubscriber) processChainApplyUpdateContracts(cau *chain.ApplyUpda case *types.V2FileContractExpiration: valid = fce.V2FileContract.Filesize == 0 } + + resolved = true } processContract(fce.ID, r, resolved, valid) }) @@ -467,6 +506,14 @@ func (cs *chainSubscriber) processChainRevertUpdateContracts(cru *chain.RevertUp }) } +func (cs *chainSubscriber) processChainApplyUpdateWallet(cau *chain.ApplyUpdate) error { + return wallet.ApplyChainUpdates(cs, cs.walletAddress, []*chain.ApplyUpdate{cau}) +} + +func (cs 
*chainSubscriber) processChainRevertUpdateWallet(cru *chain.RevertUpdate) error { + return wallet.RevertChainUpdate(cs, cs.walletAddress, cru) +} + func (cs *chainSubscriber) retryTransaction(fc func(tx *gorm.DB) error, opts ...*sql.TxOptions) error { return retryTransaction(cs.db, cs.logger, fc, cs.retryIntervals, opts...) } diff --git a/stores/wallet.go b/stores/wallet.go index c6a6fefc4..e0e8a256a 100644 --- a/stores/wallet.go +++ b/stores/wallet.go @@ -1,37 +1,16 @@ package stores import ( - "bytes" "math" "time" - "gitlab.com/NebulousLabs/encoding" "go.sia.tech/core/types" "go.sia.tech/coreutils/wallet" - "go.sia.tech/siad/modules" "gorm.io/gorm" + "gorm.io/gorm/clause" ) type ( - dbSiacoinElement struct { - Model - Value currency - Address hash256 `gorm:"size:32"` - OutputID hash256 `gorm:"unique;index;NOT NULL;size:32"` - MaturityHeight uint64 `gorm:"index"` - } - - dbTransaction struct { - Model - Raw types.Transaction `gorm:"serializer:json"` - Height uint64 - BlockID hash256 `gorm:"size:32"` - TransactionID hash256 `gorm:"unique;index;NOT NULL;size:32"` - Inflow currency - Outflow currency - Timestamp int64 `gorm:"index:idx_transactions_timestamp"` - } - dbWalletEvent struct { Model @@ -49,12 +28,6 @@ type ( BlockID hash256 `gorm:"size:32"` } - txnChange struct { - addition bool - txnID hash256 - txn dbTransaction - } - dbWalletOutput struct { Model @@ -74,7 +47,6 @@ type ( outputChange struct { addition bool oid hash256 - sco dbSiacoinElement se dbWalletOutput } @@ -85,16 +57,14 @@ type ( ) // TableName implements the gorm.Tabler interface. -func (dbSiacoinElement) TableName() string { return "siacoin_elements" } - -// TableName implements the gorm.Tabler interface. -func (dbTransaction) TableName() string { return "transactions" } - -// TableName implements the gorm.Tabler interface. 
-func (dbWalletEvent) TableName() string { return "wallet_events" } +func (dbWalletEvent) TableName() string { + return "wallet_events" +} // TableName implements the gorm.Tabler interface. -func (dbWalletOutput) TableName() string { return "wallet_outputs" } +func (dbWalletOutput) TableName() string { + return "wallet_outputs" +} func (e dbWalletEvent) Index() types.ChainIndex { return types.ChainIndex{ @@ -110,13 +80,6 @@ func (se dbWalletOutput) Index() types.ChainIndex { } } -func (s *SQLStore) Height() uint64 { - s.persistMu.Lock() - height := s.chainIndex.Height - s.persistMu.Unlock() - return height -} - // Tip returns the consensus change ID and block height of the last wallet // change. func (s *SQLStore) Tip() (types.ChainIndex, error) { @@ -125,283 +88,103 @@ func (s *SQLStore) Tip() (types.ChainIndex, error) { // UnspentSiacoinElements returns a list of all unspent siacoin outputs func (s *SQLStore) UnspentSiacoinElements() ([]wallet.SiacoinElement, error) { - var elems []dbSiacoinElement - if err := s.db.Find(&elems).Error; err != nil { + var dbElems []dbWalletOutput + if err := s.db.Find(&dbElems).Error; err != nil { return nil, err } - utxo := make([]wallet.SiacoinElement, len(elems)) - for i := range elems { - utxo[i] = wallet.SiacoinElement{ + elements := make([]wallet.SiacoinElement, len(dbElems)) + for i, el := range dbElems { + elements[i] = wallet.SiacoinElement{ SiacoinElement: types.SiacoinElement{ StateElement: types.StateElement{ - ID: types.Hash256(elems[i].OutputID), - // TODO: LeafIndex missing - // TODO: MerkleProof missing + ID: types.Hash256(el.OutputID), + LeafIndex: el.LeafIndex, + MerkleProof: el.MerkleProof.proof, }, + MaturityHeight: el.MaturityHeight, SiacoinOutput: types.SiacoinOutput{ - Address: types.Address(elems[i].Address), - Value: types.Currency(elems[i].Value), + Address: types.Address(el.Address), + Value: types.Currency(el.Value), }, - MaturityHeight: elems[i].MaturityHeight, + }, + Index: types.ChainIndex{ + Height: 
el.Height, + ID: types.BlockID(el.BlockID), }, // TODO: Index missing } } - return utxo, nil + return elements, nil } -// WalletEvents returns a paginated list of transactions ordered by maturity -// height, descending. If no more transactions are available, (nil, nil) should -// be returned. +// WalletEvents returns a paginated list of events, ordered by maturity height, +// descending. If no more events are available, (nil, nil) is returned. func (s *SQLStore) WalletEvents(offset, limit int) ([]wallet.Event, error) { if limit == 0 || limit == -1 { limit = math.MaxInt64 } - var dbTxns []dbTransaction - err := s.db.Raw("SELECT * FROM transactions ORDER BY timestamp DESC LIMIT ? OFFSET ?", - limit, offset).Scan(&dbTxns). + var dbEvents []dbWalletEvent + err := s.db.Raw("SELECT * FROM wallet_events ORDER BY timestamp DESC LIMIT ? OFFSET ?", + limit, offset).Scan(&dbEvents). Error if err != nil { return nil, err } - txns := make([]wallet.Event, len(dbTxns)) - for i := range dbTxns { - txns[i] = wallet.Event{ - Transaction: dbTxns[i].Raw, + events := make([]wallet.Event, len(dbEvents)) + for i, e := range dbEvents { + events[i] = wallet.Event{ + ID: types.Hash256(e.EventID), Index: types.ChainIndex{ - Height: dbTxns[i].Height, - ID: types.BlockID(dbTxns[i].BlockID), + Height: e.Height, + ID: types.BlockID(e.BlockID), }, - ID: types.Hash256(dbTxns[i].TransactionID), - Inflow: types.Currency(dbTxns[i].Inflow), - Outflow: types.Currency(dbTxns[i].Outflow), - Timestamp: time.Unix(dbTxns[i].Timestamp, 0), + Inflow: types.Currency(e.Inflow), + Outflow: types.Currency(e.Outflow), + Transaction: e.Transaction, + Source: wallet.EventSource(e.Source), + MaturityHeight: e.MaturityHeight, + Timestamp: time.Unix(e.Timestamp, 0), } } - return txns, nil + return events, nil } -// WalletEventCount returns the total number of transactions in the wallet. +// WalletEventCount returns the number of events relevant to the wallet. 
func (s *SQLStore) WalletEventCount() (uint64, error) { var count int64 - if err := s.db.Model(&dbTransaction{}).Count(&count).Error; err != nil { + if err := s.db.Model(&dbWalletEvent{}).Count(&count).Error; err != nil { return 0, err } return uint64(count), nil } -// ProcessConsensusChange implements chain.Subscriber. -func (s *SQLStore) processConsensusChangeWallet(cc modules.ConsensusChange) { - // Add/Remove siacoin outputs. - for _, diff := range cc.SiacoinOutputDiffs { - var sco types.SiacoinOutput - convertToCore(diff.SiacoinOutput, (*types.V1SiacoinOutput)(&sco)) - if sco.Address != s.walletAddress { - continue - } - if diff.Direction == modules.DiffApply { - // add new outputs - s.unappliedOutputChanges = append(s.unappliedOutputChanges, outputChange{ - addition: true, - oid: hash256(diff.ID), - sco: dbSiacoinElement{ - Address: hash256(sco.Address), - Value: currency(sco.Value), - OutputID: hash256(diff.ID), - MaturityHeight: uint64(cc.BlockHeight), // immediately spendable - }, - }) - } else { - // remove reverted outputs - s.unappliedOutputChanges = append(s.unappliedOutputChanges, outputChange{ - addition: false, - oid: hash256(diff.ID), - }) - } - } - - // Create a 'fake' transaction for every matured siacoin output. - for _, diff := range cc.AppliedDiffs { - for _, dsco := range diff.DelayedSiacoinOutputDiffs { - // if a delayed output is reverted in an applied diff, the - // output has matured -- add a payout transaction. 
- if dsco.Direction != modules.DiffRevert { - continue - } else if types.Address(dsco.SiacoinOutput.UnlockHash) != s.walletAddress { - continue - } - var sco types.SiacoinOutput - convertToCore(dsco.SiacoinOutput, (*types.V1SiacoinOutput)(&sco)) - s.unappliedTxnChanges = append(s.unappliedTxnChanges, txnChange{ - addition: true, - txnID: hash256(dsco.ID), // use output id as txn id - txn: dbTransaction{ - Height: uint64(dsco.MaturityHeight), - Inflow: currency(sco.Value), // transaction inflow is value of matured output - TransactionID: hash256(dsco.ID), // use output as txn id - Timestamp: int64(cc.AppliedBlocks[dsco.MaturityHeight-cc.InitialHeight()-1].Timestamp), // use timestamp of block that caused output to mature - }, - }) - } - } - - // Revert transactions from reverted blocks. - for _, block := range cc.RevertedBlocks { - for _, stxn := range block.Transactions { - var txn types.Transaction - convertToCore(stxn, &txn) - if transactionIsRelevant(txn, s.walletAddress) { - // remove reverted txns - s.unappliedTxnChanges = append(s.unappliedTxnChanges, txnChange{ - addition: false, - txnID: hash256(txn.ID()), - }) - } - } - } - - // Revert 'fake' transactions. 
- for _, diff := range cc.RevertedDiffs { - for _, dsco := range diff.DelayedSiacoinOutputDiffs { - if dsco.Direction == modules.DiffApply { - s.unappliedTxnChanges = append(s.unappliedTxnChanges, txnChange{ - addition: false, - txnID: hash256(dsco.ID), - }) - } - } - } - - spentOutputs := make(map[types.SiacoinOutputID]types.SiacoinOutput) - for i, block := range cc.AppliedBlocks { - appliedDiff := cc.AppliedDiffs[i] - for _, diff := range appliedDiff.SiacoinOutputDiffs { - if diff.Direction == modules.DiffRevert { - var so types.SiacoinOutput - convertToCore(diff.SiacoinOutput, (*types.V1SiacoinOutput)(&so)) - spentOutputs[types.SiacoinOutputID(diff.ID)] = so - } - } - - for _, stxn := range block.Transactions { - var txn types.Transaction - convertToCore(stxn, &txn) - if transactionIsRelevant(txn, s.walletAddress) { - var inflow, outflow types.Currency - for _, out := range txn.SiacoinOutputs { - if out.Address == s.walletAddress { - inflow = inflow.Add(out.Value) - } - } - for _, in := range txn.SiacoinInputs { - if in.UnlockConditions.UnlockHash() == s.walletAddress { - so, ok := spentOutputs[in.ParentID] - if !ok { - panic("spent output not found") - } - outflow = outflow.Add(so.Value) - } - } - - // add confirmed txns - s.unappliedTxnChanges = append(s.unappliedTxnChanges, txnChange{ - addition: true, - txnID: hash256(txn.ID()), - txn: dbTransaction{ - Raw: txn, - Height: uint64(cc.InitialHeight()) + uint64(i) + 1, - BlockID: hash256(block.ID()), - Inflow: currency(inflow), - Outflow: currency(outflow), - TransactionID: hash256(txn.ID()), - Timestamp: int64(block.Timestamp), - }, - }) - } - } - } -} - -func transactionIsRelevant(txn types.Transaction, addr types.Address) bool { - for i := range txn.SiacoinInputs { - if txn.SiacoinInputs[i].UnlockConditions.UnlockHash() == addr { - return true - } - } - for i := range txn.SiacoinOutputs { - if txn.SiacoinOutputs[i].Address == addr { - return true - } - } - for i := range txn.SiafundInputs { - if 
txn.SiafundInputs[i].UnlockConditions.UnlockHash() == addr { - return true - } - if txn.SiafundInputs[i].ClaimAddress == addr { - return true - } - } - for i := range txn.SiafundOutputs { - if txn.SiafundOutputs[i].Address == addr { - return true - } - } - for i := range txn.FileContracts { - for _, sco := range txn.FileContracts[i].ValidProofOutputs { - if sco.Address == addr { - return true - } - } - for _, sco := range txn.FileContracts[i].MissedProofOutputs { - if sco.Address == addr { - return true - } - } - } - for i := range txn.FileContractRevisions { - for _, sco := range txn.FileContractRevisions[i].ValidProofOutputs { - if sco.Address == addr { - return true - } - } - for _, sco := range txn.FileContractRevisions[i].MissedProofOutputs { - if sco.Address == addr { - return true - } - } - } - return false -} - -func convertToCore(siad encoding.SiaMarshaler, core types.DecoderFrom) { - var buf bytes.Buffer - siad.MarshalSia(&buf) - d := types.NewBufDecoder(buf.Bytes()) - core.DecodeFrom(d) - if d.Err() != nil { - panic(d.Err()) - } -} - -func applyUnappliedOutputAdditions(tx *gorm.DB, sco dbSiacoinElement) error { - return tx.Create(&sco).Error +func applyUnappliedOutputAdditions(tx *gorm.DB, sco dbWalletOutput) error { + return tx. + Clauses(clause.OnConflict{ + DoNothing: true, + Columns: []clause.Column{{Name: "output_id"}}, + }).Create(&sco).Error } func applyUnappliedOutputRemovals(tx *gorm.DB, oid hash256) error { return tx.Where("output_id", oid). - Delete(&dbSiacoinElement{}). + Delete(&dbWalletOutput{}). Error } -func applyUnappliedTxnAdditions(tx *gorm.DB, txn dbTransaction) error { - return tx.Create(&txn).Error +func applyUnappliedEventAdditions(tx *gorm.DB, event dbWalletEvent) error { + return tx. + Clauses(clause.OnConflict{ + DoNothing: true, + Columns: []clause.Column{{Name: "event_id"}}, + }).Create(&event).Error } -func applyUnappliedTxnRemovals(tx *gorm.DB, txnID hash256) error { - return tx.Where("transaction_id", txnID). 
- Delete(&dbTransaction{}). +func applyUnappliedEventRemovals(tx *gorm.DB, eventID hash256) error { + return tx.Where("event_id", eventID). + Delete(&dbWalletEvent{}). Error } diff --git a/wallet/wallet.go b/wallet/wallet.go deleted file mode 100644 index f87a972f5..000000000 --- a/wallet/wallet.go +++ /dev/null @@ -1,723 +0,0 @@ -package wallet - -import ( - "bytes" - "context" - "errors" - "fmt" - "sort" - "sync" - "time" - - "gitlab.com/NebulousLabs/encoding" - "go.sia.tech/core/consensus" - "go.sia.tech/core/types" - "go.sia.tech/coreutils/wallet" - "go.sia.tech/renterd/api" - "go.sia.tech/siad/modules" - "go.uber.org/zap" -) - -const ( - // BytesPerInput is the encoded size of a SiacoinInput and corresponding - // TransactionSignature, assuming standard UnlockConditions. - BytesPerInput = 241 - - // redistributeBatchSize is the number of outputs to redistribute per txn to - // avoid creating a txn that is too large. - redistributeBatchSize = 10 - - // transactionDefragThreshold is the number of utxos at which the wallet - // will attempt to defrag itself by including small utxos in transactions. - transactionDefragThreshold = 30 - // maxInputsForDefrag is the maximum number of inputs a transaction can - // have before the wallet will stop adding inputs - maxInputsForDefrag = 30 - // maxDefragUTXOs is the maximum number of utxos that will be added to a - // transaction when defragging - maxDefragUTXOs = 10 -) - -// ErrInsufficientBalance is returned when there aren't enough unused outputs to -// cover the requested amount. -var ErrInsufficientBalance = errors.New("insufficient balance") - -// StandardUnlockConditions returns the standard unlock conditions for a single -// Ed25519 key. 
-func StandardUnlockConditions(pk types.PublicKey) types.UnlockConditions { - return types.UnlockConditions{ - PublicKeys: []types.UnlockKey{{ - Algorithm: types.SpecifierEd25519, - Key: pk[:], - }}, - SignaturesRequired: 1, - } -} - -// StandardAddress returns the standard address for an Ed25519 key. -func StandardAddress(pk types.PublicKey) types.Address { - return StandardUnlockConditions(pk).UnlockHash() -} - -// StandardTransactionSignature returns the standard signature object for a -// siacoin or siafund input. -func StandardTransactionSignature(id types.Hash256) types.TransactionSignature { - return types.TransactionSignature{ - ParentID: id, - CoveredFields: types.CoveredFields{WholeTransaction: true}, - PublicKeyIndex: 0, - } -} - -// ExplicitCoveredFields returns a CoveredFields that covers all elements -// present in txn. -func ExplicitCoveredFields(txn types.Transaction) (cf types.CoveredFields) { - for i := range txn.SiacoinInputs { - cf.SiacoinInputs = append(cf.SiacoinInputs, uint64(i)) - } - for i := range txn.SiacoinOutputs { - cf.SiacoinOutputs = append(cf.SiacoinOutputs, uint64(i)) - } - for i := range txn.FileContracts { - cf.FileContracts = append(cf.FileContracts, uint64(i)) - } - for i := range txn.FileContractRevisions { - cf.FileContractRevisions = append(cf.FileContractRevisions, uint64(i)) - } - for i := range txn.StorageProofs { - cf.StorageProofs = append(cf.StorageProofs, uint64(i)) - } - for i := range txn.SiafundInputs { - cf.SiafundInputs = append(cf.SiafundInputs, uint64(i)) - } - for i := range txn.SiafundOutputs { - cf.SiafundOutputs = append(cf.SiafundOutputs, uint64(i)) - } - for i := range txn.MinerFees { - cf.MinerFees = append(cf.MinerFees, uint64(i)) - } - for i := range txn.ArbitraryData { - cf.ArbitraryData = append(cf.ArbitraryData, uint64(i)) - } - for i := range txn.Signatures { - cf.Signatures = append(cf.Signatures, uint64(i)) - } - return -} - -// A SiacoinElement is a SiacoinOutput along with its ID. 
-type SiacoinElement struct { - types.SiacoinOutput - ID types.Hash256 `json:"id"` - MaturityHeight uint64 `json:"maturityHeight"` -} - -func convertToSiacoinElement(sce wallet.SiacoinElement) SiacoinElement { - return SiacoinElement{ - ID: sce.StateElement.ID, - SiacoinOutput: types.SiacoinOutput{ - Value: sce.SiacoinOutput.Value, - Address: sce.SiacoinOutput.Address, - }, - MaturityHeight: sce.MaturityHeight, - } -} - -func converToTransaction(txn wallet.Event) Transaction { - return Transaction{ - Raw: txn.Transaction, - Index: txn.Index, - ID: types.TransactionID(txn.ID), - Inflow: txn.Inflow, - Outflow: txn.Outflow, - Timestamp: txn.Timestamp, - } -} - -// A Transaction is an on-chain transaction relevant to a particular wallet, -// paired with useful metadata. -type Transaction struct { - Raw types.Transaction `json:"raw,omitempty"` - Index types.ChainIndex `json:"index"` - ID types.TransactionID `json:"id"` - Inflow types.Currency `json:"inflow"` - Outflow types.Currency `json:"outflow"` - Timestamp time.Time `json:"timestamp"` -} - -// A SingleAddressStore stores the state of a single-address wallet. -// Implementations are assumed to be thread safe. -type SingleAddressStore interface { - wallet.SingleAddressStore - - // TODO PJ: this needs to move out of the store interface, perhaps we can - // wrap the SingleAddressWallet from coreutils and subscribe it to record - // the wallet metrics in the store on every change in consensus. - RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error -} - -// A TransactionPool contains transactions that have not yet been included in a -// block. -type TransactionPool interface { - ContainsElement(id types.Hash256) bool -} - -// A SingleAddressWallet is a hot wallet that manages the outputs controlled by -// a single address. 
-type SingleAddressWallet struct { - log *zap.SugaredLogger - priv types.PrivateKey - addr types.Address - store SingleAddressStore - usedUTXOExpiry time.Duration - - // for building transactions - mu sync.Mutex - lastUsed map[types.Hash256]time.Time - // tpoolTxns maps a transaction set ID to the transactions in that set - tpoolTxns map[types.Hash256][]Transaction - // tpoolUtxos maps a siacoin output ID to its corresponding siacoin - // element. It is used to track siacoin outputs that are currently in - // the transaction pool. - tpoolUtxos map[types.SiacoinOutputID]SiacoinElement - // tpoolSpent is a set of siacoin output IDs that are currently in the - // transaction pool. - tpoolSpent map[types.SiacoinOutputID]bool -} - -// PrivateKey returns the private key of the wallet. -func (w *SingleAddressWallet) PrivateKey() types.PrivateKey { - return w.priv -} - -// Address returns the address of the wallet. -func (w *SingleAddressWallet) Address() types.Address { - return w.addr -} - -// Balance returns the balance of the wallet. 
-func (w *SingleAddressWallet) Balance() (spendable, confirmed, unconfirmed types.Currency, _ error) { - sces, err := w.unspentSiacoinElements() - if err != nil { - return types.Currency{}, types.Currency{}, types.Currency{}, err - } - - // fetch block height - tip, err := w.store.Tip() - if err != nil { - return types.Currency{}, types.Currency{}, types.Currency{}, err - } - bh := tip.Height - - // filter outputs that haven't matured yet - filtered := sces[:0] - for _, sce := range sces { - if sce.MaturityHeight >= bh { - filtered = append(filtered, sce) - } - } - sces = filtered - - w.mu.Lock() - defer w.mu.Unlock() - for _, sce := range sces { - if !w.isOutputUsed(sce.ID) { - spendable = spendable.Add(sce.Value) - } - confirmed = confirmed.Add(sce.Value) - } - for _, sco := range w.tpoolUtxos { - if !w.isOutputUsed(sco.ID) { - unconfirmed = unconfirmed.Add(sco.Value) - } - } - return -} - -// UnspentOutputs returns the set of unspent Siacoin outputs controlled by the -// wallet. -func (w *SingleAddressWallet) UnspentOutputs() ([]SiacoinElement, error) { - sces, err := w.unspentSiacoinElements() - if err != nil { - return nil, err - } - w.mu.Lock() - defer w.mu.Unlock() - filtered := sces[:0] - for _, sce := range sces { - if !w.isOutputUsed(sce.ID) { - filtered = append(filtered, sce) - } - } - return filtered, nil -} - -// Transactions returns up to max transactions relevant to the wallet that have -// a timestamp later than since. -func (w *SingleAddressWallet) Transactions(offset, limit int) ([]Transaction, error) { - txns, err := w.store.WalletEvents(offset, limit) - if err != nil { - return nil, err - } - - out := make([]Transaction, len(txns)) - for i := range txns { - out[i] = converToTransaction(txns[i]) - } - return out, nil -} - -// FundTransaction adds siacoin inputs worth at least the requested amount to -// the provided transaction. A change output is also added, if necessary. 
The -// inputs will not be available to future calls to FundTransaction unless -// ReleaseInputs is called or enough time has passed. -func (w *SingleAddressWallet) FundTransaction(cs consensus.State, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, error) { - if amount.IsZero() { - return nil, nil - } - - // fetch all unspent siacoin elements - utxos, err := w.unspentSiacoinElements() - if err != nil { - return nil, err - } - - // desc sort - sort.Slice(utxos, func(i, j int) bool { - return utxos[i].Value.Cmp(utxos[j].Value) > 0 - }) - - w.mu.Lock() - defer w.mu.Unlock() - - // add all unconfirmed outputs to the end of the slice as a last resort - if useUnconfirmedTxns { - var tpoolUtxos []SiacoinElement - for _, sco := range w.tpoolUtxos { - tpoolUtxos = append(tpoolUtxos, sco) - } - // desc sort - sort.Slice(tpoolUtxos, func(i, j int) bool { - return tpoolUtxos[i].Value.Cmp(tpoolUtxos[j].Value) > 0 - }) - utxos = append(utxos, tpoolUtxos...) - } - - // remove locked and spent outputs - usableUTXOs := utxos[:0] - for _, sce := range utxos { - if w.isOutputUsed(sce.ID) { - continue - } - usableUTXOs = append(usableUTXOs, sce) - } - - // fund the transaction using the largest utxos first - var selected []SiacoinElement - var inputSum types.Currency - for i, sce := range usableUTXOs { - if inputSum.Cmp(amount) >= 0 { - usableUTXOs = usableUTXOs[i:] - break - } - selected = append(selected, sce) - inputSum = inputSum.Add(sce.Value) - } - - // if the transaction can't be funded, return an error - if inputSum.Cmp(amount) < 0 { - return nil, fmt.Errorf("%w: inputSum: %v, amount: %v", ErrInsufficientBalance, inputSum.String(), amount.String()) - } - - // check if remaining utxos should be defragged - txnInputs := len(txn.SiacoinInputs) + len(selected) - if len(usableUTXOs) > transactionDefragThreshold && txnInputs < maxInputsForDefrag { - // add the smallest utxos to the transaction - defraggable := usableUTXOs - if 
len(defraggable) > maxDefragUTXOs { - defraggable = defraggable[len(defraggable)-maxDefragUTXOs:] - } - for i := len(defraggable) - 1; i >= 0; i-- { - if txnInputs >= maxInputsForDefrag { - break - } - - sce := defraggable[i] - selected = append(selected, sce) - inputSum = inputSum.Add(sce.Value) - txnInputs++ - } - } - - // add a change output if necessary - if inputSum.Cmp(amount) > 0 { - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: inputSum.Sub(amount), - Address: w.addr, - }) - } - - toSign := make([]types.Hash256, len(selected)) - for i, sce := range selected { - txn.SiacoinInputs = append(txn.SiacoinInputs, types.SiacoinInput{ - ParentID: types.SiacoinOutputID(sce.ID), - UnlockConditions: types.StandardUnlockConditions(w.priv.PublicKey()), - }) - toSign[i] = types.Hash256(sce.ID) - w.lastUsed[sce.ID] = time.Now() - } - - return toSign, nil -} - -// ReleaseInputs is a helper function that releases the inputs of txn for use in -// other transactions. It should only be called on transactions that are invalid -// or will never be broadcast. -func (w *SingleAddressWallet) ReleaseInputs(txns ...types.Transaction) { - w.mu.Lock() - defer w.mu.Unlock() - w.releaseInputs(txns...) -} - -func (w *SingleAddressWallet) releaseInputs(txns ...types.Transaction) { - for _, txn := range txns { - for _, in := range txn.SiacoinInputs { - delete(w.lastUsed, types.Hash256(in.ParentID)) - } - } -} - -// SignTransaction adds a signature to each of the specified inputs. 
-func (w *SingleAddressWallet) SignTransaction(cs consensus.State, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error { - for _, id := range toSign { - ts := types.TransactionSignature{ - ParentID: id, - CoveredFields: cf, - PublicKeyIndex: 0, - } - var h types.Hash256 - if cf.WholeTransaction { - h = cs.WholeSigHash(*txn, ts.ParentID, ts.PublicKeyIndex, ts.Timelock, cf.Signatures) - } else { - h = cs.PartialSigHash(*txn, cf) - } - sig := w.priv.SignHash(h) - ts.Signature = sig[:] - txn.Signatures = append(txn.Signatures, ts) - } - return nil -} - -// Redistribute returns a transaction that redistributes money in the wallet by -// selecting a minimal set of inputs to cover the creation of the requested -// outputs. It also returns a list of output IDs that need to be signed. -func (w *SingleAddressWallet) Redistribute(cs consensus.State, outputs int, amount, feePerByte types.Currency, pool []types.Transaction) ([]types.Transaction, []types.Hash256, error) { - // build map of inputs currently in the tx pool - inPool := make(map[types.Hash256]bool) - for _, ptxn := range pool { - for _, in := range ptxn.SiacoinInputs { - inPool[types.Hash256(in.ParentID)] = true - } - } - - // fetch unspent transaction outputs - utxos, err := w.unspentSiacoinElements() - if err != nil { - return nil, nil, err - } - - w.mu.Lock() - defer w.mu.Unlock() - - // check whether a redistribution is necessary, adjust number of desired - // outputs accordingly - for _, sce := range utxos { - inUse := w.isOutputUsed(sce.ID) || inPool[sce.ID] - matured := cs.Index.Height >= sce.MaturityHeight - sameValue := sce.Value.Equals(amount) - if !inUse && matured && sameValue { - outputs-- - } - } - if outputs <= 0 { - return nil, nil, nil - } - - // desc sort - sort.Slice(utxos, func(i, j int) bool { - return utxos[i].Value.Cmp(utxos[j].Value) > 0 - }) - - // prepare all outputs - var txns []types.Transaction - var toSign []types.Hash256 - - for outputs > 0 { - var txn 
types.Transaction - for i := 0; i < outputs && i < redistributeBatchSize; i++ { - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: amount, - Address: w.Address(), - }) - } - outputs -= len(txn.SiacoinOutputs) - - // estimate the fees - outputFees := feePerByte.Mul64(uint64(len(encoding.Marshal(txn.SiacoinOutputs)))) - feePerInput := feePerByte.Mul64(BytesPerInput) - - // collect outputs that cover the total amount - var inputs []SiacoinElement - want := amount.Mul64(uint64(len(txn.SiacoinOutputs))) - var amtInUse, amtSameValue, amtNotMatured types.Currency - for _, sce := range utxos { - inUse := w.isOutputUsed(sce.ID) || inPool[sce.ID] - matured := cs.Index.Height >= sce.MaturityHeight - sameValue := sce.Value.Equals(amount) - if inUse { - amtInUse = amtInUse.Add(sce.Value) - continue - } else if sameValue { - amtSameValue = amtSameValue.Add(sce.Value) - continue - } else if !matured { - amtNotMatured = amtNotMatured.Add(sce.Value) - continue - } - - inputs = append(inputs, sce) - fee := feePerInput.Mul64(uint64(len(inputs))).Add(outputFees) - if SumOutputs(inputs).Cmp(want.Add(fee)) > 0 { - break - } - } - - // not enough outputs found - fee := feePerInput.Mul64(uint64(len(inputs))).Add(outputFees) - if sumOut := SumOutputs(inputs); sumOut.Cmp(want.Add(fee)) < 0 { - // in case of an error we need to free all inputs - w.releaseInputs(txns...) 
- return nil, nil, fmt.Errorf("%w: inputs %v < needed %v + txnFee %v (usable: %v, inUse: %v, sameValue: %v, notMatured: %v)", - ErrInsufficientBalance, sumOut.String(), want.String(), fee.String(), sumOut.String(), amtInUse.String(), amtSameValue.String(), amtNotMatured.String()) - } - - // set the miner fee - txn.MinerFees = []types.Currency{fee} - - // add the change output - change := SumOutputs(inputs).Sub(want.Add(fee)) - if !change.IsZero() { - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: change, - Address: w.addr, - }) - } - - // add the inputs - for _, sce := range inputs { - txn.SiacoinInputs = append(txn.SiacoinInputs, types.SiacoinInput{ - ParentID: types.SiacoinOutputID(sce.ID), - UnlockConditions: StandardUnlockConditions(w.priv.PublicKey()), - }) - toSign = append(toSign, sce.ID) - w.lastUsed[sce.ID] = time.Now() - } - - txns = append(txns, txn) - } - - return txns, toSign, nil -} - -// Tip returns the consensus change ID and block height of the last wallet -// change. -func (w *SingleAddressWallet) Tip() (types.ChainIndex, error) { - return w.store.Tip() -} -func (w *SingleAddressWallet) isOutputUsed(id types.Hash256) bool { - inPool := w.tpoolSpent[types.SiacoinOutputID(id)] - lastUsed := w.lastUsed[id] - if w.usedUTXOExpiry == 0 { - return !lastUsed.IsZero() || inPool - } - return time.Since(lastUsed) <= w.usedUTXOExpiry || inPool -} - -// ProcessConsensusChange implements modules.ConsensusSetSubscriber. 
-func (w *SingleAddressWallet) ProcessConsensusChange(cc modules.ConsensusChange) { - // only record when we are synced - if !cc.Synced { - return - } - - // apply sane timeout - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - // fetch balance - spendable, confirmed, unconfirmed, err := w.Balance() - if err != nil { - w.log.Errorf("failed to fetch wallet balance, err: %v", err) - return - } - - // record wallet metric - if err := w.store.RecordWalletMetric(ctx, api.WalletMetric{ - Timestamp: api.TimeNow(), - Confirmed: confirmed, - Unconfirmed: unconfirmed, - Spendable: spendable, - }); err != nil { - w.log.Errorf("failed to record wallet metric, err: %v", err) - return - } -} - -// ReceiveUpdatedUnconfirmedTransactions implements modules.TransactionPoolSubscriber. -func (w *SingleAddressWallet) ReceiveUpdatedUnconfirmedTransactions(diff *modules.TransactionPoolDiff) { - siacoinOutputs := make(map[types.SiacoinOutputID]SiacoinElement) - utxos, err := w.unspentSiacoinElements() - if err != nil { - return - } - for _, output := range utxos { - siacoinOutputs[types.SiacoinOutputID(output.ID)] = output - } - - // fetch current heith - tip, err := w.store.Tip() - if err != nil { - return - } - currentHeight := tip.Height - - w.mu.Lock() - defer w.mu.Unlock() - - for id, output := range w.tpoolUtxos { - siacoinOutputs[id] = output - } - - for _, txnsetID := range diff.RevertedTransactions { - txns, ok := w.tpoolTxns[types.Hash256(txnsetID)] - if !ok { - continue - } - for _, txn := range txns { - for _, sci := range txn.Raw.SiacoinInputs { - delete(w.tpoolSpent, sci.ParentID) - } - for i := range txn.Raw.SiacoinOutputs { - delete(w.tpoolUtxos, txn.Raw.SiacoinOutputID(i)) - } - } - delete(w.tpoolTxns, types.Hash256(txnsetID)) - } - - for _, txnset := range diff.AppliedTransactions { - var relevantTxns []Transaction - - txnLoop: - for _, stxn := range txnset.Transactions { - var relevant bool - var txn types.Transaction - 
convertToCore(stxn, &txn) - processed := Transaction{ - ID: txn.ID(), - Index: types.ChainIndex{ - Height: currentHeight + 1, - }, - Raw: txn, - Timestamp: time.Now(), - } - for _, sci := range txn.SiacoinInputs { - if sci.UnlockConditions.UnlockHash() != w.addr { - continue - } - relevant = true - w.tpoolSpent[sci.ParentID] = true - - output, ok := siacoinOutputs[sci.ParentID] - if !ok { - // note: happens during deep reorgs. Possibly a race - // condition in siad. Log and skip. - w.log.Debug("tpool transaction unknown utxo", zap.Stringer("outputID", sci.ParentID), zap.Stringer("txnID", txn.ID())) - continue txnLoop - } - processed.Outflow = processed.Outflow.Add(output.Value) - } - - for i, sco := range txn.SiacoinOutputs { - if sco.Address != w.addr { - continue - } - relevant = true - outputID := txn.SiacoinOutputID(i) - processed.Inflow = processed.Inflow.Add(sco.Value) - sce := SiacoinElement{ - ID: types.Hash256(outputID), - SiacoinOutput: sco, - } - siacoinOutputs[outputID] = sce - w.tpoolUtxos[outputID] = sce - } - - if relevant { - relevantTxns = append(relevantTxns, processed) - } - } - - if len(relevantTxns) != 0 { - w.tpoolTxns[types.Hash256(txnset.ID)] = relevantTxns - } - } -} - -// unspentSiacoinElements is a helper that fetches UnspentSiacoinElements from -// the store and converts them to SiacoinElements. -func (w *SingleAddressWallet) unspentSiacoinElements() ([]SiacoinElement, error) { - unspent, err := w.store.UnspentSiacoinElements() - if err != nil { - return nil, err - } - - utxos := make([]SiacoinElement, len(unspent)) - for i := range unspent { - utxos[i] = convertToSiacoinElement(unspent[i]) - } - return utxos, nil -} - -// SumOutputs returns the total value of the supplied outputs. -func SumOutputs(outputs []SiacoinElement) (sum types.Currency) { - for _, o := range outputs { - sum = sum.Add(o.Value) - } - return -} - -// NewSingleAddressWallet returns a new SingleAddressWallet using the provided private key and store. 
-func NewSingleAddressWallet(priv types.PrivateKey, store SingleAddressStore, usedUTXOExpiry time.Duration, log *zap.SugaredLogger) *SingleAddressWallet { - return &SingleAddressWallet{ - priv: priv, - addr: StandardAddress(priv.PublicKey()), - store: store, - lastUsed: make(map[types.Hash256]time.Time), - usedUTXOExpiry: usedUTXOExpiry, - tpoolTxns: make(map[types.Hash256][]Transaction), - tpoolUtxos: make(map[types.SiacoinOutputID]SiacoinElement), - tpoolSpent: make(map[types.SiacoinOutputID]bool), - log: log.Named("wallet"), - } -} - -// convertToCore converts a siad type to an equivalent core type. -func convertToCore(siad encoding.SiaMarshaler, core types.DecoderFrom) { - var buf bytes.Buffer - siad.MarshalSia(&buf) - d := types.NewBufDecoder(buf.Bytes()) - core.DecodeFrom(d) - if d.Err() != nil { - panic(d.Err()) - } -} diff --git a/wallet/wallet_test.go b/wallet/wallet_test.go deleted file mode 100644 index 9ac3851a9..000000000 --- a/wallet/wallet_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package wallet - -import ( - "context" - "strings" - "testing" - - "go.sia.tech/core/consensus" - "go.sia.tech/core/types" - "go.sia.tech/coreutils/chain" - "go.sia.tech/coreutils/wallet" - "go.sia.tech/renterd/api" - "go.uber.org/zap" - "lukechampine.com/frand" -) - -// mockStore implements wallet.SingleAddressStore and allows to manipulate the -// wallet's utxos -type mockStore struct { - utxos []wallet.SiacoinElement -} - -func (s *mockStore) ProcessChainApplyUpdate(cau *chain.ApplyUpdate, mayCommit bool) error { return nil } -func (s *mockStore) ProcessChainRevertUpdate(cru *chain.RevertUpdate) error { return nil } - -func (s *mockStore) Balance() (types.Currency, error) { return types.ZeroCurrency, nil } -func (s *mockStore) Tip() (types.ChainIndex, error) { return types.ChainIndex{}, nil } -func (s *mockStore) UnspentSiacoinElements() ([]wallet.SiacoinElement, error) { - return s.utxos, nil -} -func (s *mockStore) WalletEvents(offset, limit int) ([]wallet.Event, error) 
{ - return nil, nil -} -func (s *mockStore) WalletEventCount() (uint64, error) { - return 0, nil -} -func (s *mockStore) RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error { - return nil -} - -var cs = consensus.State{ - Index: types.ChainIndex{ - Height: 1, - ID: types.BlockID{}, - }, -} - -// TestWalletRedistribute is a small unit test that covers the functionality of -// the 'Redistribute' method on the wallet. -func TestWalletRedistribute(t *testing.T) { - oneSC := types.Siacoins(1) - - // create a wallet with one output - priv := types.GeneratePrivateKey() - pub := priv.PublicKey() - utxo := wallet.SiacoinElement{ - SiacoinElement: types.SiacoinElement{ - StateElement: types.StateElement{ - ID: randomOutputID(), - // TODO: LeafIndex missing - // TODO: MerkleProof missing - }, - SiacoinOutput: types.SiacoinOutput{ - Value: oneSC.Mul64(20), - Address: StandardAddress(pub), - }, - MaturityHeight: 0, - }, - // TODO: Index missing - } - s := &mockStore{utxos: []wallet.SiacoinElement{utxo}} - w := NewSingleAddressWallet(priv, s, 0, zap.NewNop().Sugar()) - - numOutputsWithValue := func(v types.Currency) (c uint64) { - utxos, _ := w.UnspentOutputs() - for _, utxo := range utxos { - if utxo.Value.Equals(v) { - c++ - } - } - return - } - - applyTxn := func(txn types.Transaction) { - for _, input := range txn.SiacoinInputs { - for i, utxo := range s.utxos { - if input.ParentID == types.SiacoinOutputID(utxo.ID) { - s.utxos[i] = s.utxos[len(s.utxos)-1] - s.utxos = s.utxos[:len(s.utxos)-1] - } - } - } - for _, output := range txn.SiacoinOutputs { - s.utxos = append(s.utxos, - wallet.SiacoinElement{ - SiacoinElement: types.SiacoinElement{ - StateElement: types.StateElement{ - ID: randomOutputID(), - // TODO: LeafIndex missing - // TODO: MerkleProof missing - }, - SiacoinOutput: output, - MaturityHeight: 0, - }, - // TODO: Index missing - }) - } - } - - // assert number of outputs - if utxos, err := w.UnspentOutputs(); err != nil { - t.Fatal(err) - } 
else if len(utxos) != 1 { - t.Fatalf("unexpected number of outputs, %v != 1", len(utxos)) - } - - // split into 3 outputs of 6SC each - amount := oneSC.Mul64(6) - if txns, _, err := w.Redistribute(cs, 3, amount, types.NewCurrency64(1), nil); err != nil { - t.Fatal(err) - } else if len(txns) != 1 { - t.Fatalf("unexpected number of txns, %v != 1", len(txns)) - } else { - applyTxn(txns[0]) - } - - // assert number of outputs - if utxos, err := w.UnspentOutputs(); err != nil { - t.Fatal(err) - } else if len(s.utxos) != 4 { - t.Fatalf("unexpected number of outputs, %v != 4", len(utxos)) - } - - // assert number of outputs that hold 6SC - if cnt := numOutputsWithValue(amount); cnt != 3 { - t.Fatalf("unexpected number of 6SC outputs, %v != 3", cnt) - } - - // split into 3 outputs of 7SC each, expect this to fail - _, _, err := w.Redistribute(cs, 3, oneSC.Mul64(7), types.NewCurrency64(1), nil) - if err == nil || !strings.Contains(err.Error(), "insufficient balance") { - t.Fatalf("unexpected err: '%v'", err) - } - - // split into 2 outputs of 9SC - amount = oneSC.Mul64(9) - if txns, _, err := w.Redistribute(cs, 2, amount, types.NewCurrency64(1), nil); err != nil { - t.Fatal(err) - } else if len(txns) != 1 { - t.Fatalf("unexpected number of txns, %v != 1", len(txns)) - } else { - applyTxn(txns[0]) - } - - // assert number of outputs - if utxos, err := w.UnspentOutputs(); err != nil { - t.Fatal(err) - } else if len(s.utxos) != 3 { - t.Fatalf("unexpected number of outputs, %v != 3", len(utxos)) - } - - // assert number of outputs that hold 9SC - if cnt := numOutputsWithValue(amount); cnt != 2 { - t.Fatalf("unexpected number of 9SC outputs, %v != 2", cnt) - } - - // split into 5 outputs of 3SC - amount = oneSC.Mul64(3) - if txns, _, err := w.Redistribute(cs, 5, amount, types.NewCurrency64(1), nil); err != nil { - t.Fatal(err) - } else if len(txns) != 1 { - t.Fatalf("unexpected number of txns, %v != 1", len(txns)) - } else { - applyTxn(txns[0]) - } - - // assert number of 
outputs that hold 3SC - if cnt := numOutputsWithValue(amount); cnt != 5 { - t.Fatalf("unexpected number of 3SC outputs, %v != 5", cnt) - } - - // split into 4 outputs of 3SC - this should be a no-op - if _, _, err := w.Redistribute(cs, 4, amount, types.NewCurrency64(1), nil); err != nil { - t.Fatal(err) - } - - // split into 6 outputs of 3SC - if txns, _, err := w.Redistribute(cs, 6, amount, types.NewCurrency64(1), nil); err != nil { - t.Fatal(err) - } else if len(txns) != 1 { - t.Fatalf("unexpected number of txns, %v != 1", len(txns)) - } else { - applyTxn(txns[0]) - } - - // assert number of outputs that hold 3SC - if cnt := numOutputsWithValue(amount); cnt != 6 { - t.Fatalf("unexpected number of 3SC outputs, %v != 6", cnt) - } - - // split into 2 times the redistributeBatchSize - amount = oneSC.Div64(10) - if txns, _, err := w.Redistribute(cs, 2*redistributeBatchSize, amount, types.NewCurrency64(1), nil); err != nil { - t.Fatal(err) - } else if len(txns) != 2 { - t.Fatalf("unexpected number of txns, %v != 2", len(txns)) - } else { - applyTxn(txns[0]) - applyTxn(txns[1]) - } - - // assert number of outputs that hold 0.1SC - if cnt := numOutputsWithValue(amount); cnt != 2*redistributeBatchSize { - t.Fatalf("unexpected number of 0.1SC outputs, %v != 20", cnt) - } -} - -func randomOutputID() (t types.Hash256) { - frand.Read(t[:]) - return -} diff --git a/worker/worker.go b/worker/worker.go index 768d6089b..ec03a7c60 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -485,6 +485,7 @@ func (w *worker) rhpBroadcastHandler(jc jape.Context) { if jc.Check("could not fetch revision", err) != nil { return } + // Create txn with revision. txn := types.Transaction{ FileContractRevisions: []types.FileContractRevision{rev.Revision},