From 93605a200abdf8787d6f7c5270e73773f5880995 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 30 May 2023 23:34:49 -0400 Subject: [PATCH 01/53] v2: Remove package --- v2/chain/manager.go | 656 --------- v2/chain/manager_test.go | 106 -- v2/consensus/applyrevert_test.go | 80 -- v2/consensus/scratch.go | 122 -- v2/consensus/scratch_test.go | 245 ---- v2/consensus/state.go | 336 ----- v2/consensus/update.go | 441 ------ v2/consensus/update_test.go | 1124 ---------------- v2/consensus/validation.go | 626 --------- v2/consensus/validation_test.go | 1370 ------------------- v2/go.mod | 16 - v2/go.sum | 45 - v2/host/budget.go | 96 -- v2/host/budget_test.go | 128 -- v2/host/executor.go | 530 -------- v2/host/host.go | 128 -- v2/host/registry.go | 64 - v2/host/registry_test.go | 173 --- v2/internal/blake2b/blake2b.go | 48 - v2/internal/blake2b/blake2b_amd64.go | 22 - v2/internal/blake2b/blake2b_amd64.s | 1646 ----------------------- v2/internal/blake2b/blake2b_generic.go | 12 - v2/internal/blake2b/blake2b_test.go | 57 - v2/internal/blake2b/gen.go | 317 ----- v2/internal/chainutil/chainutil.go | 238 ---- v2/internal/chainutil/chainutil_test.go | 86 -- v2/internal/chainutil/store.go | 462 ------- v2/internal/chainutil/store_test.go | 153 --- v2/merkle/accumulator.go | 511 ------- v2/merkle/accumulator_test.go | 464 ------- v2/merkle/merkle.go | 56 - v2/merkle/multiproof.go | 490 ------- v2/merkle/multiproof_test.go | 99 -- v2/net/gateway/peer.go | 149 -- v2/net/gateway/peer_test.go | 90 -- v2/net/gateway/rpc.go | 268 ---- v2/net/rhp/builder.go | 218 --- v2/net/rhp/builder_test.go | 246 ---- v2/net/rhp/contracts.go | 244 ---- v2/net/rhp/contracts_test.go | 554 -------- v2/net/rhp/mdm.go | 517 ------- v2/net/rhp/merkle.go | 412 ------ v2/net/rhp/merkle_test.go | 387 ------ v2/net/rhp/registry.go | 187 --- v2/net/rhp/registry_test.go | 84 -- v2/net/rhp/rpc.go | 1269 ----------------- v2/net/rhp/rpc_test.go | 102 -- v2/net/rhp/session.go | 123 -- v2/net/rhp/session_test.go 
| 283 ---- v2/net/rhp/settings.go | 188 --- v2/net/rpc/rpc.go | 173 --- v2/types/currency.go | 318 ----- v2/types/currency_test.go | 599 --------- v2/types/encoding.go | 873 ------------ v2/types/encoding_test.go | 156 --- v2/types/policy.go | 301 ----- v2/types/policy_test.go | 236 ---- v2/types/types.go | 884 ------------ v2/types/types_test.go | 64 - 59 files changed, 19872 deletions(-) delete mode 100644 v2/chain/manager.go delete mode 100644 v2/chain/manager_test.go delete mode 100644 v2/consensus/applyrevert_test.go delete mode 100644 v2/consensus/scratch.go delete mode 100644 v2/consensus/scratch_test.go delete mode 100644 v2/consensus/state.go delete mode 100644 v2/consensus/update.go delete mode 100644 v2/consensus/update_test.go delete mode 100644 v2/consensus/validation.go delete mode 100644 v2/consensus/validation_test.go delete mode 100644 v2/go.mod delete mode 100644 v2/go.sum delete mode 100644 v2/host/budget.go delete mode 100644 v2/host/budget_test.go delete mode 100644 v2/host/executor.go delete mode 100644 v2/host/host.go delete mode 100644 v2/host/registry.go delete mode 100644 v2/host/registry_test.go delete mode 100644 v2/internal/blake2b/blake2b.go delete mode 100644 v2/internal/blake2b/blake2b_amd64.go delete mode 100644 v2/internal/blake2b/blake2b_amd64.s delete mode 100644 v2/internal/blake2b/blake2b_generic.go delete mode 100644 v2/internal/blake2b/blake2b_test.go delete mode 100644 v2/internal/blake2b/gen.go delete mode 100644 v2/internal/chainutil/chainutil.go delete mode 100644 v2/internal/chainutil/chainutil_test.go delete mode 100644 v2/internal/chainutil/store.go delete mode 100644 v2/internal/chainutil/store_test.go delete mode 100644 v2/merkle/accumulator.go delete mode 100644 v2/merkle/accumulator_test.go delete mode 100644 v2/merkle/merkle.go delete mode 100644 v2/merkle/multiproof.go delete mode 100644 v2/merkle/multiproof_test.go delete mode 100644 v2/net/gateway/peer.go delete mode 100644 v2/net/gateway/peer_test.go delete 
mode 100644 v2/net/gateway/rpc.go delete mode 100644 v2/net/rhp/builder.go delete mode 100644 v2/net/rhp/builder_test.go delete mode 100644 v2/net/rhp/contracts.go delete mode 100644 v2/net/rhp/contracts_test.go delete mode 100644 v2/net/rhp/mdm.go delete mode 100644 v2/net/rhp/merkle.go delete mode 100644 v2/net/rhp/merkle_test.go delete mode 100644 v2/net/rhp/registry.go delete mode 100644 v2/net/rhp/registry_test.go delete mode 100644 v2/net/rhp/rpc.go delete mode 100644 v2/net/rhp/rpc_test.go delete mode 100644 v2/net/rhp/session.go delete mode 100644 v2/net/rhp/session_test.go delete mode 100644 v2/net/rhp/settings.go delete mode 100644 v2/net/rpc/rpc.go delete mode 100644 v2/types/currency.go delete mode 100644 v2/types/currency_test.go delete mode 100644 v2/types/encoding.go delete mode 100644 v2/types/encoding_test.go delete mode 100644 v2/types/policy.go delete mode 100644 v2/types/policy_test.go delete mode 100644 v2/types/types.go delete mode 100644 v2/types/types_test.go diff --git a/v2/chain/manager.go b/v2/chain/manager.go deleted file mode 100644 index 7253e532..00000000 --- a/v2/chain/manager.go +++ /dev/null @@ -1,656 +0,0 @@ -package chain - -import ( - "errors" - "fmt" - "sort" - "sync" - "time" - - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/types" -) - -var ( - // ErrFutureBlock is returned when a block's timestamp is too far in the future. - ErrFutureBlock = errors.New("block's timestamp is too far in the future") - - // ErrKnownBlock is returned when a block has already been processed. - ErrKnownBlock = errors.New("block already known") - - // ErrUnknownIndex is returned when an index references a block that we do - // not have. - ErrUnknownIndex = errors.New("unknown index") - - // ErrPruned is returned for blocks that are valid, but have been pruned. - ErrPruned = errors.New("block has been pruned") -) - -// An ApplyUpdate reflects the changes to the blockchain resulting from the -// addition of a block. 
-type ApplyUpdate struct { - consensus.ApplyUpdate - Block types.Block -} - -// A RevertUpdate reflects the changes to the blockchain resulting from the -// removal of a block. -type RevertUpdate struct { - consensus.RevertUpdate - Block types.Block -} - -// A Subscriber processes updates to the blockchain. Implementations must not -// modify or retain the provided update object. -type Subscriber interface { - // Implementations MUST not commit updates to persistent storage unless mayCommit is set. - ProcessChainApplyUpdate(cau *ApplyUpdate, mayCommit bool) error - ProcessChainRevertUpdate(cru *RevertUpdate) error -} - -// A ManagerStore durably commits Manager-related data to storage. -type ManagerStore interface { - AddCheckpoint(c consensus.Checkpoint) error - Checkpoint(index types.ChainIndex) (consensus.Checkpoint, error) - Header(index types.ChainIndex) (types.BlockHeader, error) - - ExtendBest(index types.ChainIndex) error - RewindBest() error - BestIndex(height uint64) (types.ChainIndex, error) - - Flush() error - Close() error -} - -// A Manager tracks multiple blockchains and identifies the best valid -// chain. -type Manager struct { - store ManagerStore - cs consensus.State - chains []*consensus.ScratchChain - subscribers []Subscriber - lastFlush time.Time - - mu sync.Mutex -} - -// TipState returns the consensus state for the current tip. -func (m *Manager) TipState() consensus.State { - m.mu.Lock() - defer m.mu.Unlock() - return m.cs -} - -// Tip returns the tip of the best known valid chain. -func (m *Manager) Tip() types.ChainIndex { - return m.TipState().Index -} - -// Block returns the block at the specified index. -func (m *Manager) Block(index types.ChainIndex) (types.Block, error) { - m.mu.Lock() - defer m.mu.Unlock() - c, err := m.store.Checkpoint(index) - return c.Block, err -} - -// State returns the consensus state for the specified index. 
-func (m *Manager) State(index types.ChainIndex) (consensus.State, error) { - m.mu.Lock() - defer m.mu.Unlock() - c, err := m.store.Checkpoint(index) - return c.State, err -} - -// History returns a set of chain indices that span the entire chain, beginning -// with the last 10, and subsequently spaced exponentionally farther apart until -// reaching the genesis block. -func (m *Manager) History() ([]types.ChainIndex, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // determine base of store - // - // TODO: store should probably just expose this - baseHeight := uint64(sort.Search(int(m.cs.Index.Height), func(height int) bool { - _, err := m.store.BestIndex(uint64(height)) - return err == nil - })) - - histHeight := func(i int) uint64 { - offset := uint64(i) - if offset >= 10 { - offset = 7 + 1<<(i-8) // strange, but it works - } - if offset > m.cs.Index.Height-baseHeight { - offset = m.cs.Index.Height - baseHeight - } - return m.cs.Index.Height - offset - } - var history []types.ChainIndex - for { - index, err := m.store.BestIndex(histHeight(len(history))) - if err != nil { - return nil, fmt.Errorf("failed to get best index at %v: %w", histHeight(len(history)), err) - } - history = append(history, index) - if index.Height == baseHeight { - break - } - } - return history, nil -} - -// HeadersForHistory fills the provided slice with consecutive headers from the -// best chain, starting from the "attach point" -- the first ChainIndex in the -// history that is present in the best chain (or, if no match is found, -// genesis). -// -// The returned slice may have fewer than len(headers) elements if the end of -// the best chain is reached. 
-func (m *Manager) HeadersForHistory(headers []types.BlockHeader, history []types.ChainIndex) ([]types.BlockHeader, error) { - m.mu.Lock() - defer m.mu.Unlock() - var attachHeight uint64 - for _, h := range history { - if index, err := m.store.BestIndex(h.Height); err != nil && !errors.Is(err, ErrUnknownIndex) && !errors.Is(err, ErrPruned) { - return nil, fmt.Errorf("couldn't retrieve header at height %v: %w", h.Height, err) - } else if index == h { - attachHeight = h.Height - break - } - } - for i := range headers { - if index, err := m.store.BestIndex(attachHeight + uint64(i) + 1); err != nil { - return headers[:i], nil - } else if headers[i], err = m.store.Header(index); err != nil { - return nil, fmt.Errorf("couldn't retrieve header %v: %w", index, err) - } - } - return headers, nil -} - -// AddHeaders incorporates a chain of headers, using some or all of them to -// extend a ScratchChain (or create a new one). If the incorporation of these -// headers causes a ScratchChain to become the new (unvalidated) best chain, -// that chain is returned; otherwise, AddHeaders returns nil. 
-func (m *Manager) AddHeaders(headers []types.BlockHeader) (*consensus.ScratchChain, error) { - m.mu.Lock() - defer m.mu.Unlock() - if len(headers) == 0 { - return nil, nil - } - // if the last header is in any known chain, we can ignore the entire set -- - // we've already seen them - headerTip := headers[len(headers)-1] - if m.cs.Index == headerTip.Index() { - return nil, nil - } else if _, err := m.store.Header(headerTip.Index()); err == nil { - return nil, nil - } - for _, sc := range m.chains { - if sc.Contains(headerTip.Index()) { - if sc.TotalWork().Cmp(m.cs.TotalWork) > 0 { - return sc, nil - } - return nil, nil - } - } - - // attempt to locate the chain that these headers attach to - var chain *consensus.ScratchChain - for _, sc := range m.chains { - if headerTip.Height <= sc.Tip().Height || headerTip.Height > sc.Tip().Height+uint64(len(headers)) { - continue - } - attachHeight := len(headers) - int(headerTip.Height-sc.Tip().Height) - if sc.Tip() == headers[attachHeight].ParentIndex() { - chain = sc - headers = headers[attachHeight:] - break - } - } - - // no existing chain; attempt to create a new one - if chain == nil { - // locate attach point - // - // TODO: linear scan is horribly inefficient here - // TODO: add a special case for attaching to the current tip - if _, err := m.store.Header(headers[0].ParentIndex()); err != nil { - return nil, fmt.Errorf("orphaned header chain %v: %w", headers[0].ParentIndex(), err) - } - for { - h := headers[0] - if _, err := m.store.Header(h.Index()); errors.Is(err, ErrUnknownIndex) { - break - } else if err != nil { - return nil, fmt.Errorf("could not read header: %w", err) - } - headers = headers[1:] - if len(headers) == 0 { - // NOTE: this should be unreachable because of the tip check at - // the top of this function, but we might as well handle it - // safely to prevent an OOB panic - return nil, nil - } - } - base, err := m.store.Header(headers[0].ParentIndex()) - if err != nil { - return nil, fmt.Errorf("could 
not load base of new chain %v: %w", headers[0].ParentIndex(), err) - } - c, err := m.store.Checkpoint(base.Index()) - if err != nil { - return nil, fmt.Errorf("could not load checkpoint %v: %w", base.Index(), err) - } - chain = consensus.NewScratchChain(c.State) - m.chains = append(m.chains, chain) - } - - // validate the headers - for _, h := range headers { - if h.Timestamp.After(m.cs.MaxFutureTimestamp(time.Now())) { - return nil, ErrFutureBlock - } else if err := chain.AppendHeader(h); err != nil { - // TODO: it's possible that the chain prior to this header is still - // the best; in that case, we should still reorg to it. But should - // the error be returned as well? - return nil, fmt.Errorf("header %v was invalid: %w", h.Index(), err) - } - } - - if chain.TotalWork().Cmp(m.cs.TotalWork) > 0 { - return chain, nil - } - return nil, nil -} - -// AddBlocks adds a sequence of blocks to a known ScratchChain. If the blocks -// are valid, the ScratchChain may become the new best chain, triggering a -// reorg. 
-func (m *Manager) AddBlocks(blocks []types.Block) (*consensus.ScratchChain, error) { - m.mu.Lock() - defer m.mu.Unlock() - if len(blocks) == 0 { - return nil, nil - } - index := blocks[0].Index() - var chain *consensus.ScratchChain - for _, sc := range m.chains { - if !sc.FullyValidated() && sc.ValidTip().Height >= (index.Height-1) && sc.Contains(index) { - chain = sc - break - } - } - if chain == nil { - return nil, fmt.Errorf("index %v does not attach to any known chain: %w", index, ErrUnknownIndex) - } - - // the chain may already contain some of the supplied blocks; ignore - // the ones we already have - have := chain.ValidTip().Height - (index.Height - 1) - blocks = blocks[have:] - - for _, b := range blocks { - c, err := chain.ApplyBlock(b) - if err != nil { - return nil, fmt.Errorf("invalid block %v: %w", b.Index(), err) - } else if err := m.store.AddCheckpoint(c); err != nil { - return nil, fmt.Errorf("couldn't store block: %w", err) - } else if c.State.TotalWork.Cmp(m.cs.TotalWork) <= 0 { - // keep validating blocks until this becomes the best chain - continue - } - - // this is now the best chain; if we haven't reorged to it yet, do so - if m.cs.Index != c.Block.Header.ParentIndex() { - if err := m.reorgTo(chain); err != nil { - return nil, fmt.Errorf("reorg failed: %w", err) - } - continue - } - // otherwise, apply directly to tip - if err := m.applyTip(c.State.Index); err != nil { - return nil, err - } - } - - if chain.FullyValidated() { - m.discardChain(chain) - } - - return chain, nil -} - -// AddTipBlock adds a single block to the current tip, triggering a reorg. 
-func (m *Manager) AddTipBlock(b types.Block) error { - m.mu.Lock() - defer m.mu.Unlock() - - // check whether the block attaches to our tip - if b.Header.ParentID != m.cs.Index.ID { - // if we've already processed this block, ignore it - if m.cs.Index == b.Index() { - return ErrKnownBlock - } - for _, sc := range m.chains { - if sc.Contains(b.Index()) && sc.ValidTip().Height >= b.Header.Height { - return ErrKnownBlock - } - } - if _, err := m.store.Header(b.Index()); err == nil { - return ErrKnownBlock - } else if err != ErrUnknownIndex { - return fmt.Errorf("could not load header %v: %w", b.Index(), err) - } - // TODO: check if we have the block's parent, and if so, whether adding - // this block would make it the best chain - return fmt.Errorf("missing parent for %v: %w", b.Index(), ErrUnknownIndex) - } - - // validate and store - if b.Header.Timestamp.After(m.cs.MaxFutureTimestamp(time.Now())) { - return ErrFutureBlock - } else if err := m.cs.ValidateBlock(b); err != nil { - return fmt.Errorf("invalid block: %w", err) - } - sau := consensus.ApplyBlock(m.cs, b) - if err := m.store.AddCheckpoint(consensus.Checkpoint{Block: b, State: sau.State}); err != nil { - return fmt.Errorf("failed to add checkpoint: %w", err) - } else if err := m.store.ExtendBest(b.Index()); err != nil { - return fmt.Errorf("couldn't update tip: %w", err) - } - m.cs = sau.State - - mayCommit := false - if time.Since(m.lastFlush) > time.Minute { - if err := m.store.Flush(); err != nil { - return fmt.Errorf("couldn't flush store: %w", err) - } - m.lastFlush = time.Now() - mayCommit = true - } - - // update subscribers - update := ApplyUpdate{sau, b} - for _, s := range m.subscribers { - if err := s.ProcessChainApplyUpdate(&update, mayCommit); err != nil { - return fmt.Errorf("subscriber %T: %w", s, err) - } - } - return nil -} - -// revertTip reverts the current tip. 
-func (m *Manager) revertTip() error { - c, err := m.store.Checkpoint(m.cs.Index) - if err != nil { - return fmt.Errorf("failed to get checkpoint for index %v: %w", m.cs.Index, err) - } - b := c.Block - c, err = m.store.Checkpoint(b.Header.ParentIndex()) - if err != nil { - return fmt.Errorf("failed to get checkpoint for parent %v: %w", b.Header.ParentIndex(), err) - } - cs := c.State - - sru := consensus.RevertBlock(cs, b) - update := RevertUpdate{sru, b} - for _, s := range m.subscribers { - if err := s.ProcessChainRevertUpdate(&update); err != nil { - return fmt.Errorf("subscriber %T: %w", s, err) - } - } - if err := m.store.RewindBest(); err != nil { - return fmt.Errorf("unable to rewind: %w", err) - } - - m.cs = cs - return nil -} - -// applyTip adds a block to the current tip. -func (m *Manager) applyTip(index types.ChainIndex) error { - c, err := m.store.Checkpoint(index) - if err != nil { - return fmt.Errorf("couldn't retrieve entry: %w", err) - } else if c.Block.Header.ParentIndex() != m.cs.Index { - panic("applyTip called with non-attaching block") - } - if err := m.store.ExtendBest(c.State.Index); err != nil { - return fmt.Errorf("couldn't update tip: %w", err) - } - - // flush at most once per minute; if we haven't flushed, tell the subscriber - // that it must not commit chain data to disk - mayCommit := false - if time.Since(m.lastFlush) > time.Minute { - if err := m.store.Flush(); err != nil { - return fmt.Errorf("couldn't flush store: %w", err) - } - m.lastFlush = time.Now() - mayCommit = true - } - - sau := consensus.ApplyBlock(m.cs, c.Block) - update := ApplyUpdate{sau, c.Block} - for _, s := range m.subscribers { - if err := s.ProcessChainApplyUpdate(&update, mayCommit); err != nil { - return fmt.Errorf("subscriber %T: %w", s, err) - } - } - - m.cs = sau.State - return nil -} - -func (m *Manager) reorgTo(sc *consensus.ScratchChain) error { - // starting at sc.Base(), follow parent chain until we connect to the - // current best chain - var rebase 
[]types.ChainIndex - base, err := m.store.Header(sc.Base()) - if err != nil { - return fmt.Errorf("could not load base of new chain %v: %w", sc.Base(), err) - } - for { - if index, err := m.store.BestIndex(base.Height); err != nil && !errors.Is(err, ErrUnknownIndex) { - return fmt.Errorf("unable to get index for %v: %w", base.Height, err) - } else if index == base.Index() { - break - } - rebase = append(rebase, base.Index()) - base, err = m.store.Header(base.ParentIndex()) - if err != nil { - return fmt.Errorf("could not rebase new chain to %v: %w", base.ParentIndex(), err) - } - } - - // revert to branch point - for m.cs.Index != base.Index() { - if err := m.revertTip(); err != nil { - return fmt.Errorf("couldn't revert block %v: %w", m.cs.Index, err) - } - } - - // apply to scratch chain tip - for m.cs.Index != sc.ValidTip() { - var next types.ChainIndex - if len(rebase) > 0 { - rebase, next = rebase[:len(rebase)-1], rebase[len(rebase)-1] - } else { - next = sc.Index(m.cs.Index.Height + 1) - } - if err := m.applyTip(next); err != nil { - return fmt.Errorf("couldn't apply block %v: %w", next, err) - } - } - - return nil -} - -func (m *Manager) discardChain(sc *consensus.ScratchChain) { - for i := range m.chains { - if m.chains[i] == sc { - m.chains = append(m.chains[:i], m.chains[i+1:]...) - break - } - } -} - -func (m *Manager) reorgPath(a, b types.ChainIndex) (revert, apply []types.ChainIndex, err error) { - // TODO: In the common case, a and b will rejoin the best chain fairly - // quickly. Once both are on the best chain, we can determine their common - // ancestor directly, and read the path elements via BestIndex, which is - // (presumably) much faster than "parent-chasing" via Header. 
- - // helper function for "rewinding" to the parent index - rewind := func(index *types.ChainIndex) bool { - h, hErr := m.store.Header(*index) - if hErr != nil { - err = fmt.Errorf("failed to get header %v: %w", a, hErr) - return false - } - *index = h.ParentIndex() - return true - } - - // rewind a or b until their heights match - for a.Height > b.Height { - revert = append(revert, a) - if !rewind(&a) { - return - } - } - for b.Height > a.Height { - apply = append(apply, b) - if !rewind(&b) { - return - } - } - - // now rewind both until we reach a common ancestor - for a != b { - revert = append(revert, a) - apply = append(apply, b) - if !rewind(&a) || !rewind(&b) { - return - } - } - - // reverse the apply path - for i := 0; i < len(apply)/2; i++ { - j := len(apply) - i - 1 - apply[i], apply[j] = apply[j], apply[i] - } - return revert, apply, nil -} - -// AddSubscriber subscribes s to m, ensuring that it will receive updates when -// the best chain changes. If tip does not match the Manager's current tip, s is -// updated accordingly. 
-func (m *Manager) AddSubscriber(s Subscriber, tip types.ChainIndex) error { - m.mu.Lock() - defer m.mu.Unlock() - - // reorg s to the current tip, if necessary - revert, apply, err := m.reorgPath(tip, m.cs.Index) - if err != nil { - return fmt.Errorf("failed to establish reorg path from %v to %v: %w", tip, m.cs.Index, err) - } - for _, index := range revert { - c, err := m.store.Checkpoint(index) - if err != nil { - return fmt.Errorf("failed to get revert checkpoint %v: %w", index, err) - } - b := c.Block - c, err = m.store.Checkpoint(b.Header.ParentIndex()) - if err != nil { - return fmt.Errorf("failed to get revert parent checkpoint %v: %w", b.Header.ParentIndex(), err) - } - sru := consensus.RevertBlock(c.State, b) - if err := s.ProcessChainRevertUpdate(&RevertUpdate{sru, b}); err != nil { - return fmt.Errorf("failed to process revert update: %w", err) - } - } - for _, index := range apply { - c, err := m.store.Checkpoint(index) - if err != nil { - return fmt.Errorf("failed to get apply checkpoint %v: %w", index, err) - } - b := c.Block - c, err = m.store.Checkpoint(b.Header.ParentIndex()) - if err != nil { - return fmt.Errorf("failed to get apply parent checkpoint %v: %w", b.Header.ParentIndex(), err) - } - sau := consensus.ApplyBlock(c.State, b) - shouldCommit := index == m.cs.Index - if err := s.ProcessChainApplyUpdate(&ApplyUpdate{sau, b}, shouldCommit); err != nil { - return fmt.Errorf("failed to process apply update: %w", err) - } - } - m.subscribers = append(m.subscribers, s) - return nil -} - -// UpdateElementProof updates the Merkle proof of the provided StateElement, -// which must be valid as of index a, so that it is valid as of index b. An -// error is returned if the Manager cannot establish a path from a to b, or if -// the StateElement does not exist at index b. 
-func (m *Manager) UpdateElementProof(e *types.StateElement, a, b types.ChainIndex) error { - m.mu.Lock() - defer m.mu.Unlock() - revert, apply, err := m.reorgPath(a, b) - if err != nil { - return fmt.Errorf("failed to establish reorg path from %v to %v: %w", a, b, err) - } - for _, index := range revert { - c, err := m.store.Checkpoint(index) - if err != nil { - return fmt.Errorf("failed to get revert checkpoint %v: %w", index, err) - } - b := c.Block - c, err = m.store.Checkpoint(b.Header.ParentIndex()) - if err != nil { - return fmt.Errorf("failed to get revert parent checkpoint %v: %w", b.Header.ParentIndex(), err) - } - sru := consensus.RevertBlock(c.State, b) - if e.LeafIndex >= sru.State.Elements.NumLeaves { - return fmt.Errorf("element %v does not exist at destination index", e.ID) - } - sru.UpdateElementProof(e) - } - for _, index := range apply { - c, err := m.store.Checkpoint(index) - if err != nil { - return fmt.Errorf("failed to get apply checkpoint %v: %w", index, err) - } - b := c.Block - c, err = m.store.Checkpoint(b.Header.ParentIndex()) - if err != nil { - return fmt.Errorf("failed to get apply parent checkpoint %v: %w", b.Header.ParentIndex(), err) - } - sau := consensus.ApplyBlock(c.State, b) - sau.UpdateElementProof(e) - } - return nil -} - -// Close flushes and closes the underlying store. -func (m *Manager) Close() error { - m.mu.Lock() - defer m.mu.Unlock() - if err := m.store.Flush(); err != nil { - m.store.Close() - return fmt.Errorf("error flushing store: %w", err) - } - return m.store.Close() -} - -// NewManager returns a Manager initialized with the provided Store and State. 
-func NewManager(store ManagerStore, cs consensus.State) *Manager { - return &Manager{ - store: store, - cs: cs, - lastFlush: time.Now(), - } -} diff --git a/v2/chain/manager_test.go b/v2/chain/manager_test.go deleted file mode 100644 index 3c5812fd..00000000 --- a/v2/chain/manager_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package chain_test - -import ( - "reflect" - "testing" - - "go.sia.tech/core/v2/chain" - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/internal/chainutil" - "go.sia.tech/core/v2/types" -) - -func newTestStore(tb testing.TB, checkpoint consensus.Checkpoint) *chainutil.FlatStore { - fs, _, err := chainutil.NewFlatStore(tb.TempDir(), checkpoint) - if err != nil { - tb.Fatal(err) - } - return fs -} - -type historySubscriber struct { - revertHistory []uint64 - applyHistory []uint64 -} - -func (hs *historySubscriber) ProcessChainApplyUpdate(cau *chain.ApplyUpdate, _ bool) error { - hs.applyHistory = append(hs.applyHistory, cau.Block.Header.Height) - return nil -} - -func (hs *historySubscriber) ProcessChainRevertUpdate(cru *chain.RevertUpdate) error { - hs.revertHistory = append(hs.revertHistory, cru.Block.Header.Height) - return nil -} - -func TestManager(t *testing.T) { - sim := chainutil.NewChainSim() - - store := newTestStore(t, sim.Genesis) - cm := chain.NewManager(store, sim.State) - defer cm.Close() - - var hs historySubscriber - cm.AddSubscriber(&hs, cm.Tip()) - - // mine 5 blocks, fork, then mine 5 more blocks - sim.MineBlocks(5) - fork := sim.Fork() - sim.MineBlocks(5) - - // give the blocks to the manager - for _, b := range sim.Chain { - if err := cm.AddTipBlock(b); err != nil { - t.Fatal(err) - } - } - - // all blocks should have been applied - if !reflect.DeepEqual(hs.revertHistory, []uint64(nil)) { - t.Fatal("no blocks should have been reverted:", hs.revertHistory) - } else if !reflect.DeepEqual(hs.applyHistory, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { - t.Fatal("10 blocks should have been applied:", hs.applyHistory) - } - - // 
mine 10 blocks on the fork, ensuring that it has more total work, and give them to the manager - betterChain := fork.MineBlocks(10) - chainutil.FindBlockNonce(fork.State, &betterChain[9].Header, types.HashRequiringWork(sim.State.TotalWork)) - hs.revertHistory = nil - hs.applyHistory = nil - if _, err := cm.AddHeaders(chainutil.JustHeaders(betterChain)); err != nil { - t.Fatal(err) - } else if _, err := cm.AddBlocks(betterChain); err != nil { - t.Fatal(err) - } - - // check that we reorged to the better chain - if !reflect.DeepEqual(hs.revertHistory, []uint64{10, 9, 8, 7, 6}) { - t.Fatal("5 blocks should have been reverted:", hs.revertHistory) - } else if !reflect.DeepEqual(hs.applyHistory, []uint64{6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) { - t.Fatal("10 blocks should have been applied:", hs.applyHistory) - } - if cm.Tip() != betterChain[len(betterChain)-1].Index() { - t.Fatal("didn't reorg to better chain") - } - for _, b := range betterChain { - index, err := store.BestIndex(b.Header.Height) - if err != nil { - t.Fatal(err) - } else if index != b.Index() { - t.Error("store does not contain better chain:", index, b.Index()) - } - } - - // add a subscriber whose tip is in the middle of the old chain - subTip := sim.Chain[7].Index() - var hs2 historySubscriber - if err := cm.AddSubscriber(&hs2, subTip); err != nil { - t.Fatal(err) - } - // check that the subscriber was properly synced - if !reflect.DeepEqual(hs2.revertHistory, []uint64{8, 7, 6}) { - t.Fatal("3 blocks should have been reverted:", hs2.revertHistory) - } else if !reflect.DeepEqual(hs2.applyHistory, []uint64{6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) { - t.Fatal("10 blocks should have been applied:", hs2.applyHistory) - } -} diff --git a/v2/consensus/applyrevert_test.go b/v2/consensus/applyrevert_test.go deleted file mode 100644 index 116db37b..00000000 --- a/v2/consensus/applyrevert_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package consensus_test - -import ( - "sort" - "testing" - - "go.sia.tech/core/v2/consensus" 
- "go.sia.tech/core/v2/internal/chainutil" - "go.sia.tech/core/v2/types" -) - -func TestApplyRevertBlock(t *testing.T) { - // mine 100 blocks, then apply/revert them in a random walk, ensuring that - // the results remain consistent - - sim := chainutil.NewChainSim() - - s := sim.State - sau := consensus.GenesisUpdate(sim.Genesis.Block, s.Difficulty) - var elems []types.StateElement - for _, sce := range sau.NewSiacoinElements { - elems = append(elems, sce.StateElement) - } - sort.Slice(elems, func(i, j int) bool { - return elems[i].LeafIndex < elems[j].LeafIndex - }) - for i := 0; i < 100; i++ { - b := sim.MineBlock() - sau = consensus.ApplyBlock(s, b) - - // create new accumulator tree, using new + updated elements - newElems := append([]types.StateElement(nil), elems...) - for i := range newElems { - newElems[i].MerkleProof = append([]types.Hash256(nil), newElems[i].MerkleProof...) - sau.UpdateElementProof(&newElems[i]) - } - for _, sce := range sau.NewSiacoinElements { - e := sce.StateElement - e.MerkleProof = append([]types.Hash256(nil), e.MerkleProof...) - newElems = append(newElems, e) - } - if len(newElems) != int(sau.State.Elements.NumLeaves) { - t.Fatal("accumulator size does not match", len(newElems), sau.State.Elements.NumLeaves) - } - sort.Slice(newElems, func(i, j int) bool { - return newElems[i].LeafIndex < newElems[j].LeafIndex - }) - - // revert the block - sru := consensus.RevertBlock(s, b) - var relems []types.StateElement - for _, e := range newElems { - if e.LeafIndex < sru.State.Elements.NumLeaves { - e.MerkleProof = append([]types.Hash256(nil), e.MerkleProof...) 
- sru.UpdateElementProof(&e) - relems = append(relems, e) - } - } - if len(relems) != len(elems) { - t.Fatal("tree does not match after revert:", len(relems), len(elems)) - } - - // check equality - for i := range relems { - a, b := relems[i], elems[i] - eq := a.ID == b.ID && a.LeafIndex == b.LeafIndex && len(a.MerkleProof) == len(b.MerkleProof) - if eq { - for i := range a.MerkleProof { - eq = eq && a.MerkleProof[i] == b.MerkleProof[i] - } - } - if !eq { - t.Error("reverted element does not match previous accumulator:", "\n", a, "\n\n", b) - } - } - - s = sau.State - elems = newElems - } -} diff --git a/v2/consensus/scratch.go b/v2/consensus/scratch.go deleted file mode 100644 index 5bc62d4b..00000000 --- a/v2/consensus/scratch.go +++ /dev/null @@ -1,122 +0,0 @@ -package consensus - -import ( - "errors" - - "go.sia.tech/core/v2/types" -) - -// A ScratchChain processes a potential extension or fork of the best chain, -// first validating its headers, then its transactions. -type ScratchChain struct { - base types.ChainIndex - headers []types.BlockHeader - - hs State // for validating headers - ts State // for validating transactions -} - -// AppendHeader validates the supplied header and appends it to the chain. -// Headers must be appended before their transactions can be filled in with -// AppendBlockTransactions. -func (sc *ScratchChain) AppendHeader(h types.BlockHeader) error { - if err := sc.hs.validateHeader(h); err != nil { - return err - } - applyHeader(&sc.hs, h) - sc.headers = append(sc.headers, h) - return nil -} - -// ApplyBlock applies b to the chain. The block's validated header must already -// exist in the chain. 
-func (sc *ScratchChain) ApplyBlock(b types.Block) (Checkpoint, error) { - if sc.ts.Index.Height+1 > sc.hs.Index.Height { - return Checkpoint{}, errors.New("more blocks than headers") - } else if err := sc.ts.ValidateBlock(b); err != nil { - return Checkpoint{}, err - } - sc.ts = ApplyBlock(sc.ts, b).State - return Checkpoint{ - Block: b, - State: sc.ts, - }, nil -} - -// Index returns the chain index at the specified height. The index may or may -// not have a corresponding validated block. -func (sc *ScratchChain) Index(height uint64) types.ChainIndex { - // if the height matches our current tip, return that - if height == sc.Tip().Height { - return sc.Tip() - } - // otherwise, we should have a child header, so we can use its ParentIndex - // instead of re-hashing the actual header - return sc.headers[height-sc.Base().Height].ParentIndex() -} - -// Base returns the base of the header chain, i.e. the parent of the first -// header. -func (sc *ScratchChain) Base() types.ChainIndex { - return sc.base -} - -// Tip returns the tip of the header chain, which may or may not have a -// corresponding validated block. -func (sc *ScratchChain) Tip() types.ChainIndex { - return sc.hs.Index -} - -// UnvalidatedBase returns the base of the unvalidated header chain, i.e. the -// lowest index for which there is no validated block. If all of the blocks have -// been validated, UnvalidatedBase panics. -func (sc *ScratchChain) UnvalidatedBase() types.ChainIndex { - if sc.ts.Index.Height == sc.base.Height { - return sc.base - } - return sc.Index(sc.ts.Index.Height + 1) -} - -// ValidTip returns the tip of the validated header chain, i.e. the highest -// index for which there is a known validated block. -func (sc *ScratchChain) ValidTip() types.ChainIndex { - return sc.ts.Index -} - -// FullyValidated is equivalent to sc.Tip() == sc.ValidTip(). 
-func (sc *ScratchChain) FullyValidated() bool { - return sc.ts.Index == sc.hs.Index -} - -// TotalWork returns the total work of the header chain. -func (sc *ScratchChain) TotalWork() types.Work { - return sc.hs.TotalWork -} - -// Contains returns whether the chain contains the specified index. It does not -// indicate whether the transactions for that index have been validated. -func (sc *ScratchChain) Contains(index types.ChainIndex) bool { - if !(sc.Base().Height < index.Height && index.Height <= sc.Tip().Height) { - return false - } - return sc.Index(index.Height) == index -} - -// Unvalidated returns the indexes of all the unvalidated blocks in the chain. -func (sc *ScratchChain) Unvalidated() []types.ChainIndex { - headers := sc.headers[sc.ts.Index.Height-sc.Base().Height:] - indices := make([]types.ChainIndex, len(headers)) - for i := range indices { - indices[i] = sc.Index(headers[i].Height) - } - return indices -} - -// NewScratchChain initializes a ScratchChain with the provided State. 
-func NewScratchChain(s State) *ScratchChain { - return &ScratchChain{ - base: s.Index, - hs: s, - ts: s, - } -} diff --git a/v2/consensus/scratch_test.go b/v2/consensus/scratch_test.go deleted file mode 100644 index d002354a..00000000 --- a/v2/consensus/scratch_test.go +++ /dev/null @@ -1,245 +0,0 @@ -package consensus - -import ( - "math" - "testing" - "time" - - "go.sia.tech/core/v2/types" - - "lukechampine.com/frand" -) - -// copied from chainutil (can't import due to cycle) -func findBlockNonce(sc State, h *types.BlockHeader, target types.BlockID) { - h.Nonce = frand.Uint64n(math.MaxUint32) * sc.NonceFactor() - for !h.ID().MeetsTarget(target) { - h.Nonce += sc.NonceFactor() - } -} - -func mineBlock(s State, parent types.Block, txns ...types.Transaction) types.Block { - b := types.Block{ - Header: types.BlockHeader{ - Height: parent.Header.Height + 1, - ParentID: parent.Header.ID(), - Timestamp: parent.Header.Timestamp.Add(time.Second), - }, - Transactions: txns, - } - b.Header.Commitment = s.Commitment(b.Header.MinerAddress, b.Transactions) - findBlockNonce(s, &b.Header, types.HashRequiringWork(s.Difficulty)) - return b -} - -func TestScratchChain(t *testing.T) { - pubkey, privkey := testingKeypair(0) - ourAddr := types.StandardAddress(pubkey) - - b := genesisWithSiacoinOutputs([]types.SiacoinOutput{ - {Value: types.Siacoins(1), Address: ourAddr}, - {Value: types.Siacoins(2), Address: ourAddr}, - {Value: types.Siacoins(3), Address: ourAddr}, - {Value: types.Siacoins(4), Address: ourAddr}, - {Value: types.Siacoins(5), Address: ourAddr}, - {Value: types.Siacoins(6), Address: ourAddr}, - {Value: types.Siacoins(7), Address: ourAddr}, - {Value: types.Siacoins(8), Address: ourAddr}, - {Value: types.Siacoins(9), Address: ourAddr}, - {Value: types.Siacoins(10), Address: ourAddr}, - {Value: types.Siacoins(11), Address: ourAddr}, - {Value: types.Siacoins(12), Address: ourAddr}, - {Value: types.Siacoins(13), Address: ourAddr}, - }...) 
- sau := GenesisUpdate(b, testingDifficulty) - - sc := NewScratchChain(sau.State) - if sc.Base() != sau.State.Index { - t.Fatal("wrong base:", sc.Base()) - } else if sc.Tip() != sau.State.Index { - t.Fatal("wrong tip:", sc.Tip()) - } else if sc.UnvalidatedBase() != sau.State.Index { - t.Fatal("wrong unvalidated base:", sc.UnvalidatedBase()) - } - var blocks []types.Block - origOutputs := sau.NewSiacoinElements - toSpend := origOutputs[5:10] - var spendTotal types.Currency - for _, o := range toSpend { - spendTotal = spendTotal.Add(o.Value) - } - txn := types.Transaction{ - SiacoinOutputs: []types.SiacoinOutput{{ - Value: spendTotal.Sub(types.Siacoins(1)), - Address: ourAddr, - }}, - MinerFee: types.Siacoins(1), - } - for _, o := range toSpend { - txn.SiacoinInputs = append(txn.SiacoinInputs, types.SiacoinInput{ - Parent: o, - SpendPolicy: types.PolicyPublicKey(pubkey), - }) - } - signAllInputs(&txn, sau.State, privkey) - - b = mineBlock(sau.State, b, txn) - if sc.Contains(b.Index()) { - t.Fatal("scratch chain should not contain the header yet") - } else if _, err := sc.ApplyBlock(b); err == nil { - t.Fatal("shouldn't be able to apply a block without a corresponding header") - } else if err := sc.AppendHeader(b.Header); err != nil { - t.Fatal(err) - } else if sc.Tip() != b.Index() { - t.Fatal("wrong tip:", sc.Tip()) - } else if sc.UnvalidatedBase() != sc.Base() { - t.Fatal("wrong unvalidated base:", sc.UnvalidatedBase()) - } else if !sc.Contains(b.Index()) { - t.Fatal("scratch chain should contain the header") - } else if sc.TotalWork() != testingDifficulty { - t.Fatal("wrong total work:", sc.TotalWork()) - } - blocks = append(blocks, b) - - sau = ApplyBlock(sau.State, b) - sau.UpdateElementProof(&origOutputs[2].StateElement) - newOutputs := sau.NewSiacoinElements - - txn = types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: newOutputs[1], - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Value: 
newOutputs[1].Value.Sub(types.Siacoins(1)), - Address: ourAddr, - }}, - MinerFee: types.Siacoins(1), - } - signAllInputs(&txn, sau.State, privkey) - - b = mineBlock(sau.State, b, txn) - if err := sc.AppendHeader(b.Header); err != nil { - t.Fatal(err) - } - blocks = append(blocks, b) - sau = ApplyBlock(sau.State, b) - for i := range origOutputs { - sau.UpdateElementProof(&origOutputs[i].StateElement) - } - toSpend = origOutputs[2:3] - spendTotal = types.ZeroCurrency - for _, o := range toSpend { - spendTotal = spendTotal.Add(o.Value) - } - parentTxn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: toSpend[0], - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Value: spendTotal, - Address: ourAddr, - }}, - } - signAllInputs(&parentTxn, sau.State, privkey) - childTxn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: types.SiacoinElement{ - StateElement: types.StateElement{ - ID: types.ElementID{ - Source: types.Hash256(parentTxn.ID()), - Index: 0, - }, - LeafIndex: types.EphemeralLeafIndex, - }, - SiacoinOutput: types.SiacoinOutput{ - Value: spendTotal, - Address: ourAddr, - }, - }, - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Value: spendTotal.Sub(types.Siacoins(1)), - Address: ourAddr, - }}, - MinerFee: types.Siacoins(1), - } - signAllInputs(&childTxn, sau.State, privkey) - - b = mineBlock(sau.State, b, parentTxn, childTxn) - if err := sc.AppendHeader(b.Header); err != nil { - t.Fatal(err) - } - blocks = append(blocks, b) - - // should have one unvalidated header for each block - if sc.FullyValidated() { - t.Fatal("scratch chain should not be fully validated yet") - } else if len(sc.Unvalidated()) != len(blocks) { - t.Fatal("unvalidated headers not equal to blocks") - } - for i, index := range sc.Unvalidated() { - if index != blocks[i].Index() { - t.Fatal("unvalidated header not equal to block") - } else if 
sc.Index(index.Height) != index { - t.Fatal("inconsistent index:", sc.Index(index.Height), index) - } - } - - // validate all blocks - for _, b := range blocks { - if _, err := sc.ApplyBlock(b); err != nil { - t.Fatal(err) - } else if sc.ValidTip() != b.Index() { - t.Fatal("wrong valid tip:", sc.ValidTip()) - } else if len(sc.Unvalidated()) > 0 && sc.UnvalidatedBase() != sc.Index(b.Header.Height+1) { - t.Fatal("wrong unvalidated base:", sc.UnvalidatedBase()) - } - } - if !sc.FullyValidated() { - t.Fatal("scratch chain should be fully validated") - } else if len(sc.Unvalidated()) != 0 { - t.Fatal("scratch chain should not have any unvalidated headers") - } -} - -func TestScratchChainDifficultyAdjustment(t *testing.T) { - b := genesisWithSiacoinOutputs() - s := GenesisUpdate(b, testingDifficulty).State - - // mine a block, triggering adjustment - sc := NewScratchChain(s) - b = mineBlock(s, b) - if err := sc.AppendHeader(b.Header); err != nil { - t.Fatal(err) - } else if _, err := sc.ApplyBlock(b); err != nil { - t.Fatal(err) - } - s = ApplyBlock(s, b).State - - // difficulty should have changed - currentDifficulty := sc.ts.Difficulty - if currentDifficulty.Cmp(testingDifficulty) <= 0 { - t.Fatal("difficulty should have increased") - } - - // mine a block with less than the minimum work; it should be rejected - b = mineBlock(s, b) - for types.WorkRequiredForHash(b.ID()).Cmp(currentDifficulty) >= 0 { - b.Header.Nonce = frand.Uint64n(math.MaxUint32) * s.NonceFactor() - } - if err := sc.AppendHeader(b.Header); err == nil { - t.Fatal("expected block to be rejected") - } - - // mine at actual difficulty - findBlockNonce(s, &b.Header, types.HashRequiringWork(s.Difficulty)) - if err := sc.AppendHeader(b.Header); err != nil { - t.Fatal(err) - } else if _, err := sc.ApplyBlock(b); err != nil { - t.Fatal(err) - } - s = ApplyBlock(s, b).State -} diff --git a/v2/consensus/state.go b/v2/consensus/state.go deleted file mode 100644 index 044344cf..00000000 --- 
a/v2/consensus/state.go +++ /dev/null @@ -1,336 +0,0 @@ -package consensus - -import ( - "encoding/binary" - "math/bits" - "sync" - "time" - - "go.sia.tech/core/v2/merkle" - "go.sia.tech/core/v2/types" -) - -const ( - blocksPerYear = 144 * 365 - - asicHardforkHeight = 179000 - foundationHardforkHeight = 300000 - - foundationSubsidyFrequency = blocksPerYear / 12 -) - -// Pool for reducing heap allocations when hashing. This is only necessary -// because blake2b.New256 returns a hash.Hash interface, which prevents the -// compiler from doing escape analysis. Can be removed if we switch to an -// implementation whose constructor returns a concrete type. -var hasherPool = &sync.Pool{New: func() interface{} { return types.NewHasher() }} - -// State represents the full state of the chain as of a particular block. -type State struct { - Index types.ChainIndex `json:"index"` - Elements merkle.ElementAccumulator `json:"elements"` - History merkle.HistoryAccumulator `json:"history"` - PrevTimestamps [11]time.Time `json:"prevTimestamps"` - - TotalWork types.Work `json:"totalWork"` - Difficulty types.Work `json:"difficulty"` - OakWork types.Work `json:"oakWork"` - OakTime time.Duration `json:"oakTime"` - GenesisTimestamp time.Time `json:"genesisTimestamp"` - - SiafundPool types.Currency `json:"siafundPool"` - FoundationAddress types.Address `json:"foundationAddress"` -} - -// EncodeTo implements types.EncoderTo. -func (s State) EncodeTo(e *types.Encoder) { - s.Index.EncodeTo(e) - s.Elements.EncodeTo(e) - s.History.EncodeTo(e) - for _, ts := range s.PrevTimestamps { - e.WriteTime(ts) - } - s.TotalWork.EncodeTo(e) - s.Difficulty.EncodeTo(e) - s.OakWork.EncodeTo(e) - e.WriteUint64(uint64(s.OakTime)) - e.WriteTime(s.GenesisTimestamp) - s.SiafundPool.EncodeTo(e) - s.FoundationAddress.EncodeTo(e) -} - -// DecodeFrom implements types.DecoderFrom. 
-func (s *State) DecodeFrom(d *types.Decoder) { - s.Index.DecodeFrom(d) - s.Elements.DecodeFrom(d) - s.History.DecodeFrom(d) - for i := range s.PrevTimestamps { - s.PrevTimestamps[i] = d.ReadTime() - } - s.TotalWork.DecodeFrom(d) - s.Difficulty.DecodeFrom(d) - s.OakWork.DecodeFrom(d) - s.OakTime = time.Duration(d.ReadUint64()) - s.GenesisTimestamp = d.ReadTime() - s.SiafundPool.DecodeFrom(d) - s.FoundationAddress.DecodeFrom(d) -} - -func (s State) numTimestamps() int { - if s.Index.Height+1 < uint64(len(s.PrevTimestamps)) { - return int(s.Index.Height + 1) - } - return len(s.PrevTimestamps) -} - -// BlockInterval is the expected wall clock time between consecutive blocks. -func (s State) BlockInterval() time.Duration { - return 10 * time.Minute -} - -// BlockReward returns the reward for mining a child block. -func (s State) BlockReward() types.Currency { - const initialCoinbase = 300000 - const minimumCoinbase = 30000 - blockHeight := s.Index.Height + 1 - if blockHeight < initialCoinbase-minimumCoinbase { - return types.Siacoins(uint32(initialCoinbase - blockHeight)) - } - return types.Siacoins(minimumCoinbase) -} - -// MaturityHeight is the height at which various outputs created in the child -// block will "mature" (become spendable). -// -// To prevent reorgs from invalidating large swathes of transactions, we impose -// a timelock on any output that is "linked" to a particular block. -// Specifically, we timelock block rewards, Foundation subsidies, siafund -// claims, and contract resolutions. If a reorg occurs, these outputs may no -// longer exist, so transactions that use them may become invalid (along with -// any transaction that depend on *those* transactions, and so on). Adding a -// timelock does not completely eliminate this issue -- after all, reorgs can be -// arbitrarily deep -- but it does make it highly unlikely to occur in practice. 
-func (s State) MaturityHeight() uint64 { - return (s.Index.Height + 1) + 144 -} - -// SiafundCount is the number of siafunds in existence. -func (s State) SiafundCount() uint64 { - return 10000 -} - -// FoundationSubsidy returns the Foundation subsidy value for the child block. -func (s State) FoundationSubsidy() types.Currency { - foundationSubsidyPerBlock := types.Siacoins(30000) - initialfoundationSubsidy := foundationSubsidyPerBlock.Mul64(blocksPerYear) - - blockHeight := s.Index.Height + 1 - if blockHeight < foundationHardforkHeight || (blockHeight-foundationHardforkHeight)%foundationSubsidyFrequency != 0 { - return types.ZeroCurrency - } else if blockHeight == foundationHardforkHeight { - return initialfoundationSubsidy - } - return foundationSubsidyPerBlock.Mul64(foundationSubsidyFrequency) -} - -// NonceFactor is the factor by which all block nonces must be divisible. -func (s State) NonceFactor() uint64 { - blockHeight := s.Index.Height + 1 - if blockHeight < asicHardforkHeight { - return 1 - } - return 1009 -} - -// MaxBlockWeight is the maximum "weight" of a valid child block. -func (s State) MaxBlockWeight() uint64 { - return 2_000_000 -} - -// TransactionWeight computes the weight of a txn. -func (s State) TransactionWeight(txn types.Transaction) uint64 { - storage := types.EncodedLen(txn) - - var signatures int - for _, in := range txn.SiacoinInputs { - signatures += len(in.Signatures) - } - for _, in := range txn.SiafundInputs { - signatures += len(in.Signatures) - } - signatures += 2 * len(txn.FileContractRevisions) - signatures += len(txn.Attestations) - - return uint64(storage) + 100*uint64(signatures) -} - -// BlockWeight computes the combined weight of a block's txns. -func (s State) BlockWeight(txns []types.Transaction) uint64 { - var weight uint64 - for _, txn := range txns { - weight += s.TransactionWeight(txn) - } - return weight -} - -// FileContractTax computes the tax levied on a given contract. 
-func (s State) FileContractTax(fc types.FileContract) types.Currency { - sum := fc.RenterOutput.Value.Add(fc.HostOutput.Value) - tax := sum.Div64(25) // 4% - // round down to nearest multiple of SiafundCount - _, r := bits.Div64(0, tax.Hi, s.SiafundCount()) - _, r = bits.Div64(r, tax.Lo, s.SiafundCount()) - return tax.Sub(types.NewCurrency64(r)) -} - -// StorageProofLeafIndex returns the leaf index used when computing or -// validating a storage proof. -func (s State) StorageProofLeafIndex(filesize uint64, windowStart types.ChainIndex, fcid types.ElementID) uint64 { - const leafSize = uint64(len(types.StorageProof{}.Leaf)) - if filesize <= leafSize { - return 0 - } - numLeaves := filesize / leafSize - if filesize%leafSize != 0 { - numLeaves++ - } - - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - windowStart.EncodeTo(h.E) - fcid.EncodeTo(h.E) - seed := h.Sum() - - var r uint64 - for i := 0; i < len(seed); i += 8 { - _, r = bits.Div64(r, binary.BigEndian.Uint64(seed[i:]), numLeaves) - } - return r -} - -// Commitment computes the commitment hash for a child block. -func (s State) Commitment(minerAddr types.Address, txns []types.Transaction) types.Hash256 { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - - // hash the state - s.EncodeTo(h.E) - stateHash := h.Sum() - - // hash the transactions - h.Reset() - h.E.WritePrefix(len(txns)) - for _, txn := range txns { - txn.ID().EncodeTo(h.E) - } - txnsHash := h.Sum() - - // concatenate the hashes and the miner address - h.Reset() - h.E.WriteString("sia/commitment") - stateHash.EncodeTo(h.E) - minerAddr.EncodeTo(h.E) - txnsHash.EncodeTo(h.E) - return h.Sum() -} - -// InputSigHash returns the hash that must be signed for each transaction input. -func (s State) InputSigHash(txn types.Transaction) types.Hash256 { - // NOTE: This currently covers exactly the same fields as txn.ID(), and for - // similar reasons. 
- h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - h.E.WriteString("sia/sig/transactioninput") - h.E.WritePrefix(len(txn.SiacoinInputs)) - for _, in := range txn.SiacoinInputs { - in.Parent.ID.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.SiacoinOutputs)) - for _, out := range txn.SiacoinOutputs { - out.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.SiafundInputs)) - for _, in := range txn.SiafundInputs { - in.Parent.ID.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.SiafundOutputs)) - for _, out := range txn.SiafundOutputs { - out.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.FileContracts)) - for _, fc := range txn.FileContracts { - fc.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.FileContractRevisions)) - for _, fcr := range txn.FileContractRevisions { - fcr.Parent.ID.EncodeTo(h.E) - fcr.Revision.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.FileContractResolutions)) - for _, fcr := range txn.FileContractResolutions { - fcr.Parent.ID.EncodeTo(h.E) - fcr.Renewal.EncodeTo(h.E) - fcr.StorageProof.WindowStart.EncodeTo(h.E) - fcr.Finalization.EncodeTo(h.E) - } - for _, a := range txn.Attestations { - a.EncodeTo(h.E) - } - h.E.WriteBytes(txn.ArbitraryData) - txn.NewFoundationAddress.EncodeTo(h.E) - txn.MinerFee.EncodeTo(h.E) - return h.Sum() -} - -// ContractSigHash returns the hash that must be signed for a file contract revision. -func (s State) ContractSigHash(fc types.FileContract) types.Hash256 { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - h.E.WriteString("sia/sig/filecontract") - h.E.WriteUint64(fc.Filesize) - fc.FileMerkleRoot.EncodeTo(h.E) - h.E.WriteUint64(fc.WindowStart) - h.E.WriteUint64(fc.WindowEnd) - fc.RenterOutput.EncodeTo(h.E) - fc.HostOutput.EncodeTo(h.E) - fc.MissedHostValue.EncodeTo(h.E) - fc.RenterPublicKey.EncodeTo(h.E) - fc.HostPublicKey.EncodeTo(h.E) - h.E.WriteUint64(fc.RevisionNumber) - return h.Sum() -} - -// RenewalSigHash returns the hash that must be signed for a file contract renewal. 
-func (s State) RenewalSigHash(fcr types.FileContractRenewal) types.Hash256 { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - h.E.WriteString("sia/sig/filecontractrenewal") - fcr.FinalRevision.EncodeTo(h.E) - fcr.InitialRevision.EncodeTo(h.E) - fcr.RenterRollover.EncodeTo(h.E) - fcr.HostRollover.EncodeTo(h.E) - return h.Sum() -} - -// AttestationSigHash returns the hash that must be signed for an attestation. -func (s State) AttestationSigHash(a types.Attestation) types.Hash256 { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - h.E.WriteString("sia/sig/attestation") - a.PublicKey.EncodeTo(h.E) - h.E.WriteString(a.Key) - h.E.WriteBytes(a.Value) - return h.Sum() -} - -// A Checkpoint pairs a block with its resulting chain state. -type Checkpoint struct { - Block types.Block - State State -} diff --git a/v2/consensus/update.go b/v2/consensus/update.go deleted file mode 100644 index 25305974..00000000 --- a/v2/consensus/update.go +++ /dev/null @@ -1,441 +0,0 @@ -package consensus - -import ( - "time" - - "go.sia.tech/core/v2/merkle" - "go.sia.tech/core/v2/types" -) - -func updateOakTotals(s *State, h types.BlockHeader) (time.Duration, types.Work) { - parentTimestamp := s.PrevTimestamps[s.numTimestamps()-1] - blockTime := h.Timestamp.Sub(parentTimestamp) - blockWork := s.Difficulty - - // decay totals by 0.5% before adding the new values - decayedTime := s.OakTime - (s.OakTime / 200) + blockTime - decayedWork := s.OakWork.Sub(s.OakWork.Div64(200)).Add(blockWork) - return decayedTime, decayedWork -} - -func adjustDifficulty(s *State, h types.BlockHeader) types.Work { - // NOTE: To avoid overflow/underflow issues, this function operates on - // integer seconds (rather than time.Duration, which uses nanoseconds). This - // shouldn't appreciably affect the precision of the algorithm. 
- - blockInterval := s.BlockInterval() / time.Second - expectedTime := s.BlockInterval() * time.Duration(h.Height) - actualTime := h.Timestamp.Sub(s.GenesisTimestamp) / time.Second - delta := expectedTime - actualTime - // square the delta and preserve its sign - shift := delta * delta - if delta < 0 { - shift = -shift - } - // scale such that a delta of 10,000 produces a shift of 10 seconds - shift *= 10 - shift /= 10000 * 10000 - - // calculate the new target block time, clamped to a factor of 3 - targetBlockTime := blockInterval + shift - if min := blockInterval / 3; targetBlockTime < min { - targetBlockTime = min - } else if max := blockInterval * 3; targetBlockTime > max { - targetBlockTime = max - } - - // estimate the hashrate from the (decayed) total work and the (decayed, - // clamped) total time - if s.OakTime <= time.Second { - s.OakTime = time.Second - } - estimatedHashrate := s.OakWork.Div64(uint64(s.OakTime / time.Second)) - - // multiply the estimated hashrate by the target block time; this is the - // expected number of hashes required to produce the next block, i.e. 
the - // new difficulty - newDifficulty := estimatedHashrate.Mul64(uint64(targetBlockTime)) - - // clamp the adjustment to 0.4% - maxAdjust := s.Difficulty.Div64(250) - if min := s.Difficulty.Sub(maxAdjust); newDifficulty.Cmp(min) < 0 { - newDifficulty = min - } else if max := s.Difficulty.Add(maxAdjust); newDifficulty.Cmp(max) > 0 { - newDifficulty = max - } - return newDifficulty -} - -func applyHeader(s *State, h types.BlockHeader) { - if h.Height == 0 { - // special handling for GenesisUpdate - s.PrevTimestamps[0] = h.Timestamp - s.Index = h.Index() - return - } - s.TotalWork = s.TotalWork.Add(s.Difficulty) - s.OakTime, s.OakWork = updateOakTotals(s, h) - s.Difficulty = adjustDifficulty(s, h) - if s.numTimestamps() < len(s.PrevTimestamps) { - s.PrevTimestamps[s.numTimestamps()] = h.Timestamp - } else { - copy(s.PrevTimestamps[:], s.PrevTimestamps[1:]) - s.PrevTimestamps[len(s.PrevTimestamps)-1] = h.Timestamp - } - s.Index = h.Index() -} - -func updatedInBlock(s State, b types.Block, apply bool) (scos []types.SiacoinElement, sfos []types.SiafundElement, revised, resolved []types.FileContractElement, leaves []merkle.ElementLeaf) { - addLeaf := func(l merkle.ElementLeaf) { - // copy proofs so we don't mutate transaction data - l.MerkleProof = append([]types.Hash256(nil), l.MerkleProof...) 
- leaves = append(leaves, l) - } - - for _, txn := range b.Transactions { - for _, in := range txn.SiacoinInputs { - if in.Parent.LeafIndex != types.EphemeralLeafIndex { - scos = append(scos, in.Parent) - addLeaf(merkle.SiacoinLeaf(in.Parent, apply)) - } - } - for _, in := range txn.SiafundInputs { - sfos = append(sfos, in.Parent) - addLeaf(merkle.SiafundLeaf(in.Parent, apply)) - } - for _, fcr := range txn.FileContractRevisions { - fce := fcr.Parent - if apply { - fce.FileContract = fcr.Revision - } - revised = append(revised, fce) - addLeaf(merkle.FileContractLeaf(fce, false)) - } - for _, fcr := range txn.FileContractResolutions { - fce := fcr.Parent - if apply { - if fcr.HasRenewal() { - fce.FileContract = fcr.Renewal.FinalRevision - } else if fcr.HasFinalization() { - fce.FileContract = fcr.Finalization - } - } - resolved = append(resolved, fce) - addLeaf(merkle.FileContractLeaf(fce, apply)) - } - } - - return -} - -func createdInBlock(s State, b types.Block) (sces []types.SiacoinElement, sfes []types.SiafundElement, fces []types.FileContractElement) { - sces = append(sces, types.SiacoinElement{ - StateElement: types.StateElement{ - ID: b.MinerOutputID(), - }, - SiacoinOutput: types.SiacoinOutput{ - Value: s.BlockReward(), - Address: b.Header.MinerAddress, - }, - MaturityHeight: s.MaturityHeight(), - }) - if subsidy := s.FoundationSubsidy(); !subsidy.IsZero() { - sces = append(sces, types.SiacoinElement{ - StateElement: types.StateElement{ - ID: b.FoundationOutputID(), - }, - SiacoinOutput: types.SiacoinOutput{ - Value: subsidy, - Address: s.FoundationAddress, - }, - MaturityHeight: s.MaturityHeight(), - }) - } - for _, txn := range b.Transactions { - txid := txn.ID() - var index uint64 - nextElement := func() types.StateElement { - index++ - return types.StateElement{ - ID: types.ElementID{ - Source: types.Hash256(txid), - Index: index - 1, - }, - } - } - - for _, out := range txn.SiacoinOutputs { - sces = append(sces, types.SiacoinElement{ - StateElement: 
nextElement(), - SiacoinOutput: out, - }) - } - for _, in := range txn.SiafundInputs { - sces = append(sces, types.SiacoinElement{ - StateElement: nextElement(), - SiacoinOutput: types.SiacoinOutput{ - Value: s.SiafundPool.Sub(in.Parent.ClaimStart).Div64(s.SiafundCount()).Mul64(in.Parent.Value), - Address: in.ClaimAddress, - }, - MaturityHeight: s.MaturityHeight(), - }) - } - for _, out := range txn.SiafundOutputs { - sfes = append(sfes, types.SiafundElement{ - StateElement: nextElement(), - SiafundOutput: out, - ClaimStart: s.SiafundPool, - }) - } - for _, fc := range txn.FileContracts { - fces = append(fces, types.FileContractElement{ - StateElement: nextElement(), - FileContract: fc, - }) - } - for _, fcr := range txn.FileContractResolutions { - fce := fcr.Parent - var renter, host types.SiacoinOutput - if fcr.HasRenewal() { - renter, host = fcr.Renewal.FinalRevision.RenterOutput, fcr.Renewal.FinalRevision.HostOutput - renter.Value = renter.Value.Sub(fcr.Renewal.RenterRollover) - host.Value = host.Value.Sub(fcr.Renewal.HostRollover) - fces = append(fces, types.FileContractElement{ - StateElement: nextElement(), - FileContract: fcr.Renewal.InitialRevision, - }) - } else if fcr.HasStorageProof() { - renter, host = fce.RenterOutput, fce.HostOutput - } else if fcr.HasFinalization() { - renter, host = fcr.Finalization.RenterOutput, fcr.Finalization.HostOutput - } else if fce.Filesize == 0 { - renter, host = fce.RenterOutput, fce.HostOutput - } else { - renter, host = fce.RenterOutput, fce.MissedHostOutput() - } - sces = append(sces, types.SiacoinElement{ - StateElement: nextElement(), - SiacoinOutput: renter, - MaturityHeight: s.MaturityHeight(), - }) - sces = append(sces, types.SiacoinElement{ - StateElement: nextElement(), - SiacoinOutput: host, - MaturityHeight: s.MaturityHeight(), - }) - } - } - - return -} - -// A ApplyUpdate reflects the changes to consensus state resulting from the -// application of a block. 
-type ApplyUpdate struct { - merkle.ElementApplyUpdate - merkle.HistoryApplyUpdate - - State State - SpentSiacoins []types.SiacoinElement - SpentSiafunds []types.SiafundElement - RevisedFileContracts []types.FileContractElement - ResolvedFileContracts []types.FileContractElement - NewSiacoinElements []types.SiacoinElement - NewSiafundElements []types.SiafundElement - NewFileContracts []types.FileContractElement -} - -// SiacoinElementWasSpent returns true if the given SiacoinElement was spent. -func (au *ApplyUpdate) SiacoinElementWasSpent(sce types.SiacoinElement) bool { - for i := range au.SpentSiacoins { - if au.SpentSiacoins[i].LeafIndex == sce.LeafIndex { - return true - } - } - return false -} - -// SiafundElementWasSpent returns true if the given SiafundElement was spent. -func (au *ApplyUpdate) SiafundElementWasSpent(sfe types.SiafundElement) bool { - for i := range au.SpentSiafunds { - if au.SpentSiafunds[i].LeafIndex == sfe.LeafIndex { - return true - } - } - return false -} - -// FileContractElementWasResolved returns true if the given FileContractElement was resolved. -func (au *ApplyUpdate) FileContractElementWasResolved(fce types.FileContractElement) bool { - for i := range au.ResolvedFileContracts { - if au.ResolvedFileContracts[i].LeafIndex == fce.LeafIndex { - return true - } - } - return false -} - -// UpdateTransactionProofs updates the element proofs and window proofs of a -// transaction. 
-func (au *ApplyUpdate) UpdateTransactionProofs(txn *types.Transaction) { - for i := range txn.SiacoinInputs { - if txn.SiacoinInputs[i].Parent.LeafIndex != types.EphemeralLeafIndex { - au.UpdateElementProof(&txn.SiacoinInputs[i].Parent.StateElement) - } - } - for i := range txn.SiafundInputs { - if txn.SiafundInputs[i].Parent.LeafIndex != types.EphemeralLeafIndex { - au.UpdateElementProof(&txn.SiafundInputs[i].Parent.StateElement) - } - } - for i := range txn.FileContractRevisions { - au.UpdateElementProof(&txn.FileContractRevisions[i].Parent.StateElement) - } - for i := range txn.FileContractResolutions { - au.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) - au.UpdateWindowProof(&txn.FileContractResolutions[i].StorageProof) - } -} - -// ApplyBlock integrates a block into the current consensus state, producing an -// ApplyUpdate detailing the resulting changes. The block is assumed to be fully -// validated. -func ApplyBlock(s State, b types.Block) (au ApplyUpdate) { - if s.Index.Height > 0 && s.Index != b.Header.ParentIndex() { - panic("consensus: cannot apply non-child block") - } - - // update elements - var updated, created []merkle.ElementLeaf - au.SpentSiacoins, au.SpentSiafunds, au.RevisedFileContracts, au.ResolvedFileContracts, updated = updatedInBlock(s, b, true) - au.NewSiacoinElements, au.NewSiafundElements, au.NewFileContracts = createdInBlock(s, b) - spent := make(map[types.ElementID]bool) - for _, txn := range b.Transactions { - for _, in := range txn.SiacoinInputs { - if in.Parent.LeafIndex == types.EphemeralLeafIndex { - spent[in.Parent.ID] = true - } - } - } - for _, sce := range au.NewSiacoinElements { - created = append(created, merkle.SiacoinLeaf(sce, spent[sce.ID])) - } - for _, sfe := range au.NewSiafundElements { - created = append(created, merkle.SiafundLeaf(sfe, spent[sfe.ID])) - } - for _, fce := range au.NewFileContracts { - created = append(created, merkle.FileContractLeaf(fce, spent[fce.ID])) - } - 
au.ElementApplyUpdate = s.Elements.ApplyBlock(updated, created) - for i := range au.NewSiacoinElements { - au.NewSiacoinElements[i].StateElement = created[0].StateElement - created = created[1:] - } - for i := range au.NewSiafundElements { - au.NewSiafundElements[i].StateElement = created[0].StateElement - created = created[1:] - } - for i := range au.NewFileContracts { - au.NewFileContracts[i].StateElement = created[0].StateElement - created = created[1:] - } - - // update history - au.HistoryApplyUpdate = s.History.ApplyBlock(b.Index()) - - // update state - applyHeader(&s, b.Header) - for _, txn := range b.Transactions { - for _, fc := range txn.FileContracts { - s.SiafundPool = s.SiafundPool.Add(s.FileContractTax(fc)) - } - if txn.NewFoundationAddress != types.VoidAddress { - s.FoundationAddress = txn.NewFoundationAddress - } - } - au.State = s - - return -} - -// GenesisUpdate returns the ApplyUpdate for the genesis block b. -func GenesisUpdate(b types.Block, initialDifficulty types.Work) ApplyUpdate { - return ApplyBlock(State{ - Difficulty: initialDifficulty, - GenesisTimestamp: b.Header.Timestamp, - }, b) -} - -// A RevertUpdate reflects the changes to consensus state resulting from the -// removal of a block. -type RevertUpdate struct { - merkle.ElementRevertUpdate - merkle.HistoryRevertUpdate - - State State - SpentSiacoins []types.SiacoinElement - SpentSiafunds []types.SiafundElement - RevisedFileContracts []types.FileContractElement - ResolvedFileContracts []types.FileContractElement - NewSiacoinElements []types.SiacoinElement - NewSiafundElements []types.SiafundElement - NewFileContracts []types.FileContractElement -} - -// SiacoinElementWasRemoved returns true if the specified SiacoinElement was -// reverted. 
-func (ru *RevertUpdate) SiacoinElementWasRemoved(sce types.SiacoinElement) bool { - return sce.LeafIndex != types.EphemeralLeafIndex && sce.LeafIndex >= ru.State.Elements.NumLeaves -} - -// SiafundElementWasRemoved returns true if the specified SiafundElement was -// reverted. -func (ru *RevertUpdate) SiafundElementWasRemoved(sfe types.SiafundElement) bool { - return sfe.LeafIndex != types.EphemeralLeafIndex && sfe.LeafIndex >= ru.State.Elements.NumLeaves -} - -// FileContractElementWasRemoved returns true if the specified -// FileContractElement was reverted. -func (ru *RevertUpdate) FileContractElementWasRemoved(fce types.FileContractElement) bool { - return fce.LeafIndex != types.EphemeralLeafIndex && fce.LeafIndex >= ru.State.Elements.NumLeaves -} - -// UpdateTransactionProofs updates the element proofs and window proofs of a -// transaction. -func (ru *RevertUpdate) UpdateTransactionProofs(txn *types.Transaction) { - for i := range txn.SiacoinInputs { - if txn.SiacoinInputs[i].Parent.LeafIndex != types.EphemeralLeafIndex { - ru.UpdateElementProof(&txn.SiacoinInputs[i].Parent.StateElement) - } - } - for i := range txn.SiafundInputs { - if txn.SiafundInputs[i].Parent.LeafIndex != types.EphemeralLeafIndex { - ru.UpdateElementProof(&txn.SiafundInputs[i].Parent.StateElement) - } - } - for i := range txn.FileContractRevisions { - ru.UpdateElementProof(&txn.FileContractRevisions[i].Parent.StateElement) - } - for i := range txn.FileContractResolutions { - ru.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) - ru.UpdateWindowProof(&txn.FileContractResolutions[i].StorageProof) - } -} - -// RevertBlock produces a RevertUpdate from a block and the State -// prior to that block. 
-func RevertBlock(s State, b types.Block) (ru RevertUpdate) { - if b.Header.Height == 0 { - panic("consensus: cannot revert genesis block") - } else if s.Index != b.Header.ParentIndex() { - panic("consensus: cannot revert non-child block") - } - - ru.State = s - ru.HistoryRevertUpdate = ru.State.History.RevertBlock(b.Index()) - var updated []merkle.ElementLeaf - ru.SpentSiacoins, ru.SpentSiafunds, ru.RevisedFileContracts, ru.ResolvedFileContracts, updated = updatedInBlock(s, b, false) - ru.NewSiacoinElements, ru.NewSiafundElements, ru.NewFileContracts = createdInBlock(s, b) - ru.ElementRevertUpdate = ru.State.Elements.RevertBlock(updated) - return -} diff --git a/v2/consensus/update_test.go b/v2/consensus/update_test.go deleted file mode 100644 index 599b3ff3..00000000 --- a/v2/consensus/update_test.go +++ /dev/null @@ -1,1124 +0,0 @@ -package consensus - -import ( - "math" - "reflect" - "testing" - "time" - - "go.sia.tech/core/v2/merkle" - "go.sia.tech/core/v2/types" - - "lukechampine.com/frand" -) - -func randAddr() types.Address { - return frand.Entropy256() -} - -func randAmount() types.Currency { - return types.NewCurrency( - frand.Uint64n(math.MaxUint64), - frand.Uint64n(math.MaxUint64), - ) -} - -func TestApplyBlock(t *testing.T) { - b := genesisWithSiacoinOutputs([]types.SiacoinOutput{ - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - }...) 
- update1 := GenesisUpdate(b, testingDifficulty) - acc1 := update1.State.Elements - origOutputs := update1.NewSiacoinElements - if len(origOutputs) != len(b.Transactions[0].SiacoinOutputs)+1 { - t.Fatalf("expected %v new outputs, got %v", len(b.Transactions[0].SiacoinOutputs)+1, len(origOutputs)) - } - // none of the outputs should be marked as spent - for _, o := range origOutputs { - if update1.SiacoinElementWasSpent(o) { - t.Error("update should not mark output as spent:", o) - } - if acc1.ContainsSpentSiacoinElement(o) || !acc1.ContainsUnspentSiacoinElement(o) { - t.Error("accumulator should contain unspent output:", o) - } - } - - // apply a block that spends some outputs - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{ - {Parent: origOutputs[6], SpendPolicy: types.AnyoneCanSpend()}, - {Parent: origOutputs[7], SpendPolicy: types.AnyoneCanSpend()}, - {Parent: origOutputs[8], SpendPolicy: types.AnyoneCanSpend()}, - {Parent: origOutputs[9], SpendPolicy: types.AnyoneCanSpend()}, - }, - SiacoinOutputs: []types.SiacoinOutput{{ - Value: randAmount(), - Address: randAddr(), - }}, - MinerFee: randAmount(), - } - b = types.Block{ - Header: types.BlockHeader{ - Height: b.Header.Height + 1, - ParentID: b.ID(), - MinerAddress: randAddr(), - }, - Transactions: []types.Transaction{txn}, - } - - update2 := ApplyBlock(update1.State, b) - acc2 := update2.State.Elements - for i := range origOutputs { - update2.UpdateElementProof(&origOutputs[i].StateElement) - } - - // the update should mark each input as spent - for _, in := range txn.SiacoinInputs { - if !update2.SiacoinElementWasSpent(in.Parent) { - t.Error("update should mark input as spent:", in) - } - } - // the new accumulator should contain both the spent and unspent outputs - for _, o := range origOutputs { - if update2.SiacoinElementWasSpent(o) { - if acc2.ContainsUnspentSiacoinElement(o) || !acc2.ContainsSpentSiacoinElement(o) { - t.Error("accumulator should contain spent output:", o) - } - } else { 
- if acc2.ContainsSpentSiacoinElement(o) || !acc2.ContainsUnspentSiacoinElement(o) { - t.Error("accumulator should contain unspent output:", o) - } - } - } - - // if we instead revert that block, we should see the inputs being "created" - // again and the outputs being destroyed - revertUpdate := RevertBlock(update1.State, b) - revertAcc := revertUpdate.State.Elements - if len(revertUpdate.SpentSiacoins) != len(txn.SiacoinInputs) { - t.Error("number of spent outputs after revert should equal number of inputs") - } - for _, o := range update2.NewSiacoinElements { - if !revertUpdate.SiacoinElementWasRemoved(o) { - t.Error("output created in reverted block should be marked as removed") - } - } - // update (a copy of) the proofs to reflect the revert - outputsWithRevert := append([]types.SiacoinElement(nil), origOutputs...) - for i := range outputsWithRevert { - outputsWithRevert[i].MerkleProof = append([]types.Hash256(nil), outputsWithRevert[i].MerkleProof...) - revertUpdate.UpdateElementProof(&outputsWithRevert[i].StateElement) - } - // the reverted proofs should be identical to the proofs prior to b - for _, o := range outputsWithRevert { - if update1.SiacoinElementWasSpent(o) { - t.Error("update should not mark output as spent:", o) - } - if revertAcc.ContainsSpentSiacoinElement(o) { - t.Error("output should not be marked as spent:", o) - } - } - - // spend one of the outputs whose proof we've been maintaining, - // using an intermediary transaction to test "ephemeral" outputs - parentTxn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{ - {Parent: origOutputs[2], SpendPolicy: types.AnyoneCanSpend()}, - }, - SiacoinOutputs: []types.SiacoinOutput{{ - Value: randAmount(), - Address: randAddr(), - }}, - } - childTxn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: types.SiacoinElement{ - StateElement: types.StateElement{ - ID: types.ElementID{ - Source: types.Hash256(parentTxn.ID()), - Index: 0, - }, - LeafIndex: 
types.EphemeralLeafIndex, - }, - SiacoinOutput: types.SiacoinOutput{ - Value: randAmount(), - Address: randAddr(), - }, - }, - SpendPolicy: types.AnyoneCanSpend(), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Value: randAmount(), - Address: randAddr(), - }}, - MinerFee: randAmount(), - } - - b = types.Block{ - Header: types.BlockHeader{ - Height: b.Header.Height + 1, - ParentID: b.ID(), - MinerAddress: randAddr(), - }, - Transactions: []types.Transaction{parentTxn, childTxn}, - } - - update3 := ApplyBlock(update2.State, b) - acc3 := update3.State.Elements - for i := range origOutputs { - update3.UpdateElementProof(&origOutputs[i].StateElement) - } - - // the update should mark each input as spent - for _, in := range parentTxn.SiacoinInputs { - if !update3.SiacoinElementWasSpent(in.Parent) { - t.Error("update should mark input as spent:", in) - } - } - // the new accumulator should contain both the spent and unspent outputs - for _, o := range origOutputs { - if update2.SiacoinElementWasSpent(o) || update3.SiacoinElementWasSpent(o) { - if acc3.ContainsUnspentSiacoinElement(o) || !acc3.ContainsSpentSiacoinElement(o) { - t.Error("accumulator should contain spent output:", o) - } - } else { - if acc3.ContainsSpentSiacoinElement(o) || !acc3.ContainsUnspentSiacoinElement(o) { - t.Error("accumulator should contain unspent output:", o) - } - } - } - - // TODO: we should also be checking childTxn, but we can't check the - // ephemeral output without knowing its index -} - -func TestRevertBlock(t *testing.T) { - b := genesisWithSiacoinOutputs([]types.SiacoinOutput{ - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - {Value: randAmount(), Address: randAddr()}, - }...) 
- update1 := GenesisUpdate(b, testingDifficulty) - origOutputs := update1.NewSiacoinElements - if len(origOutputs) != len(b.Transactions[0].SiacoinOutputs)+1 { - t.Fatalf("expected %v new outputs, got %v", len(b.Transactions[0].SiacoinOutputs)+1, len(origOutputs)) - } - - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{ - {Parent: origOutputs[5], SpendPolicy: types.AnyoneCanSpend()}, - }, - SiacoinOutputs: []types.SiacoinOutput{{ - Value: randAmount(), - Address: randAddr(), - }}, - MinerFee: randAmount(), - } - b = types.Block{ - Header: types.BlockHeader{ - Height: b.Header.Height + 1, - ParentID: b.ID(), - MinerAddress: randAddr(), - }, - Transactions: []types.Transaction{txn}, - } - - update2 := ApplyBlock(update1.State, b) - for i := range origOutputs { - update2.UpdateElementProof(&origOutputs[i].StateElement) - } - - // revert the block. We should see the inputs being "created" again - // and the outputs being destroyed - revertUpdate := RevertBlock(update1.State, b) - if len(revertUpdate.SpentSiacoins) != len(txn.SiacoinInputs) { - t.Error("number of spent outputs after revert should equal number of inputs") - } - for _, o := range update2.NewSiacoinElements { - if !revertUpdate.SiacoinElementWasRemoved(o) { - t.Error("output created in reverted block should be marked as removed") - } - } - // update the proofs to reflect the revert - for i := range origOutputs { - revertUpdate.UpdateElementProof(&origOutputs[i].StateElement) - } - // the reverted proofs should be identical to the proofs prior to b - for _, o := range origOutputs { - if update1.SiacoinElementWasSpent(o) { - t.Error("update should not mark output as spent:", o) - } - if !update1.State.Elements.ContainsUnspentSiacoinElement(o) { - t.Error("output should be in the accumulator, marked as unspent:", o) - } - } -} - -func TestSiafunds(t *testing.T) { - pubkey, privkey := testingKeypair(0) - b := types.Block{ - Header: types.BlockHeader{Timestamp: time.Unix(734600000, 0)}, - 
Transactions: []types.Transaction{{SiafundOutputs: []types.SiafundOutput{{ - Address: types.StandardAddress(pubkey), - Value: 100, - }}}}, - } - sau := GenesisUpdate(b, testingDifficulty) - - // send siafunds to a new address - claimPubkey, claimPrivkey := testingKeypair(1) - txn := types.Transaction{ - SiafundInputs: []types.SiafundInput{{ - Parent: sau.NewSiafundElements[0], - SpendPolicy: types.PolicyPublicKey(pubkey), - ClaimAddress: types.StandardAddress(claimPubkey), - }}, - SiafundOutputs: []types.SiafundOutput{{ - Address: types.StandardAddress(claimPubkey), - Value: 100, - }}, - } - signAllInputs(&txn, sau.State, privkey) - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - - // should have created a siafund output, a block reward, and a claim output - if len(sau.NewSiafundElements) != 1 || sau.NewSiafundElements[0].Value != 100 { - t.Fatal("expected one new siafund output") - } else if len(sau.NewSiacoinElements) != 2 { - t.Fatal("expected one block reward and one claim output") - } - - // attempt to spend the claim output before it matures - txn = types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: sau.NewSiacoinElements[1], - SpendPolicy: types.PolicyPublicKey(claimPubkey), - }}, - MinerFee: sau.NewSiacoinElements[1].Value, - } - signAllInputs(&txn, sau.State, claimPrivkey) - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err == nil { - t.Fatal("expected error when attempting to spend immature claim output") - } - - // skip to maturity height and try again - sau.State.Index.Height = sau.NewSiacoinElements[1].MaturityHeight + 1 - sau.State.Index.ID = b.ID() - for i := range sau.State.PrevTimestamps { - sau.State.PrevTimestamps[i] = b.Header.Timestamp - } - b.Header.Height = sau.State.Index.Height - signAllInputs(&txn, sau.State, claimPrivkey) - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != 
nil { - t.Fatal(err) - } -} - -func TestFoundationSubsidy(t *testing.T) { - // mine genesis block with initial Foundation address - pubkey, privkey := testingKeypair(0) - b := genesisWithSiacoinOutputs(types.SiacoinOutput{ - Address: types.StandardAddress(pubkey), - Value: types.NewCurrency64(100), - }) - b.Transactions[0].NewFoundationAddress = types.StandardAddress(pubkey) - sau := GenesisUpdate(b, testingDifficulty) - if sau.State.FoundationAddress != types.StandardAddress(pubkey) { - t.Fatal("Foundation address not updated") - } - initialOutput := sau.NewSiacoinElements[1] - - // skip to Foundation hardfork height; we should receive the initial subsidy - b.Header.Height = foundationHardforkHeight - 1 - sau.State.Index.Height = foundationHardforkHeight - 1 - for i := range sau.State.PrevTimestamps { - sau.State.PrevTimestamps[i] = b.Header.Timestamp - } - b = mineBlock(sau.State, b) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - sau.UpdateElementProof(&initialOutput.StateElement) - subsidyID := types.ElementID{ - Source: types.Hash256(b.ID()), - Index: 1, - } - var subsidyOutput types.SiacoinElement - for _, o := range sau.NewSiacoinElements { - if o.ID == subsidyID { - subsidyOutput = o - break - } - } - if subsidyOutput.ID != subsidyID { - t.Fatal("subsidy output not created") - } - - // update the Foundation subsidy address - newAddress := types.Address{1, 2, 3} - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: initialOutput, - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - NewFoundationAddress: newAddress, - MinerFee: initialOutput.Value, - } - signAllInputs(&txn, sau.State, privkey) - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - sau.UpdateElementProof(&subsidyOutput.StateElement) - if sau.State.FoundationAddress != newAddress { - t.Fatal("Foundation address not updated") - 
} - - // skip beyond the maturity height of the initial subsidy output, and spend it - sau.State.Index.Height = subsidyOutput.MaturityHeight + 1 - txn = types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: subsidyOutput, - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - MinerFee: subsidyOutput.Value, - } - signAllInputs(&txn, sau.State, privkey) - if err := sau.State.ValidateTransaction(txn); err != nil { - t.Fatal(err) - } - - // skip to the next foundation subsidy height; the foundation address should - // receive a new subsidy. - sau.State.Index.Height = foundationHardforkHeight + foundationSubsidyFrequency - 1 - b.Header.Height = sau.State.Index.Height - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - subsidyID = types.ElementID{ - Source: types.Hash256(b.ID()), - Index: 1, - } - for _, o := range sau.NewSiacoinElements { - if o.ID == subsidyID { - subsidyOutput = o - break - } - } - - // check that the output was created and has the expected value of - // 30000 SC * 4380 blocks per month. 
- if subsidyOutput.ID != subsidyID { - t.Fatal("subsidy output not created") - } else if exp := types.Siacoins(30000).Mul64(foundationSubsidyFrequency); !subsidyOutput.Value.Equals(exp) { - t.Fatalf("expected subsidy to be %v SC, got %v SC", exp, subsidyOutput.Value) - } -} - -func TestUpdateWindowProof(t *testing.T) { - for before := 0; before < 10; before++ { - for after := 0; after < 10; after++ { - b := genesisWithSiacoinOutputs() - sau := GenesisUpdate(b, testingDifficulty) - for i := 0; i < before; i++ { - b = mineBlock(sau.State, b) - sau = ApplyBlock(sau.State, b) - } - sp := types.StorageProof{ - WindowStart: sau.State.Index, - WindowProof: sau.HistoryProof(), - } - for i := 0; i < after; i++ { - b = mineBlock(sau.State, b) - sau = ApplyBlock(sau.State, b) - sau.UpdateWindowProof(&sp) - } - if !sau.State.History.Contains(sp.WindowStart, sp.WindowProof) { - t.Fatal("UpdateWindowProof created invalid history proof") - } - } - } -} - -func TestFileContracts(t *testing.T) { - renterPubkey, renterPrivkey := testingKeypair(0) - hostPubkey, hostPrivkey := testingKeypair(1) - b := genesisWithSiacoinOutputs(types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(100), - }, types.SiacoinOutput{ - Address: types.StandardAddress(hostPubkey), - Value: types.Siacoins(7), - }) - sau := GenesisUpdate(b, testingDifficulty) - renterOutput := sau.NewSiacoinElements[1] - hostOutput := sau.NewSiacoinElements[2] - - // form initial contract - initialRev := types.FileContract{ - WindowStart: 5, - WindowEnd: 10, - RenterOutput: types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(58), - }, - HostOutput: types.SiacoinOutput{ - Address: types.StandardAddress(hostPubkey), - Value: types.Siacoins(19), - }, - MissedHostValue: types.Siacoins(17), - TotalCollateral: types.Siacoins(18), - RenterPublicKey: renterPubkey, - HostPublicKey: hostPubkey, - } - outputSum := 
initialRev.RenterOutput.Value.Add(initialRev.HostOutput.Value).Add(sau.State.FileContractTax(initialRev)) - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{ - {Parent: renterOutput, SpendPolicy: types.PolicyPublicKey(renterPubkey)}, - {Parent: hostOutput, SpendPolicy: types.PolicyPublicKey(hostPubkey)}, - }, - FileContracts: []types.FileContract{initialRev}, - MinerFee: renterOutput.Value.Add(hostOutput.Value).Sub(outputSum), - } - fc := &txn.FileContracts[0] - contractHash := sau.State.ContractSigHash(*fc) - fc.RenterSignature = renterPrivkey.SignHash(contractHash) - fc.HostSignature = hostPrivkey.SignHash(contractHash) - sigHash := sau.State.InputSigHash(txn) - txn.SiacoinInputs[0].Signatures = []types.Signature{renterPrivkey.SignHash(sigHash)} - txn.SiacoinInputs[1].Signatures = []types.Signature{hostPrivkey.SignHash(sigHash)} - - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - - if len(sau.NewFileContracts) != 1 { - t.Fatal("expected one new file contract") - } - fce := sau.NewFileContracts[0] - if !sau.State.Elements.ContainsUnresolvedFileContractElement(fce) { - t.Fatal("accumulator should contain unresolved contract") - } - if sau.State.SiafundPool != sau.State.FileContractTax(initialRev) { - t.Fatal("expected siafund pool to increase") - } - - // renter and host now exchange data + revisions out-of-band; we simulate - // the final revision - data := frand.Bytes(64 * 2) - finalRev := types.FileContractRevision{ - Parent: fce, - Revision: fce.FileContract, - } - finalRev.Revision.FileMerkleRoot = merkle.NodeHash( - merkle.StorageProofLeafHash(data[:64]), - merkle.StorageProofLeafHash(data[64:]), - ) - finalRev.Revision.RevisionNumber++ - finalRev.Revision.Filesize = uint64(len(data)) - contractHash = sau.State.ContractSigHash(finalRev.Revision) - finalRev.Revision.RenterSignature = renterPrivkey.SignHash(contractHash) - finalRev.Revision.HostSignature = 
hostPrivkey.SignHash(contractHash) - txn = types.Transaction{ - FileContractRevisions: []types.FileContractRevision{finalRev}, - } - - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - if len(sau.RevisedFileContracts) != 1 { - t.Fatal("expected one revised file contract") - } - fce = sau.RevisedFileContracts[0] - sau.UpdateElementProof(&fce.StateElement) - - // mine until we enter the proof window - // - // NOTE: unlike other tests, we can't "cheat" here by fast-forwarding, - // because we need to maintain a history proof - for sau.State.Index.Height < fc.WindowStart { - b = mineBlock(sau.State, b) - sau = ApplyBlock(sau.State, b) - sau.UpdateElementProof(&fce.StateElement) - } - sp := types.StorageProof{ - WindowStart: sau.State.Index, - WindowProof: sau.HistoryProof(), - } - proofIndex := sau.State.StorageProofLeafIndex(finalRev.Revision.Filesize, sp.WindowStart, fce.ID) - copy(sp.Leaf[:], data[64*proofIndex:]) - if proofIndex == 0 { - sp.Proof = append(sp.Proof, merkle.StorageProofLeafHash(data[64:])) - } else { - sp.Proof = append(sp.Proof, merkle.StorageProofLeafHash(data[:64])) - } - - // create valid contract resolution - txn = types.Transaction{ - FileContractResolutions: []types.FileContractResolution{{ - Parent: fce, - StorageProof: sp, - }}, - } - - validBlock := mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(validBlock); err != nil { - t.Fatal(err) - } - validSAU := ApplyBlock(sau.State, validBlock) - if len(validSAU.ResolvedFileContracts) != 1 { - t.Fatal("expected one resolved file contract") - } else if len(validSAU.NewSiacoinElements) != 3 { - t.Fatal("expected three new siacoin outputs") - } else if validSAU.NewSiacoinElements[1].SiacoinOutput != finalRev.Revision.RenterOutput { - t.Fatal("expected renter output to be created") - } else if validSAU.NewSiacoinElements[2].SiacoinOutput != finalRev.Revision.HostOutput { - t.Fatal("expected valid 
host output to be created") - } - - // revert the block and instead mine past the proof window - for sau.State.Index.Height <= fc.WindowEnd { - b = mineBlock(sau.State, b) - sau = ApplyBlock(sau.State, b) - sau.UpdateElementProof(&txn.FileContractResolutions[0].Parent.StateElement) - sau.UpdateWindowProof(&txn.FileContractResolutions[0].StorageProof) - } - // storage proof resolution should now be rejected - if err := sau.State.ValidateTransaction(txn); err == nil { - t.Fatal("expected too-late storage proof to be rejected") - } - // missed resolution should be accepted, though - txn.FileContractResolutions[0].StorageProof = types.StorageProof{} - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - if len(sau.ResolvedFileContracts) != 1 { - t.Fatal("expected one resolved file contract") - } else if len(sau.NewSiacoinElements) != 3 { - t.Fatal("expected three new siacoin outputs") - } else if sau.NewSiacoinElements[1].SiacoinOutput != finalRev.Revision.RenterOutput { - t.Fatal("expected renter output to be created") - } else if sau.NewSiacoinElements[2].SiacoinOutput != finalRev.Revision.MissedHostOutput() { - t.Fatal("expected missed host output to be created") - } -} - -func TestContractRenewal(t *testing.T) { - renterPubkey, renterPrivkey := testingKeypair(0) - hostPubkey, hostPrivkey := testingKeypair(1) - b := genesisWithSiacoinOutputs(types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(100), - }, types.SiacoinOutput{ - Address: types.StandardAddress(hostPubkey), - Value: types.Siacoins(7), - }, types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(200), - }) - sau := GenesisUpdate(b, testingDifficulty) - renterOutput := sau.NewSiacoinElements[1] - hostOutput := sau.NewSiacoinElements[2] - renewOutput := sau.NewSiacoinElements[3] - - // form initial contract - initialRev := types.FileContract{ - 
WindowStart: 5, - WindowEnd: 10, - RenterOutput: types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(58), - }, - HostOutput: types.SiacoinOutput{ - Address: types.StandardAddress(hostPubkey), - Value: types.Siacoins(19), - }, - MissedHostValue: types.Siacoins(17), - TotalCollateral: types.Siacoins(18), - RenterPublicKey: renterPubkey, - HostPublicKey: hostPubkey, - } - contractHash := sau.State.ContractSigHash(initialRev) - initialRev.RenterSignature = renterPrivkey.SignHash(contractHash) - initialRev.HostSignature = hostPrivkey.SignHash(contractHash) - outputSum := initialRev.RenterOutput.Value.Add(initialRev.HostOutput.Value).Add(sau.State.FileContractTax(initialRev)) - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{ - {Parent: renterOutput, SpendPolicy: types.PolicyPublicKey(renterPubkey)}, - {Parent: hostOutput, SpendPolicy: types.PolicyPublicKey(hostPubkey)}, - }, - FileContracts: []types.FileContract{initialRev}, - MinerFee: renterOutput.Value.Add(hostOutput.Value).Sub(outputSum), - } - sigHash := sau.State.InputSigHash(txn) - txn.SiacoinInputs[0].Signatures = []types.Signature{renterPrivkey.SignHash(sigHash)} - txn.SiacoinInputs[1].Signatures = []types.Signature{hostPrivkey.SignHash(sigHash)} - - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - sau.UpdateElementProof(&renewOutput.StateElement) - - if len(sau.NewFileContracts) != 1 { - t.Fatal("expected one new file contract") - } - fc := sau.NewFileContracts[0] - if !sau.State.Elements.ContainsUnresolvedFileContractElement(fc) { - t.Fatal("accumulator should contain unresolved contract") - } - - // construct the renewal by finalizing the old contract and initializing the - // new contract, rolling over some SC into the new contract - finalRev := fc.FileContract - finalRev.RevisionNumber = types.MaxRevisionNumber - contractHash = 
sau.State.ContractSigHash(finalRev) - finalRev.RenterSignature = renterPrivkey.SignHash(contractHash) - finalRev.HostSignature = hostPrivkey.SignHash(contractHash) - - initialRev = fc.FileContract - initialRev.RevisionNumber = 0 - initialRev.WindowStart += 10 - initialRev.WindowEnd += 10 - initialRev.RenterOutput.Value = types.Siacoins(100) - initialRev.HostOutput.Value = types.Siacoins(100) - initialRev.MissedHostValue = types.Siacoins(100) - initialRev.TotalCollateral = types.Siacoins(100) - contractHash = sau.State.ContractSigHash(initialRev) - initialRev.RenterSignature = renterPrivkey.SignHash(contractHash) - initialRev.HostSignature = hostPrivkey.SignHash(contractHash) - - renewal := types.FileContractRenewal{ - FinalRevision: finalRev, - InitialRevision: initialRev, - RenterRollover: types.Siacoins(3), - HostRollover: types.Siacoins(6), - } - renewalHash := sau.State.RenewalSigHash(renewal) - renewal.RenterSignature = renterPrivkey.SignHash(renewalHash) - renewal.HostSignature = hostPrivkey.SignHash(renewalHash) - - // since we increased the amount of value in the contract, we need to add - // more inputs - rollover := renewal.RenterRollover.Add(renewal.HostRollover) - contractCost := initialRev.RenterOutput.Value.Add(initialRev.HostOutput.Value).Add(sau.State.FileContractTax(initialRev)).Sub(rollover) - txn = types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: renewOutput, - SpendPolicy: types.PolicyPublicKey(renterPubkey), - }}, - FileContractResolutions: []types.FileContractResolution{{ - Parent: fc, - Renewal: renewal, - }}, - MinerFee: renewOutput.Value.Sub(contractCost), - } - sigHash = sau.State.InputSigHash(txn) - txn.SiacoinInputs[0].Signatures = []types.Signature{renterPrivkey.SignHash(sigHash)} - - // after applying the transaction, we should observe a number of effects: - // - the old contract should be marked resolved - // - the new contract should be created - // - the old contract payouts, sans rollover, should be created - b 
= mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - expRenterOutput := types.SiacoinOutput{ - Value: finalRev.RenterOutput.Value.Sub(renewal.RenterRollover), - Address: finalRev.RenterOutput.Address, - } - expHostOutput := types.SiacoinOutput{ - Value: finalRev.HostOutput.Value.Sub(renewal.HostRollover), - Address: finalRev.HostOutput.Address, - } - if len(sau.ResolvedFileContracts) != 1 { - t.Fatal("expected one resolved file contract") - } else if len(sau.NewFileContracts) != 1 { - t.Fatal("expected one created file contract") - } else if len(sau.NewSiacoinElements) != 3 { - t.Fatal("expected three new siacoin outputs") - } else if sau.NewSiacoinElements[1].SiacoinOutput != expRenterOutput { - t.Fatal("expected valid renter output to be created", sau.NewSiacoinElements[1].SiacoinOutput, expRenterOutput) - } else if sau.NewSiacoinElements[1].MaturityHeight != sau.State.MaturityHeight()-1 { - t.Fatal("renter output has wrong maturity height") - } else if sau.NewSiacoinElements[2].SiacoinOutput != expHostOutput { - t.Fatal("expected valid host output to be created", sau.NewSiacoinElements[2].SiacoinOutput, expHostOutput) - } else if sau.NewSiacoinElements[2].MaturityHeight != sau.State.MaturityHeight()-1 { - t.Fatal("host output has wrong maturity height") - } - fc = sau.NewFileContracts[0] - if !sau.State.Elements.ContainsUnresolvedFileContractElement(fc) { - t.Fatal("accumulator should contain unresolved contract") - } - - // renew the contract again, this time with a total value less than the - // current contract; no additional funding should be required - finalRev = fc.FileContract - finalRev.RevisionNumber = types.MaxRevisionNumber - contractHash = sau.State.ContractSigHash(finalRev) - finalRev.RenterSignature = renterPrivkey.SignHash(contractHash) - finalRev.HostSignature = hostPrivkey.SignHash(contractHash) - - initialRev = fc.FileContract - initialRev.RevisionNumber = 0 - 
initialRev.WindowStart += 10 - initialRev.WindowEnd += 10 - initialRev.RenterOutput.Value = types.Siacoins(10) - initialRev.HostOutput.Value = types.Siacoins(10) - initialRev.MissedHostValue = types.Siacoins(10) - initialRev.TotalCollateral = types.Siacoins(10) - contractHash = sau.State.ContractSigHash(initialRev) - initialRev.RenterSignature = renterPrivkey.SignHash(contractHash) - initialRev.HostSignature = hostPrivkey.SignHash(contractHash) - - renewal = types.FileContractRenewal{ - FinalRevision: finalRev, - InitialRevision: initialRev, - RenterRollover: types.Siacoins(17).Add(sau.State.FileContractTax(initialRev)), - HostRollover: types.Siacoins(3), - } - renewalHash = sau.State.RenewalSigHash(renewal) - renewal.RenterSignature = renterPrivkey.SignHash(renewalHash) - renewal.HostSignature = hostPrivkey.SignHash(renewalHash) - - txn = types.Transaction{ - FileContractResolutions: []types.FileContractResolution{{ - Parent: fc, - Renewal: renewal, - }}, - } - - // apply the transaction - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - expRenterOutput = types.SiacoinOutput{ - Value: finalRev.RenterOutput.Value.Sub(renewal.RenterRollover), - Address: finalRev.RenterOutput.Address, - } - expHostOutput = types.SiacoinOutput{ - Value: finalRev.HostOutput.Value.Sub(renewal.HostRollover), - Address: finalRev.HostOutput.Address, - } - if len(sau.ResolvedFileContracts) != 1 { - t.Fatal("expected one resolved file contract") - } else if len(sau.NewFileContracts) != 1 { - t.Fatal("expected one created file contract") - } else if len(sau.NewSiacoinElements) != 3 { - t.Fatal("expected three new siacoin outputs") - } else if sau.NewSiacoinElements[1].SiacoinOutput != expRenterOutput { - t.Fatal("expected valid renter output to be created", sau.NewSiacoinElements[1].SiacoinOutput, expRenterOutput) - } else if sau.NewSiacoinElements[1].MaturityHeight != sau.State.MaturityHeight()-1 { - 
t.Fatal("renter output has wrong maturity height") - } else if sau.NewSiacoinElements[2].SiacoinOutput != expHostOutput { - t.Fatal("expected valid host output to be created", sau.NewSiacoinElements[2].SiacoinOutput, expHostOutput) - } else if sau.NewSiacoinElements[2].MaturityHeight != sau.State.MaturityHeight()-1 { - t.Fatal("host output has wrong maturity height") - } -} - -func TestContractFinalization(t *testing.T) { - renterPubkey, renterPrivkey := testingKeypair(0) - hostPubkey, hostPrivkey := testingKeypair(1) - b := genesisWithSiacoinOutputs(types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(100), - }, types.SiacoinOutput{ - Address: types.StandardAddress(hostPubkey), - Value: types.Siacoins(7), - }) - sau := GenesisUpdate(b, testingDifficulty) - renterOutput := sau.NewSiacoinElements[1] - hostOutput := sau.NewSiacoinElements[2] - - // form initial contract - initialRev := types.FileContract{ - WindowStart: 5, - WindowEnd: 10, - RenterOutput: types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(58), - }, - HostOutput: types.SiacoinOutput{ - Address: types.StandardAddress(hostPubkey), - Value: types.Siacoins(19), - }, - MissedHostValue: types.Siacoins(17), - TotalCollateral: types.Siacoins(18), - RenterPublicKey: renterPubkey, - HostPublicKey: hostPubkey, - } - contractHash := sau.State.ContractSigHash(initialRev) - initialRev.RenterSignature = renterPrivkey.SignHash(contractHash) - initialRev.HostSignature = hostPrivkey.SignHash(contractHash) - outputSum := initialRev.RenterOutput.Value.Add(initialRev.HostOutput.Value).Add(sau.State.FileContractTax(initialRev)) - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{ - {Parent: renterOutput, SpendPolicy: types.PolicyPublicKey(renterPubkey)}, - {Parent: hostOutput, SpendPolicy: types.PolicyPublicKey(hostPubkey)}, - }, - FileContracts: []types.FileContract{initialRev}, - MinerFee: 
renterOutput.Value.Add(hostOutput.Value).Sub(outputSum), - } - sigHash := sau.State.InputSigHash(txn) - txn.SiacoinInputs[0].Signatures = []types.Signature{renterPrivkey.SignHash(sigHash)} - txn.SiacoinInputs[1].Signatures = []types.Signature{hostPrivkey.SignHash(sigHash)} - - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - - if len(sau.NewFileContracts) != 1 { - t.Fatal("expected one new file contract") - } - fc := sau.NewFileContracts[0] - if !sau.State.Elements.ContainsUnresolvedFileContractElement(fc) { - t.Fatal("accumulator should contain unresolved contract") - } - if sau.State.SiafundPool != sau.State.FileContractTax(initialRev) { - t.Fatal("expected siafund pool to increase") - } - - // finalize the contract - finalRev := fc.FileContract - finalRev.RevisionNumber = types.MaxRevisionNumber - contractHash = sau.State.ContractSigHash(finalRev) - finalRev.RenterSignature = renterPrivkey.SignHash(contractHash) - finalRev.HostSignature = hostPrivkey.SignHash(contractHash) - txn = types.Transaction{ - FileContractResolutions: []types.FileContractResolution{{ - Parent: fc, - Finalization: finalRev, - }}, - } - - // after applying the transaction, the contract's outputs should be created immediately - b = mineBlock(sau.State, b, txn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - if len(sau.ResolvedFileContracts) != 1 { - t.Fatal("expected one resolved file contract") - } else if len(sau.NewSiacoinElements) != 3 { - t.Fatal("expected three new siacoin outputs") - } else if sau.NewSiacoinElements[1].SiacoinOutput != finalRev.RenterOutput { - t.Fatal("expected renter output to be created") - } else if sau.NewSiacoinElements[2].SiacoinOutput != finalRev.HostOutput { - t.Fatal("expected valid host output to be created") - } -} - -func TestRevertFileContractRevision(t *testing.T) { - renterPubkey, renterPrivkey := 
testingKeypair(0) - hostPubkey, hostPrivkey := testingKeypair(1) - b := genesisWithSiacoinOutputs(types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(100), - }, types.SiacoinOutput{ - Address: types.StandardAddress(hostPubkey), - Value: types.Siacoins(7), - }) - parent := b - sau := GenesisUpdate(b, testingDifficulty) - renterOutput := sau.NewSiacoinElements[1] - hostOutput := sau.NewSiacoinElements[2] - prevState, s := sau.State, sau.State - - // form initial contract - initialRev := types.FileContract{ - WindowStart: 5, - WindowEnd: 10, - RenterOutput: types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(58), - }, - HostOutput: types.SiacoinOutput{ - Address: types.StandardAddress(hostPubkey), - Value: types.Siacoins(19), - }, - MissedHostValue: types.Siacoins(17), - TotalCollateral: types.Siacoins(18), - RenterPublicKey: renterPubkey, - HostPublicKey: hostPubkey, - } - contractHash := s.ContractSigHash(initialRev) - initialRev.RenterSignature = renterPrivkey.SignHash(contractHash) - initialRev.HostSignature = hostPrivkey.SignHash(contractHash) - outputSum := initialRev.RenterOutput.Value.Add(initialRev.HostOutput.Value).Add(s.FileContractTax(initialRev)) - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{ - {Parent: renterOutput, SpendPolicy: types.PolicyPublicKey(renterPubkey)}, - {Parent: hostOutput, SpendPolicy: types.PolicyPublicKey(hostPubkey)}, - }, - FileContracts: []types.FileContract{initialRev}, - MinerFee: renterOutput.Value.Add(hostOutput.Value).Sub(outputSum), - } - sigHash := s.InputSigHash(txn) - txn.SiacoinInputs[0].Signatures = []types.Signature{renterPrivkey.SignHash(sigHash)} - txn.SiacoinInputs[1].Signatures = []types.Signature{hostPrivkey.SignHash(sigHash)} - - // mine a block confirming the contract - parent, b = b, mineBlock(s, b, txn) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(s, b) - prevState, s = s, 
sau.State - - // verify that the contract is now in the consensus set - if len(sau.NewFileContracts) != 1 { - t.Fatal("expected one new file contract") - } - fce := sau.NewFileContracts[0] - if !s.Elements.ContainsUnresolvedFileContractElement(fce) { - t.Fatal("accumulator should contain unresolved contract") - } else if !reflect.DeepEqual(fce.FileContract, initialRev) { - t.Fatal("expected file contract to match initial revision") - } - - // create a revision of the contract - rev1 := types.FileContractRevision{ - Parent: fce, - Revision: fce.FileContract, - } - rev1.Revision.RevisionNumber = 2 - contractHash = s.ContractSigHash(rev1.Revision) - rev1.Revision.RenterSignature = renterPrivkey.SignHash(contractHash) - rev1.Revision.HostSignature = hostPrivkey.SignHash(contractHash) - parent, b = b, mineBlock(s, b, types.Transaction{ - FileContractRevisions: []types.FileContractRevision{rev1}, - }) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(s, b) - prevState, s = s, sau.State - if len(sau.RevisedFileContracts) != 1 { - t.Fatal("expected one revised file contract") - } - fce = sau.RevisedFileContracts[0] - if !reflect.DeepEqual(fce.FileContract, rev1.Revision) { - t.Fatal("revision 1 should be applied") - } - sau.UpdateElementProof(&fce.StateElement) - - // create a second revision of the contract - rev2 := types.FileContractRevision{ - Parent: fce, - Revision: fce.FileContract, - } - rev2.Revision.RevisionNumber = 4 - contractHash = s.ContractSigHash(rev2.Revision) - rev2.Revision.RenterSignature = renterPrivkey.SignHash(contractHash) - rev2.Revision.HostSignature = hostPrivkey.SignHash(contractHash) - parent, b = b, mineBlock(s, b, types.Transaction{ - FileContractRevisions: []types.FileContractRevision{rev2}, - }) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(s, b) - prevState, s = s, sau.State - if len(sau.RevisedFileContracts) != 1 { - t.Fatal("expected one revised file contract") - } - 
fce = sau.RevisedFileContracts[0] - if !reflect.DeepEqual(fce.FileContract, rev2.Revision) { - t.Fatal("revision 2 should be applied") - } - sau.UpdateElementProof(&fce.StateElement) - - // revert the revision and confirm that the contract is reverted to it's - // rev1 state. - sru := RevertBlock(prevState, b) - b = parent - s = sru.State - fce = sru.RevisedFileContracts[0] - if !reflect.DeepEqual(fce.FileContract, rev1.Revision) { - t.Fatal("contract should revert to revision 1") - } - sru.UpdateElementProof(&fce.StateElement) - - // create a final revision of the contract - rev3 := types.FileContractRevision{ - Parent: fce, - Revision: fce.FileContract, - } - rev3.Revision.RevisionNumber = 3 - contractHash = s.ContractSigHash(rev3.Revision) - rev3.Revision.RenterSignature = renterPrivkey.SignHash(contractHash) - rev3.Revision.HostSignature = hostPrivkey.SignHash(contractHash) - txn = types.Transaction{ - FileContractRevisions: []types.FileContractRevision{rev3}, - } - parent, b = b, mineBlock(s, b, txn) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(s, b) - prevState, s = s, sau.State - if len(sau.RevisedFileContracts) != 1 { - t.Fatal("expected one revised file contract") - } - fce = sau.RevisedFileContracts[0] - if !reflect.DeepEqual(fce.FileContract, rev3.Revision) { - t.Fatal("revision 3 should be applied") - } -} - -func BenchmarkApplyBlock(b *testing.B) { - block := types.Block{ - Transactions: []types.Transaction{{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: types.SiacoinElement{ - StateElement: types.StateElement{ - LeafIndex: types.EphemeralLeafIndex, - }, - }, - SpendPolicy: types.AnyoneCanSpend(), - }}, - SiacoinOutputs: make([]types.SiacoinOutput, 1000), - }}, - } - for i := 0; i < b.N; i++ { - ApplyBlock(State{}, block) - } -} diff --git a/v2/consensus/validation.go b/v2/consensus/validation.go deleted file mode 100644 index 5c0c1141..00000000 --- a/v2/consensus/validation.go +++ /dev/null @@ -1,626 +0,0 @@ 
-// Package consensus implements the Sia consensus algorithms. -package consensus - -import ( - "errors" - "fmt" - "math/bits" - "sort" - "time" - - "go.sia.tech/core/v2/merkle" - "go.sia.tech/core/v2/types" -) - -var ( - // ErrOverweight is returned when a block's weight exceeds MaxBlockWeight. - ErrOverweight = errors.New("block is too heavy") - - // ErrOverflow is returned when the sum of a transaction's inputs and/or - // outputs overflows the Currency representation. - ErrOverflow = errors.New("sum of currency values overflowed") -) - -func (s State) medianTimestamp() time.Time { - prevCopy := s.PrevTimestamps - ts := prevCopy[:s.numTimestamps()] - sort.Slice(ts, func(i, j int) bool { return ts[i].Before(ts[j]) }) - if len(ts)%2 != 0 { - return ts[len(ts)/2] - } - l, r := ts[len(ts)/2-1], ts[len(ts)/2] - return l.Add(r.Sub(l) / 2) -} - -func (s State) validateHeader(h types.BlockHeader) error { - if h.Height != s.Index.Height+1 { - return errors.New("wrong height") - } else if h.ParentID != s.Index.ID { - return errors.New("wrong parent ID") - } else if h.Timestamp.Before(s.medianTimestamp()) { - return errors.New("timestamp is too far in the past") - } else if h.Nonce%s.NonceFactor() != 0 { - return errors.New("nonce is not divisible by required factor") - } else if types.WorkRequiredForHash(h.ID()).Cmp(s.Difficulty) < 0 { - return errors.New("insufficient work") - } - return nil -} - -func (s State) validateCurrencyValues(txn types.Transaction) error { - // Add up all of the currency values in the transaction and check for - // overflow. This allows us to freely add any currency values in later - // validation functions without worrying about overflow. - // - // NOTE: This check could be a little more "tolerant" -- currently it adds - // both the input and output values to the same sum, and it double-counts - // some value in file contracts. Even so, it won't be possible to construct - // a valid transaction that fails this check for ~50,000 years. 
- - var sum types.Currency - var sum64 uint64 - var overflowed bool - add := func(x types.Currency) { - if !overflowed { - sum, overflowed = sum.AddWithOverflow(x) - } - } - add64 := func(x uint64) { - if !overflowed { - s, carry := bits.Add64(sum64, x, 0) - sum64, overflowed = s, carry > 0 - } - } - addContract := func(fc types.FileContract) { - add(fc.RenterOutput.Value) - add(fc.HostOutput.Value) - add(fc.MissedHostValue) - add(fc.TotalCollateral) - add(s.FileContractTax(fc)) - } - - for _, in := range txn.SiacoinInputs { - add(in.Parent.Value) - } - for i, out := range txn.SiacoinOutputs { - if out.Value.IsZero() { - return fmt.Errorf("siacoin output %v has zero value", i) - } - add(out.Value) - } - for _, in := range txn.SiafundInputs { - add64(in.Parent.Value) - } - for i, out := range txn.SiafundOutputs { - if out.Value == 0 { - return fmt.Errorf("siafund output %v has zero value", i) - } - add64(out.Value) - } - for _, fc := range txn.FileContracts { - addContract(fc) - } - for _, fc := range txn.FileContractRevisions { - addContract(fc.Revision) - } - for _, fcr := range txn.FileContractResolutions { - if fcr.HasRenewal() { - add(fcr.Renewal.RenterRollover) - add(fcr.Renewal.HostRollover) - addContract(fcr.Renewal.InitialRevision) - } else if fcr.HasFinalization() { - addContract(fcr.Finalization) - } - } - add(txn.MinerFee) - if overflowed { - return ErrOverflow - } - return nil -} - -func (s State) validateTimeLocks(txn types.Transaction) error { - blockHeight := s.Index.Height + 1 - for i, in := range txn.SiacoinInputs { - if in.Parent.MaturityHeight > blockHeight { - return fmt.Errorf("siacoin input %v does not mature until block %v", i, in.Parent.MaturityHeight) - } - } - return nil -} - -func (s State) validateContract(fc types.FileContract) error { - switch { - case fc.WindowEnd <= s.Index.Height: - return fmt.Errorf("has proof window (%v-%v) that ends in the past", fc.WindowStart, fc.WindowEnd) - case fc.WindowEnd <= fc.WindowStart: - return 
fmt.Errorf("has proof window (%v-%v) that ends before it begins", fc.WindowStart, fc.WindowEnd) - case fc.MissedHostValue.Cmp(fc.HostOutput.Value) > 0: - return fmt.Errorf("has missed host value (%v SC) exceeding valid host value (%v SC)", fc.MissedHostValue, fc.HostOutput.Value) - case fc.TotalCollateral.Cmp(fc.HostOutput.Value) > 0: - return fmt.Errorf("has total collateral (%v SC) exceeding valid host value (%v SC)", fc.TotalCollateral, fc.HostOutput.Value) - } - contractHash := s.ContractSigHash(fc) - if !fc.RenterPublicKey.VerifyHash(contractHash, fc.RenterSignature) { - return fmt.Errorf("has invalid renter signature") - } else if !fc.HostPublicKey.VerifyHash(contractHash, fc.HostSignature) { - return fmt.Errorf("has invalid host signature") - } - return nil -} - -func (s State) validateRevision(cur, rev types.FileContract) error { - curOutputSum := cur.RenterOutput.Value.Add(cur.HostOutput.Value) - revOutputSum := rev.RenterOutput.Value.Add(rev.HostOutput.Value) - switch { - case rev.RevisionNumber <= cur.RevisionNumber: - return fmt.Errorf("does not increase revision number (%v -> %v)", cur.RevisionNumber, rev.RevisionNumber) - case !revOutputSum.Equals(curOutputSum): - return fmt.Errorf("modifies output sum (%v SC -> %v SC)", curOutputSum, revOutputSum) - case rev.TotalCollateral != cur.TotalCollateral: - return fmt.Errorf("modifies total collateral") - case rev.WindowEnd <= s.Index.Height: - return fmt.Errorf("has proof window (%v-%v) that ends in the past", rev.WindowStart, rev.WindowEnd) - case rev.WindowEnd <= rev.WindowStart: - return fmt.Errorf("has proof window (%v - %v) that ends before it begins", rev.WindowStart, rev.WindowEnd) - } - - // verify signatures - // - // NOTE: very important that we verify with the *current* keys! 
- contractHash := s.ContractSigHash(rev) - if !cur.RenterPublicKey.VerifyHash(contractHash, rev.RenterSignature) { - return fmt.Errorf("has invalid renter signature") - } else if !cur.HostPublicKey.VerifyHash(contractHash, rev.HostSignature) { - return fmt.Errorf("has invalid host signature") - } - return nil -} - -func (s State) validateFileContracts(txn types.Transaction) error { - for i, fc := range txn.FileContracts { - if err := s.validateContract(fc); err != nil { - return fmt.Errorf("file contract %v %s", i, err) - } - } - return nil -} - -func (s State) validateFileContractRevisions(txn types.Transaction) error { - for i, fcr := range txn.FileContractRevisions { - cur, rev := fcr.Parent.FileContract, fcr.Revision - if s.Index.Height > cur.WindowStart { - return fmt.Errorf("file contract revision %v cannot be applied to contract whose proof window (%v - %v) has already begun", i, cur.WindowStart, cur.WindowEnd) - } else if err := s.validateRevision(cur, rev); err != nil { - return fmt.Errorf("file contract revision %v %s", i, err) - } - } - return nil -} - -func (s State) validateFileContractResolutions(txn types.Transaction) error { - for i, fcr := range txn.FileContractResolutions { - // only one resolution type should be present - var typs int - for _, b := range [...]bool{ - fcr.HasRenewal(), - fcr.HasStorageProof(), - fcr.HasFinalization(), - } { - if b { - typs++ - } - } - if typs > 1 { - return fmt.Errorf("file contract resolution %v has multiple resolution types", i) - } - - fc := fcr.Parent.FileContract - if fcr.HasRenewal() { - // renter and host want to renew the contract, carrying over some - // funds and releasing the rest; this can be done at any point - // before WindowEnd (even before WindowStart) - old, renewed := fcr.Renewal.FinalRevision, fcr.Renewal.InitialRevision - if fc.WindowEnd < s.Index.Height { - return fmt.Errorf("file contract renewal %v cannot be applied to contract whose proof window (%v - %v) has expired", i, fc.WindowStart, 
fc.WindowEnd) - } else if old.RevisionNumber != types.MaxRevisionNumber { - return fmt.Errorf("file contract renewal %v does not finalize old contract", i) - } else if err := s.validateRevision(fc, old); err != nil { - return fmt.Errorf("file contract renewal %v has final revision that %s", i, err) - } else if err := s.validateContract(renewed); err != nil { - return fmt.Errorf("file contract renewal %v has initial revision that %s", i, err) - } - - // rollover must not exceed total contract value - rollover := fcr.Renewal.RenterRollover.Add(fcr.Renewal.HostRollover) - newContractCost := renewed.RenterOutput.Value.Add(renewed.HostOutput.Value).Add(s.FileContractTax(renewed)) - if fcr.Renewal.RenterRollover.Cmp(old.RenterOutput.Value) > 0 { - return fmt.Errorf("file contract renewal %v has renter rollover (%v SC) exceeding old output (%v SC)", i, fcr.Renewal.RenterRollover, old.RenterOutput.Value) - } else if fcr.Renewal.HostRollover.Cmp(old.HostOutput.Value) > 0 { - return fmt.Errorf("file contract renewal %v has host rollover (%v SC) exceeding old output (%v SC)", i, fcr.Renewal.HostRollover, old.HostOutput.Value) - } else if rollover.Cmp(newContractCost) > 0 { - return fmt.Errorf("file contract renewal %v has rollover (%v SC) exceeding new contract cost (%v SC)", i, rollover, newContractCost) - } - - renewalHash := s.RenewalSigHash(fcr.Renewal) - if !fc.RenterPublicKey.VerifyHash(renewalHash, fcr.Renewal.RenterSignature) { - return fmt.Errorf("file contract renewal %v has invalid renter signature", i) - } else if !fc.HostPublicKey.VerifyHash(renewalHash, fcr.Renewal.HostSignature) { - return fmt.Errorf("file contract renewal %v has invalid host signature", i) - } - } else if fcr.HasFinalization() { - // renter and host have agreed upon an explicit final contract - // state; this can be done at any point before WindowEnd (even - // before WindowStart) - if fc.WindowEnd < s.Index.Height { - return fmt.Errorf("file contract finalization %v cannot be applied to 
contract whose proof window (%v - %v) has expired", i, fc.WindowStart, fc.WindowEnd) - } else if fcr.Finalization.RevisionNumber != types.MaxRevisionNumber { - return fmt.Errorf("file contract finalization %v does not set maximum revision number", i) - } else if err := s.validateRevision(fc, fcr.Finalization); err != nil { - return fmt.Errorf("file contract finalization %v %s", i, err) - } - } else if fcr.HasStorageProof() { - // we must be within the proof window - if s.Index.Height < fc.WindowStart || fc.WindowEnd < s.Index.Height { - return fmt.Errorf("storage proof %v attempts to claim valid outputs outside the proof window (%v - %v)", i, fc.WindowStart, fc.WindowEnd) - } else if fcr.StorageProof.WindowStart.Height != fc.WindowStart { - // see note on this field in types.StorageProof - return fmt.Errorf("storage proof %v has WindowStart (%v) that does not match contract WindowStart (%v)", i, fcr.StorageProof.WindowStart.Height, fc.WindowStart) - } - leafIndex := s.StorageProofLeafIndex(fc.Filesize, fcr.StorageProof.WindowStart, fcr.Parent.ID) - if merkle.StorageProofRoot(fcr.StorageProof, leafIndex) != fc.FileMerkleRoot { - return fmt.Errorf("storage proof %v has root that does not match contract Merkle root", i) - } - } else if fc.Filesize == 0 { - // empty contract; can claim valid outputs after WindowStart - if s.Index.Height < fc.WindowStart { - return fmt.Errorf("file contract expiration %v attempts to claim valid outputs, but proof window (%v - %v) has not begun", i, fc.WindowStart, fc.WindowEnd) - } - } else { - // non-empty contract; can claim missed outputs after WindowEnd - if s.Index.Height <= fc.WindowEnd { - return fmt.Errorf("file contract expiration %v attempts to claim missed outputs, but proof window (%v - %v) has not expired", i, fc.WindowStart, fc.WindowEnd) - } - } - } - return nil -} - -func (s State) validateAttestations(txn types.Transaction) error { - for i, a := range txn.Attestations { - switch { - case len(a.Key) == 0: - return 
fmt.Errorf("attestation %v has empty key", i) - case !a.PublicKey.VerifyHash(s.AttestationSigHash(a), a.Signature): - return fmt.Errorf("attestation %v has invalid signature", i) - } - } - return nil -} - -func (s State) outputsEqualInputs(txn types.Transaction) error { - var inputSC, outputSC types.Currency - for _, in := range txn.SiacoinInputs { - inputSC = inputSC.Add(in.Parent.Value) - } - for _, out := range txn.SiacoinOutputs { - outputSC = outputSC.Add(out.Value) - } - for _, fc := range txn.FileContracts { - outputSC = outputSC.Add(fc.RenterOutput.Value).Add(fc.HostOutput.Value).Add(s.FileContractTax(fc)) - } - for _, fcr := range txn.FileContractResolutions { - if fcr.HasRenewal() { - // a renewal creates a new contract, optionally "rolling over" funds - // from the old contract - inputSC = inputSC.Add(fcr.Renewal.RenterRollover) - inputSC = inputSC.Add(fcr.Renewal.HostRollover) - - rev := fcr.Renewal.InitialRevision - outputSC = outputSC.Add(rev.RenterOutput.Value).Add(rev.HostOutput.Value).Add(s.FileContractTax(rev)) - } - } - - outputSC = outputSC.Add(txn.MinerFee) - if inputSC != outputSC { - return fmt.Errorf("siacoin inputs (%v SC) do not equal siacoin outputs (%v SC)", inputSC, outputSC) - } - - var inputSF, outputSF uint64 - for _, in := range txn.SiafundInputs { - inputSF += in.Parent.Value - } - for _, out := range txn.SiafundOutputs { - outputSF += out.Value - } - if inputSF != outputSF { - return fmt.Errorf("siafund inputs (%d SF) do not equal siafund outputs (%d SF)", inputSF, outputSF) - } - - return nil -} - -func (s State) validateStateProofs(txn types.Transaction) error { - for i, in := range txn.SiacoinInputs { - switch { - case in.Parent.LeafIndex == types.EphemeralLeafIndex: - continue - case s.Elements.ContainsUnspentSiacoinElement(in.Parent): - continue - case s.Elements.ContainsSpentSiacoinElement(in.Parent): - return fmt.Errorf("siacoin input %v double-spends output %v", i, in.Parent.ID) - default: - return fmt.Errorf("siacoin 
input %v spends output (%v) not present in the accumulator", i, in.Parent.ID) - } - } - for i, in := range txn.SiafundInputs { - switch { - case s.Elements.ContainsUnspentSiafundElement(in.Parent): - continue - case s.Elements.ContainsSpentSiafundElement(in.Parent): - return fmt.Errorf("siafund input %v double-spends output %v", i, in.Parent.ID) - default: - return fmt.Errorf("siafund input %v spends output (%v) not present in the accumulator", i, in.Parent.ID) - } - } - for i, fcr := range txn.FileContractRevisions { - switch { - case s.Elements.ContainsUnresolvedFileContractElement(fcr.Parent): - continue - case s.Elements.ContainsResolvedFileContractElement(fcr.Parent): - return fmt.Errorf("file contract revision %v revises a contract (%v) that has already resolved", i, fcr.Parent.ID) - default: - return fmt.Errorf("file contract revision %v revises a contract (%v) not present in the accumulator", i, fcr.Parent.ID) - } - } - for i, fcr := range txn.FileContractResolutions { - switch { - case s.Elements.ContainsUnresolvedFileContractElement(fcr.Parent): - continue - case s.Elements.ContainsResolvedFileContractElement(fcr.Parent): - return fmt.Errorf("file contract resolution %v resolves a contract (%v) that has already resolved", i, fcr.Parent.ID) - default: - return fmt.Errorf("file contract resolution %v resolves a contract (%v) not present in the accumulator", i, fcr.Parent.ID) - } - } - return nil -} - -func (s State) validateHistoryProofs(txn types.Transaction) error { - for i, fcr := range txn.FileContractResolutions { - if fcr.HasStorageProof() && !s.History.Contains(fcr.StorageProof.WindowStart, fcr.StorageProof.WindowProof) { - return fmt.Errorf("file contract resolution %v has storage proof with invalid history proof", i) - } - } - return nil -} - -func (s State) validateFoundationUpdate(txn types.Transaction) error { - if txn.NewFoundationAddress == types.VoidAddress { - return nil - } - for _, in := range txn.SiacoinInputs { - if in.Parent.Address == 
s.FoundationAddress { - return nil - } - } - return errors.New("transaction changes Foundation address, but does not spend an input controlled by current address") -} - -func (s State) validateSpendPolicies(txn types.Transaction) error { - sigHash := s.InputSigHash(txn) - verifyPolicy := func(p types.SpendPolicy, sigs []types.Signature) error { - var verify func(types.SpendPolicy) error - verify = func(p types.SpendPolicy) error { - switch p := p.Type.(type) { - case types.PolicyTypeAbove: - if s.Index.Height > uint64(p) { - return nil - } - return fmt.Errorf("height not above %v", uint64(p)) - case types.PolicyTypePublicKey: - for i := range sigs { - if types.PublicKey(p).VerifyHash(sigHash, sigs[i]) { - sigs = sigs[i+1:] - return nil - } - } - return errors.New("no signatures matching pubkey") - case types.PolicyTypeThreshold: - for i := 0; i < len(p.Of) && p.N > 0 && len(p.Of[i:]) >= int(p.N); i++ { - if verify(p.Of[i]) == nil { - p.N-- - } - } - if p.N != 0 { - return errors.New("threshold not reached") - } - return nil - case types.PolicyTypeUnlockConditions: - if err := verify(types.PolicyAbove(p.Timelock)); err != nil { - return err - } - n := p.SignaturesRequired - of := make([]types.SpendPolicy, len(p.PublicKeys)) - for i, pk := range p.PublicKeys { - of[i] = types.PolicyPublicKey(pk) - } - return verify(types.PolicyThreshold(n, of)) - } - panic("invalid policy type") // developer error - } - return verify(p) - } - - for i, in := range txn.SiacoinInputs { - if in.SpendPolicy.Address() != in.Parent.Address { - return fmt.Errorf("siacoin input %v claims incorrect policy for parent address", i) - } else if err := verifyPolicy(in.SpendPolicy, in.Signatures); err != nil { - return fmt.Errorf("siacoin input %v failed to satisfy spend policy: %w", i, err) - } - } - for i, in := range txn.SiafundInputs { - if in.SpendPolicy.Address() != in.Parent.Address { - return fmt.Errorf("siafund input %v claims incorrect policy for parent address", i) - } else if err := 
verifyPolicy(in.SpendPolicy, in.Signatures); err != nil { - return fmt.Errorf("siafund input %v failed to satisfy spend policy: %w", i, err) - } - } - return nil -} - -// ValidateTransaction partially validates txn for inclusion in a child block. -// It does not validate ephemeral outputs. -func (s State) ValidateTransaction(txn types.Transaction) error { - // check proofs first; that way, subsequent checks can assume that all - // parent StateElements are valid - if err := s.validateStateProofs(txn); err != nil { - return err - } else if err := s.validateHistoryProofs(txn); err != nil { - return err - } - - if err := s.validateCurrencyValues(txn); err != nil { - return err - } else if err := s.validateTimeLocks(txn); err != nil { - return err - } else if err := s.outputsEqualInputs(txn); err != nil { - return err - } else if err := s.validateFoundationUpdate(txn); err != nil { - return err - } else if err := s.validateFileContracts(txn); err != nil { - return err - } else if err := s.validateFileContractRevisions(txn); err != nil { - return err - } else if err := s.validateFileContractResolutions(txn); err != nil { - return err - } else if err := s.validateAttestations(txn); err != nil { - return err - } else if err := s.validateSpendPolicies(txn); err != nil { - return err - } - return nil -} - -func (s State) validateEphemeralOutputs(txns []types.Transaction) error { - // skip this check if no ephemeral outputs are present - for _, txn := range txns { - for _, in := range txn.SiacoinInputs { - if in.Parent.LeafIndex == types.EphemeralLeafIndex { - goto validate - } - } - } - return nil - -validate: - available := make(map[types.ElementID]types.SiacoinOutput) - for txnIndex, txn := range txns { - txid := txn.ID() - var index uint64 - nextID := func() types.ElementID { - id := types.ElementID{ - Source: types.Hash256(txid), - Index: index, - } - index++ - return id - } - - for _, in := range txn.SiacoinInputs { - if in.Parent.LeafIndex == types.EphemeralLeafIndex 
{ - if out, ok := available[in.Parent.ID]; !ok { - return fmt.Errorf("transaction set is invalid: transaction %v claims non-existent ephemeral output %v", txnIndex, in.Parent.ID) - } else if in.Parent.Value != out.Value { - return fmt.Errorf("transaction set is invalid: transaction %v claims wrong value for ephemeral output %v", txnIndex, in.Parent.ID) - } else if in.Parent.Address != out.Address { - return fmt.Errorf("transaction set is invalid: transaction %v claims wrong address for ephemeral output %v", txnIndex, in.Parent.ID) - } - delete(available, in.Parent.ID) - } - } - for _, out := range txn.SiacoinOutputs { - available[nextID()] = out - } - } - return nil -} - -func (s State) noDoubleSpends(txns []types.Transaction) error { - spent := make(map[types.ElementID]int) - for i, txn := range txns { - for _, in := range txn.SiacoinInputs { - if prev, ok := spent[in.Parent.ID]; ok { - return fmt.Errorf("transaction set is invalid: transaction %v double-spends siacoin output %v (previously spent in transaction %v)", i, in.Parent.ID, prev) - } - spent[in.Parent.ID] = i - } - for prev, in := range txn.SiafundInputs { - if _, ok := spent[in.Parent.ID]; ok { - return fmt.Errorf("transaction set is invalid: transaction %v double-spends siafund output %v (previously spent in transaction %v)", i, in.Parent.ID, prev) - } - spent[in.Parent.ID] = i - } - } - return nil -} - -func (s State) noDoubleContractUpdates(txns []types.Transaction) error { - updated := make(map[types.ElementID]int) - for i, txn := range txns { - for _, in := range txn.FileContractRevisions { - if prev, ok := updated[in.Parent.ID]; ok { - return fmt.Errorf("transaction set is invalid: transaction %v updates contract %v multiple times (previously updated in transaction %v)", i, in.Parent.ID, prev) - } - updated[in.Parent.ID] = i - } - for _, in := range txn.FileContractResolutions { - if prev, ok := updated[in.Parent.ID]; ok { - return fmt.Errorf("transaction set is invalid: transaction %v updates 
contract %v multiple times (previously updated in transaction %v)", i, in.Parent.ID, prev) - } - updated[in.Parent.ID] = i - } - } - return nil -} - -// ValidateTransactionSet validates txns within the context of s. -func (s State) ValidateTransactionSet(txns []types.Transaction) error { - if s.BlockWeight(txns) > s.MaxBlockWeight() { - return ErrOverweight - } else if err := s.validateEphemeralOutputs(txns); err != nil { - return err - } else if err := s.noDoubleSpends(txns); err != nil { - return err - } else if err := s.noDoubleContractUpdates(txns); err != nil { - return err - } - for i, txn := range txns { - if err := s.ValidateTransaction(txn); err != nil { - return fmt.Errorf("transaction %v is invalid: %w", i, err) - } - } - return nil -} - -// ValidateBlock validates b in the context of s. -// -// This function does not check whether the header's timestamp is too far in the -// future. This check should be performed at the time the block is received, -// e.g. in p2p networking code; see MaxFutureTimestamp. -func (s State) ValidateBlock(b types.Block) error { - h := b.Header - if err := s.validateHeader(h); err != nil { - return err - } else if s.Commitment(h.MinerAddress, b.Transactions) != h.Commitment { - return errors.New("commitment hash does not match header") - } else if err := s.ValidateTransactionSet(b.Transactions); err != nil { - return err - } - return nil -} - -// MaxFutureTimestamp returns the maximum allowed timestamp for a block. 
-func (s State) MaxFutureTimestamp(currentTime time.Time) time.Time { - return currentTime.Add(2 * time.Hour) -} diff --git a/v2/consensus/validation_test.go b/v2/consensus/validation_test.go deleted file mode 100644 index cb1e9853..00000000 --- a/v2/consensus/validation_test.go +++ /dev/null @@ -1,1370 +0,0 @@ -package consensus - -import ( - "encoding/binary" - "math" - "reflect" - "strings" - "testing" - "time" - - "go.sia.tech/core/v2/merkle" - "go.sia.tech/core/v2/types" - - "lukechampine.com/frand" -) - -var ( - maxCurrency = types.NewCurrency(math.MaxUint64, math.MaxUint64) - testingDifficulty = types.Work{NumHashes: [32]byte{30: 1}} -) - -func testingKeypair(seed uint64) (types.PublicKey, types.PrivateKey) { - var b [32]byte - binary.LittleEndian.PutUint64(b[:], seed) - privkey := types.NewPrivateKeyFromSeed(b[:]) - return privkey.PublicKey(), privkey -} - -func genesisWithSiacoinOutputs(scos ...types.SiacoinOutput) types.Block { - return types.Block{ - Header: types.BlockHeader{Timestamp: time.Unix(734600000, 0)}, - Transactions: []types.Transaction{{SiacoinOutputs: scos}}, - } -} - -func signAllInputs(txn *types.Transaction, s State, priv types.PrivateKey) { - sigHash := s.InputSigHash(*txn) - for i := range txn.SiacoinInputs { - txn.SiacoinInputs[i].Signatures = []types.Signature{priv.SignHash(sigHash)} - } - for i := range txn.SiafundInputs { - txn.SiafundInputs[i].Signatures = []types.Signature{priv.SignHash(sigHash)} - } -} - -func TestBlockRewardValue(t *testing.T) { - reward := func(height uint64) types.Currency { - return (&State{Index: types.ChainIndex{Height: height - 1}}).BlockReward() - } - - tests := []struct { - height uint64 - exp types.Currency - }{ - {0, types.Siacoins(300000)}, - {1, types.Siacoins(299999)}, - {100000, types.Siacoins(200000)}, - {269999, types.Siacoins(30001)}, - {270000, types.Siacoins(30000)}, - {270001, types.Siacoins(30000)}, - {1e6, types.Siacoins(30000)}, - } - for _, test := range tests { - if got := 
reward(test.height); got != test.exp { - t.Errorf("expected %v, got %v", test.exp, got) - } - } -} - -func TestEphemeralOutputs(t *testing.T) { - pubkey, privkey := testingKeypair(0) - sau := GenesisUpdate(genesisWithSiacoinOutputs(types.SiacoinOutput{ - Address: types.StandardAddress(pubkey), - Value: types.Siacoins(1), - }), testingDifficulty) - - // create an ephemeral output - parentTxn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: sau.NewSiacoinElements[1], - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Address: types.StandardAddress(pubkey), - Value: types.Siacoins(1), - }}, - } - signAllInputs(&parentTxn, sau.State, privkey) - ephemeralOutput := types.SiacoinElement{ - StateElement: types.StateElement{ - ID: types.ElementID{ - Source: types.Hash256(parentTxn.ID()), - Index: 0, - }, - LeafIndex: types.EphemeralLeafIndex, - }, - SiacoinOutput: types.SiacoinOutput{ - Value: parentTxn.SiacoinOutputs[0].Value, - Address: types.StandardAddress(pubkey), - }, - } - - // create a transaction that spends the ephemeral output - childTxn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: ephemeralOutput, - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Address: types.StandardAddress(pubkey), - Value: ephemeralOutput.Value, - }}, - } - signAllInputs(&childTxn, sau.State, privkey) - - // the transaction set should be valid - if err := sau.State.ValidateTransactionSet([]types.Transaction{parentTxn, childTxn}); err != nil { - t.Fatal(err) - } - - // change the value of the output and attempt to spend it - mintTxn := childTxn.DeepCopy() - mintTxn.SiacoinInputs[0].Parent.Value = types.Siacoins(1e6) - mintTxn.SiacoinOutputs[0].Value = mintTxn.SiacoinInputs[0].Parent.Value - signAllInputs(&mintTxn, sau.State, privkey) - - if err := sau.State.ValidateTransactionSet([]types.Transaction{parentTxn, mintTxn}); err == nil { - 
t.Fatal("ephemeral output with wrong value should be rejected") - } - - // add another transaction to the set that double-spends the output - doubleSpendTxn := childTxn.DeepCopy() - doubleSpendTxn.SiacoinOutputs[0].Address = types.VoidAddress - signAllInputs(&doubleSpendTxn, sau.State, privkey) - - if err := sau.State.ValidateTransactionSet([]types.Transaction{parentTxn, childTxn, doubleSpendTxn}); err == nil { - t.Fatal("ephemeral output double-spend not rejected") - } - - invalidTxn := childTxn.DeepCopy() - invalidTxn.SiacoinInputs[0].Parent.Address = types.VoidAddress - signAllInputs(&invalidTxn, sau.State, privkey) - - if err := sau.State.ValidateTransactionSet([]types.Transaction{parentTxn, invalidTxn}); err == nil { - t.Fatal("transaction claims wrong address for ephemeral output") - } -} - -func TestValidateTransaction(t *testing.T) { - // This test constructs a complex transaction and then corrupts it in - // various ways to produce validation errors. Since the transaction is so - // complex, we need to perform quite a bit of setup to create the necessary - // outputs and accumulator state. 
- - // create genesis block with multiple outputs and file contracts - pubkey, privkey := testingKeypair(0) - renterPubkey, renterPrivkey := testingKeypair(1) - hostPubkey, hostPrivkey := testingKeypair(2) - data := frand.Bytes(64 * 2) - dataRoot := merkle.NodeHash( - merkle.StorageProofLeafHash(data[:64]), - merkle.StorageProofLeafHash(data[64:]), - ) - genesisBlock := types.Block{ - Header: types.BlockHeader{Timestamp: time.Unix(734600000, 0)}, - Transactions: []types.Transaction{{ - SiacoinOutputs: []types.SiacoinOutput{ - { - Address: types.StandardAddress(pubkey), - Value: types.Siacoins(11), - }, - { - Address: types.StandardAddress(pubkey), - Value: types.Siacoins(11), - }, - { - Address: types.StandardAddress(pubkey), - Value: maxCurrency, - }, - }, - SiafundOutputs: []types.SiafundOutput{ - { - Address: types.StandardAddress(pubkey), - Value: 100, - }, - { - Address: types.StandardAddress(pubkey), - Value: 100, - }, - { - Address: types.StandardAddress(pubkey), - Value: math.MaxUint64, - }, - }, - FileContracts: []types.FileContract{ - // unresolved open contract - { - WindowStart: 5, - WindowEnd: 10, - RenterOutput: types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(58), - }, - HostOutput: types.SiacoinOutput{ - Address: types.StandardAddress(renterPubkey), - Value: types.Siacoins(19), - }, - TotalCollateral: types.ZeroCurrency, - RenterPublicKey: renterPubkey, - HostPublicKey: hostPubkey, - }, - // unresolved closed contract - { - WindowStart: 0, - WindowEnd: 10, - Filesize: uint64(len(data)), - FileMerkleRoot: dataRoot, - RenterPublicKey: renterPubkey, - HostPublicKey: hostPubkey, - }, - // resolved-valid contract - { - WindowStart: 0, - WindowEnd: 10, - Filesize: uint64(len(data)), - FileMerkleRoot: dataRoot, - RenterPublicKey: renterPubkey, - HostPublicKey: hostPubkey, - }, - // resolved-missed contract - { - WindowStart: 0, - WindowEnd: 0, - RenterPublicKey: renterPubkey, - HostPublicKey: hostPubkey, - }, - 
}, - }}, - } - sau := GenesisUpdate(genesisBlock, testingDifficulty) - spentSC := sau.NewSiacoinElements[1] - unspentSC := sau.NewSiacoinElements[2] - overflowSC := sau.NewSiacoinElements[3] - spentSF := sau.NewSiafundElements[0] - unspentSF := sau.NewSiafundElements[1] - overflowSF := sau.NewSiafundElements[2] - openContract := sau.NewFileContracts[0] - closedContract := sau.NewFileContracts[1] - resolvedValidContract := sau.NewFileContracts[2] - resolvedMissedContract := sau.NewFileContracts[3] - closedProof := types.StorageProof{ - WindowStart: sau.State.Index, - WindowProof: sau.HistoryProof(), - } - proofIndex := sau.State.StorageProofLeafIndex(closedContract.Filesize, closedProof.WindowStart, closedContract.ID) - copy(closedProof.Leaf[:], data[64*proofIndex:]) - if proofIndex == 0 { - closedProof.Proof = append(closedProof.Proof, merkle.StorageProofLeafHash(data[64:])) - } else { - closedProof.Proof = append(closedProof.Proof, merkle.StorageProofLeafHash(data[:64])) - } - resolvedValidProof := types.StorageProof{ - WindowStart: sau.State.Index, - WindowProof: sau.HistoryProof(), - } - proofIndex = sau.State.StorageProofLeafIndex(resolvedValidContract.Filesize, resolvedValidProof.WindowStart, resolvedValidContract.ID) - copy(resolvedValidProof.Leaf[:], data[64*proofIndex:]) - if proofIndex == 0 { - resolvedValidProof.Proof = append(resolvedValidProof.Proof, merkle.StorageProofLeafHash(data[64:])) - } else { - resolvedValidProof.Proof = append(resolvedValidProof.Proof, merkle.StorageProofLeafHash(data[:64])) - } - - // mine a block so that resolvedMissedContract's proof window expires, then - // construct a setup transaction that spends some of the outputs and - // resolves some of the contracts - b := mineBlock(sau.State, genesisBlock) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - sau.UpdateElementProof(&spentSC.StateElement) - sau.UpdateElementProof(&unspentSC.StateElement) - 
sau.UpdateElementProof(&spentSF.StateElement) - sau.UpdateElementProof(&unspentSF.StateElement) - sau.UpdateElementProof(&openContract.StateElement) - sau.UpdateElementProof(&closedContract.StateElement) - sau.UpdateElementProof(&resolvedValidContract.StateElement) - sau.UpdateElementProof(&resolvedMissedContract.StateElement) - sau.UpdateWindowProof(&closedProof) - sau.UpdateWindowProof(&resolvedValidProof) - resolveTxn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: spentSC, - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiafundInputs: []types.SiafundInput{{ - Parent: spentSF, - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Address: types.VoidAddress, - Value: spentSC.Value, - }}, - SiafundOutputs: []types.SiafundOutput{{ - Address: types.VoidAddress, - Value: spentSF.Value, - }}, - FileContractResolutions: []types.FileContractResolution{ - { - Parent: resolvedMissedContract, - }, - { - Parent: resolvedValidContract, - StorageProof: resolvedValidProof, - }, - }, - } - signAllInputs(&resolveTxn, sau.State, privkey) - b = mineBlock(sau.State, b, resolveTxn) - if err := sau.State.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(sau.State, b) - sau.UpdateElementProof(&spentSC.StateElement) - sau.UpdateElementProof(&unspentSC.StateElement) - sau.UpdateElementProof(&spentSF.StateElement) - sau.UpdateElementProof(&unspentSF.StateElement) - sau.UpdateElementProof(&openContract.StateElement) - sau.UpdateElementProof(&closedContract.StateElement) - sau.UpdateElementProof(&resolvedValidContract.StateElement) - sau.UpdateElementProof(&resolvedMissedContract.StateElement) - sau.UpdateWindowProof(&closedProof) - s := sau.State - - // finally, create the valid transaction, which spends the remaining outputs - // and revises/resolves the remaining contracts - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: unspentSC, - SpendPolicy: 
types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Address: types.VoidAddress, - Value: types.Siacoins(1), - }}, - SiafundInputs: []types.SiafundInput{{ - Parent: unspentSF, - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiafundOutputs: []types.SiafundOutput{{ - Address: types.VoidAddress, - Value: unspentSF.Value, - }}, - FileContracts: []types.FileContract{{ - WindowStart: 100, - WindowEnd: 105, - RenterOutput: types.SiacoinOutput{Value: types.Siacoins(1)}, - HostOutput: types.SiacoinOutput{Value: types.Siacoins(4)}, - TotalCollateral: types.Siacoins(1), - RenterPublicKey: renterPubkey, - HostPublicKey: hostPubkey, - }}, - FileContractRevisions: []types.FileContractRevision{{ - Parent: openContract, - Revision: types.FileContract{ - WindowStart: 200, - WindowEnd: 205, - RenterOutput: types.SiacoinOutput{Value: types.Siacoins(77)}, - HostOutput: types.SiacoinOutput{Value: types.Siacoins(0)}, - TotalCollateral: types.ZeroCurrency, - RevisionNumber: 1, - }, - }}, - FileContractResolutions: []types.FileContractResolution{{ - Parent: closedContract, - StorageProof: closedProof, - }}, - Attestations: []types.Attestation{{ - PublicKey: pubkey, - Key: "foo", - Value: []byte("bar"), - }}, - MinerFee: types.Siacoins(48).Div64(10), - } - fc := &txn.FileContracts[0] - contractHash := s.ContractSigHash(*fc) - fc.RenterSignature = renterPrivkey.SignHash(contractHash) - fc.HostSignature = hostPrivkey.SignHash(contractHash) - rev := &txn.FileContractRevisions[0] - contractHash = s.ContractSigHash(rev.Revision) - rev.Revision.RenterSignature = renterPrivkey.SignHash(contractHash) - rev.Revision.HostSignature = hostPrivkey.SignHash(contractHash) - txn.Attestations[0].Signature = privkey.SignHash(s.AttestationSigHash(txn.Attestations[0])) - signAllInputs(&txn, s, privkey) - - if err := s.ValidateTransaction(txn); err != nil { - t.Fatal(err) - } - - // corrupt the transaction in various ways to trigger validation errors - tests := []struct { - 
desc string - corrupt func(*types.Transaction) - }{ - { - "zero-valued SiacoinOutput", - func(txn *types.Transaction) { - txn.SiacoinOutputs[0].Value = types.ZeroCurrency - }, - }, - { - "zero-valued SiafundOutput", - func(txn *types.Transaction) { - txn.SiafundOutputs[0].Value = 0 - }, - }, - { - "siacoin input address does not match spend policy", - func(txn *types.Transaction) { - txn.SiacoinInputs[0].SpendPolicy = types.AnyoneCanSpend() - }, - }, - { - "siafund input address does not match spend policy", - func(txn *types.Transaction) { - txn.SiafundInputs[0].SpendPolicy = types.AnyoneCanSpend() - }, - }, - { - "siacoin outputs that do not equal inputs", - func(txn *types.Transaction) { - txn.SiacoinOutputs[0].Value = txn.SiacoinOutputs[0].Value.Div64(2) - }, - }, - { - "siacoin inputs that overflow", - func(txn *types.Transaction) { - txn.SiacoinInputs = append(txn.SiacoinInputs, types.SiacoinInput{ - Parent: overflowSC, - SpendPolicy: types.PolicyPublicKey(pubkey), - }) - signAllInputs(txn, s, privkey) - }, - }, - { - "siacoin outputs that overflow", - func(txn *types.Transaction) { - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: maxCurrency, - }) - }, - }, - { - "siafund outputs that do not equal inputs", - func(txn *types.Transaction) { - txn.SiafundOutputs[0].Value /= 2 - }, - }, - { - "siafund inputs that overflow", - func(txn *types.Transaction) { - txn.SiafundInputs = append(txn.SiafundInputs, types.SiafundInput{ - Parent: overflowSF, - SpendPolicy: types.PolicyPublicKey(pubkey), - }) - signAllInputs(txn, s, privkey) - }, - }, - { - "siafund outputs that overflow", - func(txn *types.Transaction) { - txn.SiafundOutputs = append(txn.SiafundOutputs, types.SiafundOutput{ - Value: math.MaxUint64, - }) - }, - }, - { - "file contract renter output overflows", - func(txn *types.Transaction) { - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: maxCurrency.Sub(types.Siacoins(2)), - }) - 
txn.FileContracts[0].RenterOutput.Value = types.Siacoins(2) - }, - }, - { - "file contract host output overflows", - func(txn *types.Transaction) { - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: maxCurrency.Sub(types.Siacoins(2)), - }) - txn.FileContracts[0].RenterOutput.Value = types.ZeroCurrency - txn.FileContracts[0].HostOutput.Value = types.Siacoins(2) - }, - }, - { - "file contract collateral overflows", - func(txn *types.Transaction) { - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: maxCurrency.Sub(types.Siacoins(2)), - }) - txn.FileContracts[0].RenterOutput.Value = types.ZeroCurrency - txn.FileContracts[0].HostOutput.Value = types.ZeroCurrency - txn.FileContracts[0].TotalCollateral = types.Siacoins(2) - }, - }, - { - "file contract tax overflows", - func(txn *types.Transaction) { - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: maxCurrency.Sub(types.Siacoins(2)), - }) - txn.FileContracts[0].RenterOutput.Value = types.Siacoins(1) - txn.FileContracts[0].HostOutput.Value = types.ZeroCurrency - }, - }, - { - "miner fee that overflows", - func(txn *types.Transaction) { - txn.MinerFee = maxCurrency - }, - }, - { - "non-existent siacoin output", - func(txn *types.Transaction) { - txn.SiacoinInputs[0].Parent.ID = types.ElementID{} - }, - }, - { - "double-spent siacoin output", - func(txn *types.Transaction) { - txn.SiacoinInputs[0].Parent = spentSC - }, - }, - { - "invalid siacoin signature", - func(txn *types.Transaction) { - txn.SiacoinInputs[0].Signatures[0][0] ^= 1 - }, - }, - { - "non-existent siafund output", - func(txn *types.Transaction) { - txn.SiafundInputs[0].Parent.ID = types.ElementID{} - }, - }, - { - "double-spent siafund output", - func(txn *types.Transaction) { - txn.SiafundInputs[0].Parent = spentSF - }, - }, - { - "invalid siafund signature", - func(txn *types.Transaction) { - txn.SiafundInputs[0].Signatures[0][0] ^= 1 - }, - }, - { - "file contract 
that has invalid renter signature", - func(txn *types.Transaction) { - txn.FileContracts[0].RenterSignature[0] ^= 1 - }, - }, - { - "file contract that has invalid host signature", - func(txn *types.Transaction) { - txn.FileContracts[0].HostSignature[0] ^= 1 - }, - }, - { - "file contract whose window ends before it begins", - func(txn *types.Transaction) { - txn.FileContracts[0].WindowEnd = txn.FileContracts[0].WindowStart - 1 - }, - }, - { - "revision of non-existent file contract", - func(txn *types.Transaction) { - txn.FileContractRevisions[0].Parent.ID = types.ElementID{} - }, - }, - { - "revision of already-resolved-valid file contract", - func(txn *types.Transaction) { - txn.FileContractRevisions[0].Parent = resolvedValidContract - }, - }, - { - "revision of already-resolved-missed file contract", - func(txn *types.Transaction) { - txn.FileContractRevisions[0].Parent = resolvedMissedContract - }, - }, - { - "file contract revision that does not increase revision number", - func(txn *types.Transaction) { - rev := &txn.FileContractRevisions[0].Revision - rev.RevisionNumber = 0 - }, - }, - { - "file contract revision that modifies output sum", - func(txn *types.Transaction) { - rev := &txn.FileContractRevisions[0].Revision - rev.RenterOutput.Value = rev.RenterOutput.Value.Mul64(2) - }, - }, - { - "file contract revision whose window ends before it begins", - func(txn *types.Transaction) { - rev := &txn.FileContractRevisions[0].Revision - rev.WindowEnd = rev.WindowStart - 1 - }, - }, - { - "file contract revision that has invalid renter signature", - func(txn *types.Transaction) { - rev := &txn.FileContractRevisions[0].Revision - rev.RenterSignature[0] ^= 1 - }, - }, - { - "file contract revision that has invalid host signature", - func(txn *types.Transaction) { - rev := &txn.FileContractRevisions[0].Revision - rev.HostSignature[0] ^= 1 - }, - }, - { - "resolution of non-existent file contract", - func(txn *types.Transaction) { - 
txn.FileContractResolutions[0].Parent.ID = types.ElementID{} - }, - }, - { - "resolution with invalid history proof", - func(txn *types.Transaction) { - txn.FileContractResolutions[0].StorageProof.WindowProof = nil - }, - }, - { - "resolution of already-resolved-valid file contract", - func(txn *types.Transaction) { - txn.FileContractResolutions[0].Parent = resolvedValidContract - }, - }, - { - "resolution of already-resolved-missed file contract", - func(txn *types.Transaction) { - txn.FileContractResolutions[0].Parent = resolvedMissedContract - }, - }, - { - "file contract resolution whose WindowStart does not match final revision", - func(txn *types.Transaction) { - res := &txn.FileContractResolutions[0] - res.StorageProof.WindowStart = b.Index() - res.StorageProof.WindowProof = nil - }, - }, - { - "file contract resolution whose storage proof root does not match final Merkle root", - func(txn *types.Transaction) { - res := &txn.FileContractResolutions[0] - res.StorageProof.Proof[0][0] ^= 1 - }, - }, - { - "attestation with invalid signature", - func(txn *types.Transaction) { - txn.Attestations[0].Signature[0] ^= 1 - }, - }, - { - "invalid Foundation update", - func(txn *types.Transaction) { - txn.NewFoundationAddress = types.StandardAddress(pubkey) - }, - }, - } - for _, test := range tests { - corruptTxn := txn.DeepCopy() - test.corrupt(&corruptTxn) - if err := s.ValidateTransaction(corruptTxn); err == nil { - t.Fatalf("accepted transaction with %v", test.desc) - } - } -} - -func TestValidateSpendPolicy(t *testing.T) { - // create a State with a height above 0 - s := State{ - Index: types.ChainIndex{Height: 100}, - } - - privkey := func(seed uint64) types.PrivateKey { - _, privkey := testingKeypair(seed) - return privkey - } - pubkey := func(seed uint64) types.PublicKey { - pubkey, _ := testingKeypair(seed) - return pubkey - } - - tests := []struct { - desc string - policy types.SpendPolicy - sign func(sigHash types.Hash256) []types.Signature - wantErr bool - 
}{ - { - desc: "not enough signatures", - policy: types.PolicyThreshold( - 2, - []types.SpendPolicy{ - types.PolicyPublicKey(pubkey(0)), - types.PolicyPublicKey(pubkey(1)), - }, - ), - sign: func(sigHash types.Hash256) []types.Signature { - return []types.Signature{privkey(0).SignHash(sigHash)} - }, - wantErr: true, - }, - { - desc: "height not above", - policy: types.PolicyAbove(150), - sign: func(types.Hash256) []types.Signature { return nil }, - wantErr: true, - }, - { - desc: "anyone can spend", - policy: types.AnyoneCanSpend(), - sign: func(types.Hash256) []types.Signature { return nil }, - wantErr: false, - }, - { - desc: "multiple public key signatures", - policy: types.PolicyThreshold( - 3, - []types.SpendPolicy{ - types.PolicyPublicKey(pubkey(0)), - types.PolicyPublicKey(pubkey(1)), - types.PolicyPublicKey(pubkey(2)), - }, - ), - sign: func(sigHash types.Hash256) []types.Signature { - return []types.Signature{ - privkey(0).SignHash(sigHash), - privkey(1).SignHash(sigHash), - privkey(2).SignHash(sigHash), - } - }, - wantErr: false, - }, - { - desc: "invalid foundation failsafe", - policy: types.PolicyThreshold( - 1, - []types.SpendPolicy{ - types.PolicyThreshold( - 2, - []types.SpendPolicy{ - types.PolicyPublicKey(pubkey(0)), - types.PolicyPublicKey(pubkey(1)), - types.PolicyPublicKey(pubkey(2)), - }, - ), - // failsafe policy is not satisfied because the current height is 100 - types.PolicyThreshold( - 2, - []types.SpendPolicy{ - types.PolicyPublicKey(pubkey(3)), - types.PolicyAbove(150), - }, - ), - }, - ), - sign: func(sigHash types.Hash256) []types.Signature { - return []types.Signature{privkey(3).SignHash(sigHash)} - }, - wantErr: true, - }, - { - desc: "valid foundation primary", - policy: types.PolicyThreshold( - 1, - []types.SpendPolicy{ - types.PolicyThreshold( - 2, - []types.SpendPolicy{ - types.PolicyPublicKey(pubkey(0)), - types.PolicyPublicKey(pubkey(1)), - types.PolicyPublicKey(pubkey(2)), - }, - ), - // failsafe policy is not satisfied 
because the current height is 100 - types.PolicyThreshold( - 2, - []types.SpendPolicy{ - types.PolicyPublicKey(pubkey(3)), - types.PolicyAbove(150), - }, - ), - }, - ), - sign: func(sigHash types.Hash256) []types.Signature { - return []types.Signature{ - privkey(1).SignHash(sigHash), - privkey(2).SignHash(sigHash), - } - }, - wantErr: false, - }, - { - desc: "valid foundation failsafe", - policy: types.PolicyThreshold( - 1, - []types.SpendPolicy{ - types.PolicyThreshold( - 2, - []types.SpendPolicy{ - types.PolicyPublicKey(pubkey(0)), - types.PolicyPublicKey(pubkey(1)), - types.PolicyPublicKey(pubkey(2)), - }, - ), - // failsafe policy is satisfied because the current height is 100 - types.PolicyThreshold( - 2, - []types.SpendPolicy{ - types.PolicyPublicKey(pubkey(3)), - types.PolicyAbove(80), - }, - ), - }, - ), - sign: func(sigHash types.Hash256) []types.Signature { - return []types.Signature{privkey(3).SignHash(sigHash)} - }, - wantErr: false, - }, - { - desc: "invalid legacy unlock hash", - policy: types.SpendPolicy{Type: types.PolicyTypeUnlockConditions{ - PublicKeys: []types.PublicKey{ - pubkey(0), - pubkey(1), - pubkey(2), - }, - SignaturesRequired: 2, - }}, - sign: func(sigHash types.Hash256) []types.Signature { - return []types.Signature{ - privkey(0).SignHash(sigHash), - } - }, - wantErr: true, - }, - { - desc: "invalid timelocked legacy unlock conditions", - policy: types.SpendPolicy{Type: types.PolicyTypeUnlockConditions{ - PublicKeys: []types.PublicKey{ - pubkey(0), - }, - Timelock: 150, - SignaturesRequired: 1, - }}, - sign: func(sigHash types.Hash256) []types.Signature { - return []types.Signature{ - privkey(0).SignHash(sigHash), - } - }, - wantErr: true, - }, - { - desc: "valid legacy unlock hash", - policy: types.SpendPolicy{Type: types.PolicyTypeUnlockConditions{ - PublicKeys: []types.PublicKey{ - pubkey(0), - pubkey(1), - pubkey(2), - }, - SignaturesRequired: 2, - }}, - sign: func(sigHash types.Hash256) []types.Signature { - return 
[]types.Signature{ - privkey(0).SignHash(sigHash), - privkey(1).SignHash(sigHash), - } - }, - wantErr: false, - }, - { - desc: "valid timelocked legacy unlock conditions", - policy: types.SpendPolicy{Type: types.PolicyTypeUnlockConditions{ - PublicKeys: []types.PublicKey{ - pubkey(0), - }, - Timelock: 80, - SignaturesRequired: 1, - }}, - sign: func(sigHash types.Hash256) []types.Signature { - return []types.Signature{privkey(0).SignHash(sigHash)} - }, - wantErr: false, - }, - } - - for _, tt := range tests { - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: types.SiacoinElement{ - SiacoinOutput: types.SiacoinOutput{ - Address: tt.policy.Address(), - }, - }, - SpendPolicy: tt.policy, - }}, - } - sigHash := s.InputSigHash(txn) - txn.SiacoinInputs[0].Signatures = tt.sign(sigHash) - if err := s.validateSpendPolicies(txn); (err != nil) != tt.wantErr { - t.Fatalf("case %q failed: %v", tt.desc, err) - } - } -} - -func TestValidateTransactionSet(t *testing.T) { - pubkey, privkey := testingKeypair(0) - genesisBlock := genesisWithSiacoinOutputs(types.SiacoinOutput{ - Address: types.StandardAddress(pubkey), - Value: types.Siacoins(1), - }) - // also add some SF - genesisBlock.Transactions[0].SiafundOutputs = []types.SiafundOutput{{ - Address: types.StandardAddress(pubkey), - Value: 100, - }} - sau := GenesisUpdate(genesisBlock, testingDifficulty) - s := sau.State - - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: sau.NewSiacoinElements[1], - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Address: types.StandardAddress(pubkey), - Value: sau.NewSiacoinElements[1].Value, - }}, - SiafundInputs: []types.SiafundInput{{ - Parent: sau.NewSiafundElements[0], - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiafundOutputs: []types.SiafundOutput{{ - Address: types.StandardAddress(pubkey), - Value: sau.NewSiafundElements[0].Value, - }}, - } - signAllInputs(&txn, s, privkey) - - if 
err := sau.State.ValidateTransactionSet([]types.Transaction{txn, txn}); err == nil { - t.Fatal("accepted transaction set with repeated txn") - } - - doubleSpendSCTxn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: sau.NewSiacoinElements[1], - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Address: types.StandardAddress(pubkey), - Value: sau.NewSiacoinElements[1].Value, - }}, - } - signAllInputs(&doubleSpendSCTxn, s, privkey) - - if err := sau.State.ValidateTransactionSet([]types.Transaction{txn, doubleSpendSCTxn}); err == nil { - t.Fatal("accepted transaction set with double spent siacoin output") - } - - doubleSpendSFTxn := types.Transaction{ - SiafundInputs: []types.SiafundInput{{ - Parent: sau.NewSiafundElements[0], - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiafundOutputs: []types.SiafundOutput{{ - Address: types.StandardAddress(pubkey), - Value: sau.NewSiafundElements[0].Value, - }}, - } - signAllInputs(&doubleSpendSFTxn, s, privkey) - - if err := sau.State.ValidateTransactionSet([]types.Transaction{txn, doubleSpendSFTxn}); err == nil { - t.Fatal("accepted transaction set with double spent siafund output") - } - - // overfill set with copies of txn - w := sau.State.TransactionWeight(txn) - txns := make([]types.Transaction, (sau.State.MaxBlockWeight()/w)+1) - for i := range txns { - txns[i] = txn - } - if err := sau.State.ValidateTransactionSet(txns); err == nil { - t.Fatal("accepted overweight transaction set") - } -} - -func TestValidateBlock(t *testing.T) { - pubkey, privkey := testingKeypair(0) - genesis := genesisWithSiacoinOutputs(types.SiacoinOutput{ - Address: types.StandardAddress(pubkey), - Value: types.Siacoins(1), - }, types.SiacoinOutput{ - Address: types.StandardAddress(pubkey), - Value: types.Siacoins(1), - }) - sau := GenesisUpdate(genesis, testingDifficulty) - s := sau.State - - // Mine a block with a few transactions. 
We are not testing transaction - // validity here, but the block should still be valid. - txns := []types.Transaction{ - { - SiacoinInputs: []types.SiacoinInput{{ - Parent: sau.NewSiacoinElements[1], - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{{ - Address: types.VoidAddress, - Value: sau.NewSiacoinElements[1].Value, - }}, - }, - { - SiacoinInputs: []types.SiacoinInput{{ - Parent: sau.NewSiacoinElements[2], - SpendPolicy: types.PolicyPublicKey(pubkey), - }}, - MinerFee: sau.NewSiacoinElements[2].Value, - }, - } - signAllInputs(&txns[0], s, privkey) - signAllInputs(&txns[1], s, privkey) - b := mineBlock(s, genesis, txns...) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - - tests := []struct { - desc string - corrupt func(*types.Block) - }{ - { - "incorrect header block height", - func(b *types.Block) { - b.Header.Height = 999 - }, - }, - { - "incorrect header parent ID", - func(b *types.Block) { - b.Header.ParentID[0] ^= 1 - }, - }, - { - "long-past header timestamp", - func(b *types.Block) { - b.Header.Timestamp = b.Header.Timestamp.Add(-24 * time.Hour) - }, - }, - { - "invalid commitment (different miner address)", - func(b *types.Block) { - b.Header.MinerAddress[0] ^= 1 - }, - }, - { - "invalid commitment (different transactions)", - func(b *types.Block) { - b.Transactions = b.Transactions[:1] - }, - }, - } - for _, test := range tests { - corruptBlock := b - test.corrupt(&corruptBlock) - if err := s.ValidateBlock(corruptBlock); err == nil { - t.Fatalf("accepted block with %v", test.desc) - } - } -} - -func TestNoDoubleContractUpdates(t *testing.T) { - renterPub, renterPriv := testingKeypair(0) - hostPub, hostPriv := testingKeypair(1) - renterAddr := types.StandardAddress(renterPub) - hostAddr := types.StandardAddress(hostPub) - genesis := genesisWithSiacoinOutputs(types.SiacoinOutput{ - Address: renterAddr, - Value: types.Siacoins(100), - }, types.SiacoinOutput{ - Address: hostAddr, - Value: 
types.Siacoins(100), - }) - sau := GenesisUpdate(genesis, testingDifficulty) - s := sau.State - - signRevision := func(fc *types.FileContract) { - sigHash := s.ContractSigHash(*fc) - fc.HostSignature = hostPriv.SignHash(sigHash) - fc.RenterSignature = renterPriv.SignHash(sigHash) - } - - // Mine a block with a new file contract. - fc := types.FileContract{ - WindowStart: 20, - WindowEnd: 30, - RenterOutput: types.SiacoinOutput{ - Address: renterAddr, - Value: types.Siacoins(5), - }, - HostOutput: types.SiacoinOutput{ - Address: hostAddr, - Value: types.Siacoins(10), - }, - TotalCollateral: types.ZeroCurrency, - RenterPublicKey: renterPub, - HostPublicKey: hostPub, - } - signRevision(&fc) - formationTxn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{ - {Parent: sau.NewSiacoinElements[1], SpendPolicy: types.PolicyPublicKey(renterPub)}, - {Parent: sau.NewSiacoinElements[2], SpendPolicy: types.PolicyPublicKey(hostPub)}, - }, - SiacoinOutputs: []types.SiacoinOutput{ - {Address: renterAddr, Value: types.Siacoins(90)}, - {Address: hostAddr, Value: types.Siacoins(95).Sub(s.FileContractTax(fc))}, - }, - FileContracts: []types.FileContract{fc}, - } - sigHash := s.InputSigHash(formationTxn) - formationTxn.SiacoinInputs[0].Signatures = []types.Signature{renterPriv.SignHash(sigHash)} - formationTxn.SiacoinInputs[1].Signatures = []types.Signature{hostPriv.SignHash(sigHash)} - b := mineBlock(s, genesis, formationTxn) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(s, b) - s = sau.State - if len(sau.NewFileContracts) != 1 { - t.Fatal("expected 1 new file contract") - } - fce := sau.NewFileContracts[0] - - // mine additional blocks - for i := 0; i < 5; i++ { - b = mineBlock(s, b) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(s, b) - s = sau.State - sau.UpdateElementProof(&fce.StateElement) - } - - // helper function to return a signed revision of the file contract with the - // given revision 
number. - newRevision := func(n uint64) types.FileContract { - fc := fce.FileContract - fc.RevisionNumber = n - signRevision(&fc) - return fc - } - - tests := [][]types.Transaction{ - { - { - FileContractRevisions: []types.FileContractRevision{ - {Parent: fce, Revision: newRevision(2)}, - }, - }, - { - FileContractRevisions: []types.FileContractRevision{ - {Parent: fce, Revision: newRevision(3)}, - }, - }, - }, - { - { - FileContractRevisions: []types.FileContractRevision{ - {Parent: fce, Revision: newRevision(2)}, - {Parent: fce, Revision: newRevision(3)}, - }, - }, - }, - { - { - FileContractRevisions: []types.FileContractRevision{ - {Parent: fce, Revision: newRevision(2)}, - }, - }, - { - FileContractResolutions: []types.FileContractResolution{ - {Parent: fce, Finalization: newRevision(types.MaxRevisionNumber)}, - }, - }, - }, - { - { - FileContractResolutions: []types.FileContractResolution{ - {Parent: fce, Finalization: newRevision(types.MaxRevisionNumber)}, - {Parent: fce, Finalization: newRevision(types.MaxRevisionNumber)}, - }, - }, - }, - } - - for i, set := range tests { - if err := s.ValidateBlock(mineBlock(s, b, set...)); err == nil { - t.Fatalf("test %v: expected invalid block error", i) - } else if !strings.Contains(err.Error(), "multiple times (previously updated in transaction") { // TODO: use errors.Is? 
- t.Fatalf("test %v: expected multiple update error, got %v", i, err) - } - } - - // apply a final revision - data := frand.Bytes(64 * 2) - revisionTxn := types.Transaction{ - FileContractRevisions: []types.FileContractRevision{ - {Parent: fce, Revision: fce.FileContract}, - }, - } - revisionTxn.FileContractRevisions[0].Revision.FileMerkleRoot = merkle.NodeHash( - merkle.StorageProofLeafHash(data[:64]), - merkle.StorageProofLeafHash(data[64:]), - ) - revisionTxn.FileContractRevisions[0].Revision.RevisionNumber++ - sigHash = s.ContractSigHash(revisionTxn.FileContractRevisions[0].Revision) - revisionTxn.FileContractRevisions[0].Revision.RenterSignature = renterPriv.SignHash(sigHash) - revisionTxn.FileContractRevisions[0].Revision.HostSignature = hostPriv.SignHash(sigHash) - b = mineBlock(s, b, revisionTxn) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(s, b) - s = sau.State - if len(sau.RevisedFileContracts) != 1 { - t.Fatal("expected 1 revised file contract") - } else if !reflect.DeepEqual(sau.RevisedFileContracts[0].FileContract, revisionTxn.FileContractRevisions[0].Revision) { - t.Fatal("final revision did not match") - } - fce = sau.RevisedFileContracts[0] - - // mine until the contract proof window - for i := s.Index.Height; i < fc.WindowStart; i++ { - b = mineBlock(s, b) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(s, b) - s = sau.State - sau.UpdateElementProof(&fce.StateElement) - } - - // build a vaild proof for the contract - proof := types.StorageProof{ - WindowStart: s.Index, - WindowProof: sau.HistoryProof(), - } - proofIndex := sau.State.StorageProofLeafIndex(fc.Filesize, proof.WindowStart, fce.ID) - copy(proof.Leaf[:], data[64*proofIndex:]) - if proofIndex == 0 { - proof.Proof = append(proof.Proof, merkle.StorageProofLeafHash(data[64:])) - } else { - proof.Proof = append(proof.Proof, merkle.StorageProofLeafHash(data[:64])) - } - - tests = [][]types.Transaction{ - { - { - 
FileContractResolutions: []types.FileContractResolution{ - {Parent: fce, StorageProof: proof}, - {Parent: fce, Finalization: newRevision(types.MaxRevisionNumber)}, - }, - }, - }, - { - { - FileContractResolutions: []types.FileContractResolution{ - {Parent: fce, StorageProof: proof}, - }, - }, - { - FileContractResolutions: []types.FileContractResolution{ - {Parent: fce, StorageProof: proof}, - }, - }, - }, - { - { - FileContractResolutions: []types.FileContractResolution{ - {Parent: fce, Finalization: newRevision(types.MaxRevisionNumber)}, - {Parent: fce, Finalization: newRevision(types.MaxRevisionNumber)}, - }, - }, - }, - } - - for i, set := range tests { - if err := s.ValidateBlock(mineBlock(s, b, set...)); err == nil { - t.Fatalf("test %v: expected invalid block error", i) - } else if !strings.Contains(err.Error(), "multiple times (previously updated in transaction") { // TODO: use errors.Is? - t.Fatalf("test %v: expected multiple update error, got %v", i, err) - } - } - - // mine until after contract proof window - for i := s.Index.Height; i < fc.WindowEnd+1; i++ { - b = mineBlock(s, b) - if err := s.ValidateBlock(b); err != nil { - t.Fatal(err) - } - sau = ApplyBlock(s, b) - s = sau.State - sau.UpdateElementProof(&fce.StateElement) - } - - tests = [][]types.Transaction{ - { - { - FileContractResolutions: []types.FileContractResolution{ - {Parent: fce}, - {Parent: fce, Finalization: newRevision(types.MaxRevisionNumber)}, - }, - }, - }, - { - { - FileContractResolutions: []types.FileContractResolution{ - {Parent: fce}, - }, - }, - { - FileContractResolutions: []types.FileContractResolution{ - {Parent: fce}, - }, - }, - }, - { - { - FileContractResolutions: []types.FileContractResolution{ - {Parent: fce, Finalization: newRevision(types.MaxRevisionNumber)}, - {Parent: fce, Finalization: newRevision(types.MaxRevisionNumber)}, - }, - }, - }, - } - - for i, set := range tests { - if err := s.ValidateBlock(mineBlock(s, b, set...)); err == nil { - t.Fatalf("test %v: 
expected invalid block error", i) - } else if !strings.Contains(err.Error(), "multiple times (previously updated in transaction") { // TODO: use errors.Is? - t.Fatalf("test %v: expected multiple update error, got %v", i, err) - } - } -} diff --git a/v2/go.mod b/v2/go.mod deleted file mode 100644 index e2ef819a..00000000 --- a/v2/go.mod +++ /dev/null @@ -1,16 +0,0 @@ -module go.sia.tech/core/v2 - -go 1.17 - -require ( - github.com/hdevalence/ed25519consensus v0.1.0 - go.sia.tech/mux v1.0.1 - golang.org/x/crypto v0.4.0 - golang.org/x/sys v0.3.0 - lukechampine.com/frand v1.4.2 -) - -require ( - filippo.io/edwards25519 v1.0.0 // indirect - github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect -) diff --git a/v2/go.sum b/v2/go.sum deleted file mode 100644 index adff3def..00000000 --- a/v2/go.sum +++ /dev/null @@ -1,45 +0,0 @@ -filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= -filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= -github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY= -github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA= -github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= -github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.sia.tech/mux v1.0.1 h1:FuNOJwx+jwFZwprrfJ0AxdiAiMz0QJ3h1StSpJNiPaM= -go.sia.tech/mux v1.0.1/go.mod h1:Yyo6wZelOYTyvrHmJZ6aQfRoer3o4xyKQ4NmQLJrBSo= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= -golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= -lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= diff --git a/v2/host/budget.go b/v2/host/budget.go deleted file mode 100644 index e09c5e0d..00000000 --- a/v2/host/budget.go +++ /dev/null @@ -1,96 +0,0 @@ -package host - -import ( - "errors" - "fmt" - "io" - - "go.sia.tech/core/v2/net/rhp" - "go.sia.tech/core/v2/types" -) - -// A Budget provides helpers for managing the RPC budget. -type Budget struct { - value types.Currency -} - -var ( - // ErrInsufficientBudget is returned when the renter's budget is not - // sufficient to cover the payment. 
- ErrInsufficientBudget = errors.New("insufficient budget") -) - -// Remaining returns the amount remaining in the budget -func (b *Budget) Remaining() types.Currency { - return b.value -} - -// Spend subtracts amount from the remaining budget. -func (b *Budget) Spend(amount types.Currency) error { - if amount.Cmp(b.value) > 0 { - return fmt.Errorf("unable to spend %d, %d remaining: %w", amount, b.value, ErrInsufficientBudget) - } - b.value = b.value.Sub(amount) - return nil -} - -// Increase increases the budget by the specified amount. -func (b *Budget) Increase(amount types.Currency) { - b.value = b.value.Add(amount) -} - -// NewBudget returns a new Budget. -func NewBudget(value types.Currency) *Budget { - return &Budget{ - value: value, - } -} - -// A BudgetedStream limits reads and writes using an RPC budget. Writes -// subtract the download bandwidth price multiplied by the number of bytes -// written and reads subtract the upload bandwidth price multiplied by the -// number of bytes read. -type BudgetedStream struct { - rw io.ReadWriter - budget *Budget - - uploadBandwidthPrice types.Currency - downloadBandwidthPrice types.Currency -} - -// Read reads data from the underlying stream. Implements io.Reader. -func (l *BudgetedStream) Read(buf []byte) (n int, err error) { - n, err = l.rw.Read(buf) - if err != nil { - return - } - cost := l.uploadBandwidthPrice.Mul64(uint64(n)) - if err = l.budget.Spend(cost); err != nil { - return - } - return -} - -// Write writes data to the underlying stream. Implements io.Writer. -func (l *BudgetedStream) Write(buf []byte) (n int, err error) { - n, err = l.rw.Write(buf) - if err != nil { - return - } - cost := l.downloadBandwidthPrice.Mul64(uint64(n)) - if err = l.budget.Spend(cost); err != nil { - return - } - return -} - -// NewBudgetedStream initializes a new stream limited by the budget. 
-func NewBudgetedStream(rw io.ReadWriter, budget *Budget, settings rhp.HostSettings) *BudgetedStream { - return &BudgetedStream{ - rw: rw, - budget: budget, - - uploadBandwidthPrice: settings.UploadBandwidthPrice, - downloadBandwidthPrice: settings.DownloadBandwidthPrice, - } -} diff --git a/v2/host/budget_test.go b/v2/host/budget_test.go deleted file mode 100644 index 29aec571..00000000 --- a/v2/host/budget_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package host - -import ( - "bytes" - "errors" - "sync" - "testing" - - "go.sia.tech/core/v2/net/rhp" - "go.sia.tech/core/v2/types" - - "lukechampine.com/frand" -) - -type stubEphemeralAccountStore struct { - mu sync.Mutex - balances map[types.PublicKey]types.Currency -} - -func (s *stubEphemeralAccountStore) Balance(accountID types.PublicKey) (types.Currency, error) { - s.mu.Lock() - defer s.mu.Unlock() - return s.balances[accountID], nil -} - -func (s *stubEphemeralAccountStore) Deposit(accountID types.PublicKey, amount types.Currency) (types.Currency, error) { - s.mu.Lock() - defer s.mu.Unlock() - s.balances[accountID] = s.balances[accountID].Add(amount) - return s.balances[accountID], nil -} - -func (s *stubEphemeralAccountStore) Refund(accountID types.PublicKey, amount types.Currency) error { - s.mu.Lock() - defer s.mu.Unlock() - - s.balances[accountID] = s.balances[accountID].Add(amount) - return nil -} - -func (s *stubEphemeralAccountStore) Withdraw(accountID types.PublicKey, requestID types.Hash256, amount types.Currency) error { - s.mu.Lock() - defer s.mu.Unlock() - - bal, exists := s.balances[accountID] - if !exists || bal.Cmp(amount) < 0 { - return errors.New("insufficient funds") - } - - s.balances[accountID] = s.balances[accountID].Sub(amount) - return nil -} - -func newStubAccountStore() *stubEphemeralAccountStore { - return &stubEphemeralAccountStore{ - balances: make(map[types.PublicKey]types.Currency), - } -} - -func TestRPCBudget(t *testing.T) { - eas := newStubAccountStore() - - pub := 
types.PublicKey(frand.Entropy256()) - budget := NewBudget(types.Siacoins(2)) - - if budget.Remaining() != types.Siacoins(2) { - t.Fatal("expected 2 SC budget") - } - - // spend an amount greater than the budget - if err := budget.Spend(types.Siacoins(3)); err == nil { - t.Fatal("expected error when spending more than the budget") - } - - if balance, _ := eas.Balance(pub); balance != types.ZeroCurrency { - t.Fatal("expected account to be empty") - } - - // spend half the budget - if err := budget.Spend(types.Siacoins(1)); err != nil { - t.Fatal("expected to be able to spend half the budget:", err) - } - - eas.Refund(pub, budget.Remaining()) - - if balance, _ := eas.Balance(pub); balance != types.Siacoins(1) { - t.Fatal("expected account to be refunded 1 SC") - } -} - -func TestBudgetedStream(t *testing.T) { - budget := NewBudget(types.Siacoins(2)) - - if budget.Remaining().Cmp(types.Siacoins(2)) != 0 { - t.Fatal("expected 2 SC budget") - } - - settings := rhp.HostSettings{ - DownloadBandwidthPrice: types.Siacoins(1).Div64(100), // 1 SC per 100 bytes - UploadBandwidthPrice: types.Siacoins(1).Div64(100), // 1 SC per 100 bytes - } - - buf := bytes.NewBuffer(nil) - rw := NewBudgetedStream(buf, budget, settings) - - // write 3/4 of the budget - if _, err := rw.Write(frand.Bytes(150)); err != nil { - t.Fatal(err) - } - - if budget.Remaining() != types.Siacoins(1).Div64(2) { - t.Fatalf("expected 1 SC remaining, got %d", budget.Remaining()) - } - - // read the remaining budget - if _, err := rw.Read(make([]byte, 50)); err != nil { - t.Fatal(err) - } - - if budget.Remaining() != types.ZeroCurrency { - t.Fatal("expected 0 SC remaining") - } - - // overflow the budget - if _, err := rw.Read(make([]byte, 51)); !errors.Is(err, ErrInsufficientBudget) { - t.Fatal("expected insufficient budget error") - } -} diff --git a/v2/host/executor.go b/v2/host/executor.go deleted file mode 100644 index 43f5c383..00000000 --- a/v2/host/executor.go +++ /dev/null @@ -1,530 +0,0 @@ -package host 
- -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/net/rhp" - "go.sia.tech/core/v2/net/rpc" - "go.sia.tech/core/v2/types" -) - -const ( - blocksPerYear = 144 * 365 -) - -// A ProgramExecutor executes an MDM program in the context of the current -// host session. -type ProgramExecutor struct { - privkey types.PrivateKey - - newFileSize uint64 - newMerkleRoot types.Hash256 - newRoots []types.Hash256 - - // gainedSectors counts the number of references a sector has gained - // through append or update instructions. When a program is reverted all - // references must be removed. - gainedSectors map[types.Hash256]uint64 - // removedSectors counts the number of references a sector has lost through - // update or drop instructions. When a program is committed all references - // must be removed. - removedSectors map[types.Hash256]uint64 - - // output should not be written to directly, instead write to the encoder. - output bytes.Buffer - encoder *types.Encoder - - budget *Budget - spent types.Currency - failureRefund types.Currency - additionalStorage types.Currency - additionalCollateral types.Currency - - sectors SectorStore - contracts ContractManager - registry *RegistryManager - cs consensus.State - settings rhp.HostSettings - duration uint64 - contract rhp.Contract - - committed bool -} - -// payForExecution deducts the cost of the instruction from the budget. -func (pe *ProgramExecutor) payForExecution(usage rhp.ResourceUsage) error { - cost := usage.BaseCost.Add(usage.StorageCost) - - // subtract the execution cost and additional storage costs from the budget. - if err := pe.budget.Spend(cost); err != nil { - return fmt.Errorf("failed to pay for execution: %w", err) - } - - // add the additional spending to the program's state. 
- pe.spent = pe.spent.Add(cost) - pe.failureRefund = pe.failureRefund.Add(usage.StorageCost) - pe.additionalCollateral = pe.additionalCollateral.Add(usage.AdditionalCollateral) - pe.additionalStorage = pe.additionalStorage.Add(usage.StorageCost) - return nil -} - -// executeHasSector checks if the host is storing the sector. -func (pe *ProgramExecutor) executeHasSector(root types.Hash256) error { - if err := pe.payForExecution(rhp.HasSectorCost(pe.settings)); err != nil { - return fmt.Errorf("failed to pay instruction cost: %w", err) - } - - // check if the sector exists in the sector store. - exists, err := pe.sectors.Exists(root) - if err != nil { - return fmt.Errorf("failed to check sector existence: %w", err) - } - // output the boolean existence of the sector, 0 for false, 1 for true. - pe.encoder.WriteBool(exists) - return nil -} - -// executeAppendSector appends a new sector to the executor's sector roots and -// adds it to the sector store. -func (pe *ProgramExecutor) executeAppendSector(root types.Hash256, sector *[rhp.SectorSize]byte, requiresProof bool) ([]types.Hash256, error) { - if err := pe.payForExecution(rhp.AppendSectorCost(pe.settings, pe.duration)); err != nil { - return nil, fmt.Errorf("failed to pay append sector cost: %w", err) - } - - if err := pe.sectors.Add(root, sector); err != nil { - return nil, fmt.Errorf("failed to add sector: %w", err) - } - - // update the program's state - pe.newRoots = append(pe.newRoots, root) - pe.newMerkleRoot = rhp.MetaRoot(pe.newRoots) - pe.newFileSize += rhp.SectorSize - pe.gainedSectors[root]++ - // TODO: calculate optional proof. - return nil, nil -} - -// executeUpdateSector updates an existing sector. 
-func (pe *ProgramExecutor) executeUpdateSector(offset uint64, data []byte, requiresProof bool) ([]types.Hash256, error) { - if err := pe.payForExecution(rhp.UpdateSectorCost(pe.settings, uint64(len(data)))); err != nil { - return nil, fmt.Errorf("failed to pay instruction cost: %w", err) - } - - index := offset / rhp.SectorSize - if index >= uint64(len(pe.newRoots)) { - return nil, fmt.Errorf("offset out of range: %d", index) - } - existingRoot := pe.newRoots[index] - offset %= rhp.SectorSize - - // update the sector in the sector store. - updatedRoot, err := pe.sectors.Update(existingRoot, offset, data) - if err != nil { - return nil, fmt.Errorf("failed to update sector: %w", err) - } - // update the program state - pe.newRoots[index] = updatedRoot - pe.newMerkleRoot = rhp.MetaRoot(pe.newRoots) - pe.gainedSectors[updatedRoot]++ - pe.removedSectors[existingRoot]++ - // TODO: calculate optional proof. - return nil, nil -} - -// executeDropSectors drops the last n sectors from the executor's sector roots. -func (pe *ProgramExecutor) executeDropSectors(dropped uint64, requiresProof bool) ([]types.Hash256, error) { - if err := pe.payForExecution(rhp.DropSectorsCost(pe.settings, dropped)); err != nil { - return nil, fmt.Errorf("failed to pay instruction cost: %w", err) - } else if uint64(len(pe.newRoots)) < dropped { - return nil, errors.New("dropped sector index out of range") - } - - // get the roots of the sectors to be dropped. - i := len(pe.newRoots) - int(dropped) - droppedRoots := pe.newRoots[i:] - // update the program's contract state - pe.newRoots = pe.newRoots[:i] - pe.newMerkleRoot = rhp.MetaRoot(pe.newRoots) - pe.newFileSize = uint64(len(pe.newRoots)) * rhp.SectorSize - // remove a reference of each dropped sector. - for _, root := range droppedRoots { - pe.removedSectors[root]++ - } - // TODO: calculate optional proof. - return nil, nil -} - -// executeSwapSectors swaps two sectors in the executor's sector roots. 
-func (pe *ProgramExecutor) executeSwapSectors(indexA, indexB uint64, requiresProof bool) ([]types.Hash256, error) { - if err := pe.payForExecution(rhp.SwapSectorCost(pe.settings)); err != nil { - return nil, fmt.Errorf("failed to pay instruction cost: %w", err) - } else if indexA >= uint64(len(pe.newRoots)) { - return nil, fmt.Errorf("sector 1 index out of range %v", indexA) - } else if indexB >= uint64(len(pe.newRoots)) { - return nil, fmt.Errorf("sector 2 index out of range %v", indexB) - } - - // swap the sector roots. - pe.newRoots[indexA], pe.newRoots[indexB] = pe.newRoots[indexB], pe.newRoots[indexA] - // update the program's contract state - pe.newMerkleRoot = rhp.MetaRoot(pe.newRoots) - pe.newRoots[indexA].EncodeTo(pe.encoder) - pe.newRoots[indexA].EncodeTo(pe.encoder) - - // TODO: calculate optional proof. - return nil, nil -} - -// executeReadSector reads a sector from the host. Returning the bytes read, an -// optional proof, or an error. -func (pe *ProgramExecutor) executeReadSector(root types.Hash256, offset, length uint64, requiresProof bool) ([]types.Hash256, error) { - if err := pe.payForExecution(rhp.ReadCost(pe.settings, length)); err != nil { - return nil, fmt.Errorf("failed to pay instruction cost: %w", err) - } else if offset+length > rhp.SectorSize { - return nil, errors.New("offset and length exceed sector size") - } - - _, err := pe.sectors.Read(root, pe.encoder, offset, length) - if err != nil { - return nil, fmt.Errorf("failed to read sector: %w", err) - } - // TODO: calculate optional proof. - return nil, nil -} - -// executeContractRevision returns the latest revision of the contract before -// any instructions have been executed. 
-func (pe *ProgramExecutor) executeContractRevision() error { - if err := pe.payForExecution(rhp.RevisionCost(pe.settings)); err != nil { - return fmt.Errorf("failed to pay instruction cost: %w", err) - } else if pe.contract.ID == (types.ElementID{}) { - return errors.New("no contract revision set") - } - - pe.contract.EncodeTo(pe.encoder) - return nil -} - -// executeSectorRoots returns the current sector roots of the program executor. -func (pe *ProgramExecutor) executeSectorRoots() error { - if err := pe.payForExecution(rhp.SectorRootsCost(pe.settings, uint64(len(pe.newRoots)))); err != nil { - return fmt.Errorf("failed to pay instruction cost: %w", err) - } else if pe.contract.ID == (types.ElementID{}) { - return errors.New("no contract revision set") - } - - // write the sector roots to the encoder. - pe.encoder.WritePrefix(len(pe.newRoots)) - for _, root := range pe.newRoots { - root.EncodeTo(pe.encoder) - } - return nil -} - -// executeReadRegistry reads a stored registry key and returns the value. -func (pe *ProgramExecutor) executeReadRegistry(key types.Hash256) error { - if err := pe.payForExecution(rhp.ReadRegistryCost(pe.settings)); err != nil { - return fmt.Errorf("failed to pay instruction cost: %w", err) - } - - value, err := pe.registry.Get(key) - if err != nil { - return fmt.Errorf("failed to get registry value %v: %w", key, err) - } - value.EncodeTo(pe.encoder) - return nil -} - -// executeUpdateRegistry updates a stored registry key with a new value. 
-func (pe *ProgramExecutor) executeUpdateRegistry(value rhp.RegistryValue) error { - err := pe.payForExecution(rhp.UpdateRegistryCost(pe.settings)) - if err != nil { - return fmt.Errorf("failed to pay instruction cost: %w", err) - } else if err := rhp.ValidateRegistryEntry(value); err != nil { - return fmt.Errorf("invalid registry value: %w", err) - } - expirationHeight := pe.cs.Index.Height + blocksPerYear - updated, err := pe.registry.Put(value, expirationHeight) - // if err is nil the updated value is returned, otherwise the old value is - // returned. Send the entry's current value to the renter. - updated.EncodeTo(pe.encoder) - return err -} - -// SetContract sets the contract that read-write programs should use for -// finalization. The contract should be locked before calling this function. -func (pe *ProgramExecutor) SetContract(contract rhp.Contract) error { - // set initial state of the program. - pe.contract = contract - pe.newFileSize = contract.Revision.Filesize - // use height from price table to calculate remaining duration. - pe.duration = contract.Revision.WindowStart - pe.settings.BlockHeight - - roots, err := pe.contracts.Roots(contract.ID) - if err != nil { - return fmt.Errorf("failed to get contract roots: %w", err) - } - pe.newMerkleRoot = rhp.MetaRoot(roots) - pe.newRoots = append([]types.Hash256(nil), roots...) - return nil -} - -// ExecuteInstruction executes the given instruction, reading program data from -// r as needed, and writing the result of the instruction to w. -// -// note: Unlike siad's MDM, this implementation does not check the data offsets -// in the instruction arguments. It is assumed the program data is well-formed, -// meaning each argument appears in the program data in the order it is needed. -// Malformed programs may lead to unexpected behavior, but there is no need to -// buffer the program's data in memory during execution. 
Changing an -// instructions arguments would also cause programs to be unexecutable, so this -// seems like an acceptable trade-off. Should consider removing the offsets from -// the instruction arguments. -func (pe *ProgramExecutor) ExecuteInstruction(r io.Reader, w io.Writer, instruction rhp.Instruction) error { - if pe.committed { - panic("cannot modify a committed program") - } - - // reset the output buffer - pe.encoder.Flush() - pe.output.Reset() - - proof, err := func() ([]types.Hash256, error) { - switch instr := instruction.(type) { - case *rhp.InstrAppendSector: - // read the sector data. - root, sector, err := rhp.ReadSector(r) - if err != nil { - return nil, fmt.Errorf("failed to read sector data: %w", err) - } - return pe.executeAppendSector(root, sector, instr.ProofRequired) - case *rhp.InstrUpdateSector: - if instr.Length > rhp.SectorSize { - return nil, fmt.Errorf("data length exceeds sector size") - } - data := make([]byte, instr.Length) - if _, err := io.ReadFull(r, data); err != nil { - return nil, fmt.Errorf("failed to read update data: %w", err) - } - return pe.executeUpdateSector(instr.Offset, data, instr.ProofRequired) - case *rhp.InstrDropSectors: - var dropped uint64 - if err := binary.Read(r, binary.LittleEndian, &dropped); err != nil { - return nil, fmt.Errorf("failed to read dropped sector count: %w", err) - } - return pe.executeDropSectors(dropped, instr.ProofRequired) - case *rhp.InstrHasSector: - // read the sector root from the program's data - var root types.Hash256 - if _, err := io.ReadFull(r, root[:]); err != nil { - return nil, fmt.Errorf("failed to read sector root: %w", err) - } - - return nil, pe.executeHasSector(root) - case *rhp.InstrReadSector: - var root types.Hash256 - var offset, length uint64 - - // read the root from the program's data - if _, err := io.ReadFull(r, root[:]); err != nil { - return nil, fmt.Errorf("failed to read sector root: %w", err) - } - - // read the offset and length from the program's data - if 
err := binary.Read(r, binary.LittleEndian, &offset); err != nil { - return nil, fmt.Errorf("failed to read sector offset: %w", err) - } - if err := binary.Read(r, binary.LittleEndian, &length); err != nil { - return nil, fmt.Errorf("failed to read sector length: %w", err) - } - - return pe.executeReadSector(root, offset, length, instr.ProofRequired) - case *rhp.InstrReadOffset: - var offset, length uint64 - - if err := binary.Read(r, binary.LittleEndian, &offset); err != nil { - return nil, fmt.Errorf("failed to read offset: %w", err) - } - - if err := binary.Read(r, binary.LittleEndian, &length); err != nil { - return nil, fmt.Errorf("failed to read length: %w", err) - } - - index := offset / rhp.SectorSize - if index >= uint64(len(pe.newRoots)) { - return nil, fmt.Errorf("offset out of range: %d", index) - } - - root := pe.newRoots[index] - offset %= rhp.SectorSize - return pe.executeReadSector(root, offset, length, instr.ProofRequired) - case *rhp.InstrSwapSector: - var sectorA, sectorB uint64 - - if err := binary.Read(r, binary.LittleEndian, §orA); err != nil { - return nil, fmt.Errorf("failed to read sector A index: %w", err) - } - - if err := binary.Read(r, binary.LittleEndian, §orB); err != nil { - return nil, fmt.Errorf("failed to read sector B index: %w", err) - } - - return pe.executeSwapSectors(sectorA, sectorB, instr.ProofRequired) - case *rhp.InstrContractRevision: - return nil, pe.executeContractRevision() - case *rhp.InstrSectorRoots: - return nil, pe.executeSectorRoots() - case *rhp.InstrReadRegistry: - // read the registry entry - var pub types.PublicKey - var tweak types.Hash256 - dec := types.NewDecoder(io.LimitedReader{R: r, N: 64}) - pub.DecodeFrom(dec) - tweak.DecodeFrom(dec) - if err := dec.Err(); err != nil { - return nil, fmt.Errorf("failed to decode instruction: %w", dec.Err()) - } - - // read the registry value - key := rhp.RegistryKey(pub, tweak) - return nil, pe.executeReadRegistry(key) - case *rhp.InstrUpdateRegistry: - var value 
rhp.RegistryValue - dec := types.NewDecoder(io.LimitedReader{R: r, N: int64(value.MaxLen())}) - value.DecodeFrom(dec) - if err := dec.Err(); err != nil { - return nil, fmt.Errorf("failed to decode instruction data: %w", dec.Err()) - } - - // update the registry value - return nil, pe.executeUpdateRegistry(value) - default: - return nil, fmt.Errorf("unknown instruction: %T", instruction) - } - }() - - if err := pe.encoder.Flush(); err != nil { - return fmt.Errorf("failed to flush encoder: %w", err) - } - - resp := &rhp.RPCExecuteInstrResponse{ - AdditionalCollateral: pe.additionalCollateral, - AdditionalStorage: pe.additionalStorage, - TotalCost: pe.spent, - FailureRefund: pe.failureRefund, - - NewDataSize: pe.newFileSize, - NewMerkleRoot: pe.newMerkleRoot, - Proof: proof, - OutputLength: uint64(pe.output.Len()), - - Error: err, - } - - if err := rpc.WriteResponse(w, resp); err != nil { - return fmt.Errorf("failed to write response: %w", err) - } else if _, err := pe.output.WriteTo(w); err != nil { - return fmt.Errorf("failed to write output: %w", err) - } - - return resp.Error -} - -// FinalizeContract updates the contract to reflect the final state of the -// program. -func (pe *ProgramExecutor) FinalizeContract(req rhp.RPCFinalizeProgramRequest) (rhp.Contract, error) { - revision := pe.contract.Revision - revision.RevisionNumber = req.NewRevisionNumber - req.NewOutputs.Apply(&revision) - // update the contract's merkle root and file size. - revision.FileMerkleRoot = pe.newMerkleRoot - revision.Filesize = pe.newFileSize - - // validate that the renter's revision is valid and only transfers the - // additional collateral and storage costs to the void. All other - // costs have already been paid by the RPC budget. 
- if err := rhp.ValidateProgramRevision(pe.contract.Revision, revision, pe.additionalStorage, pe.additionalCollateral); err != nil { - return rhp.Contract{}, fmt.Errorf("failed to validate program revision: %w", err) - } - - sigHash := pe.cs.ContractSigHash(revision) - if !pe.contract.Revision.RenterPublicKey.VerifyHash(sigHash, req.Signature) { - return rhp.Contract{}, errors.New("invalid renter signature") - } - revision.RenterSignature = req.Signature - revision.HostSignature = pe.privkey.SignHash(sigHash) - pe.contract.Revision = revision - - if err := pe.contracts.Revise(pe.contract); err != nil { - return rhp.Contract{}, fmt.Errorf("failed to revise contract: %w", err) - } else if err := pe.contracts.SetRoots(pe.contract.ID, pe.newRoots); err != nil { - return rhp.Contract{}, fmt.Errorf("failed to set new roots: %w", err) - } - return pe.contract, nil -} - -// Revert removes the sectors that were added by the program. If -// commit has already been called, this function is a no-op. -func (pe *ProgramExecutor) Revert() error { - if pe.committed { - return nil - } - - // delete the sectors added by the program. - for root, refs := range pe.gainedSectors { - if err := pe.sectors.Delete(root, refs); err != nil { - return fmt.Errorf("failed to remove sector: %w", err) - } - } - - // increase the budget by the failure refund. This will refund the storage - // costs from executing the program to the renter. - pe.budget.Increase(pe.failureRefund) - return nil -} - -// Commit removes any sectors that were removed by the program and -// sets the failure refund to zero. If commit has already been called this -// function is a no-op. -func (pe *ProgramExecutor) Commit() error { - if pe.committed { - return nil - } - // delete sectors removed by the program. 
- for root, refs := range pe.removedSectors { - if err := pe.sectors.Delete(root, refs); err != nil { - return fmt.Errorf("failed to remove sector: %w", err) - } - } - - // all program ops are now committed, set the failure refund to zero. - pe.failureRefund = types.ZeroCurrency - pe.committed = true - return nil -} - -// NewExecutor initializes the program's executor. -func NewExecutor(priv types.PrivateKey, ss SectorStore, cm ContractManager, rm *RegistryManager, cs consensus.State, settings rhp.HostSettings, budget *Budget) *ProgramExecutor { - pe := &ProgramExecutor{ - settings: settings, - budget: budget, - duration: 1, - - privkey: priv, - sectors: ss, - registry: rm, - contracts: cm, - cs: cs, - - gainedSectors: make(map[types.Hash256]uint64), - removedSectors: make(map[types.Hash256]uint64), - } - pe.encoder = types.NewEncoder(&pe.output) - - return pe -} diff --git a/v2/host/host.go b/v2/host/host.go deleted file mode 100644 index 51eb56dc..00000000 --- a/v2/host/host.go +++ /dev/null @@ -1,128 +0,0 @@ -package host - -import ( - "errors" - "io" - "time" - - "go.sia.tech/core/v2/chain" - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/net/rhp" - "go.sia.tech/core/v2/types" -) - -var ( - // ErrEntryNotFound should be returned when a registry key does not exist - // in the registry. - ErrEntryNotFound = errors.New("entry not found") -) - -type ( - // A SectorStore stores contract sector data. Implementations are expected - // to handle multiple references to a sector for add and delete operations. - SectorStore interface { - // Add adds the sector with the specified root to the store. - Add(root types.Hash256, sector *[rhp.SectorSize]byte) error - // Delete removes a number of references to a sector from the store. - // If a sector has no more references, it should be removed from the - // store. - Delete(root types.Hash256, references uint64) error - // Exists checks if the sector exists in the store. 
- Exists(root types.Hash256) (bool, error) - // Read reads the sector with the given root, offset and length - // into w. Returns the number of bytes read or an error. - Read(root types.Hash256, w io.Writer, offset, length uint64) (n uint64, err error) - // Update copies an existing sector with the specified root and adds a - // new sector to the store with the data at offset overwritten, - // returning the Merkle root of the new sector. - Update(root types.Hash256, offset uint64, data []byte) (types.Hash256, error) - } - - // An EphemeralAccountStore manages ephemeral account balances. - EphemeralAccountStore interface { - // Balance returns the balance of the account with the given ID. - Balance(accountID types.PublicKey) (types.Currency, error) - // Credit adds the specified amount to the account with the given ID. - // May be limited by MaxEphemeralAccountBalance setting. - Credit(accountID types.PublicKey, amount types.Currency) (types.Currency, error) - // Debit subtracts the specified amount from the account with the given - // ID. requestID may be used to uniquely identify and prevent duplicate - // debit requests. Returns the remaining balance of the account. - Debit(accountID types.PublicKey, requestID types.Hash256, amount types.Currency) (types.Currency, error) - // Refund refunds the specified amount to the account with the given ID, - // should not be limited by MaxEphemeralAccountBalance setting. - Refund(accountID types.PublicKey, amount types.Currency) error - } - - // RegistryStore stores host registry entries. The registry is a key/value - // store for small data. - RegistryStore interface { - // Get returns the registry value for the given key. If the key is not - // found should return ErrEntryNotFound. - Get(types.Hash256) (rhp.RegistryValue, error) - // Set sets the registry value for the given key. 
- Set(key types.Hash256, value rhp.RegistryValue, expiration uint64) (rhp.RegistryValue, error) - // Len returns the number of entries in the registry. - Len() uint64 - // Cap returns the maximum number of entries the registry can hold. - Cap() uint64 - } - - // A ContractStore stores contracts, metadata, and proofs for the host. - ContractStore interface { - chain.Subscriber - - // Exists returns true if the contract is in the store. - Exists(types.ElementID) bool - // Get returns the contract with the given ID. - Get(types.ElementID) (rhp.Contract, error) - // Add stores the provided contract, overwriting any previous contract - // with the same ID. - Add(rhp.Contract, types.Transaction) error - // ReviseContract updates the current revision associated with a contract. - Revise(rhp.Contract) error - - // Roots returns the roots of all sectors stored by the contract. - Roots(types.ElementID) ([]types.Hash256, error) - // SetRoots sets the stored roots of the contract. - SetRoots(types.ElementID, []types.Hash256) error - } - - // A ContractManager manages a hosts active contracts. - ContractManager interface { - // Lock locks a contract for modification. - Lock(types.ElementID, time.Duration) (rhp.Contract, error) - // Unlock unlocks a locked contract. - Unlock(types.ElementID) - // Add stores the provided contract, overwriting any previous contract - // with the same ID. - Add(rhp.Contract, types.Transaction) error - // ReviseContract updates the current revision associated with a contract. - Revise(rhp.Contract) error - - // Roots returns the roots of all sectors stored by the contract. - Roots(types.ElementID) ([]types.Hash256, error) - // SetRoots updates the roots of the contract. - SetRoots(types.ElementID, []types.Hash256) error - } - - // A SettingsReporter returns the host's current settings. 
- SettingsReporter interface { - Settings() rhp.HostSettings - } - - // A TransactionPool broadcasts transaction sets to miners for inclusion in - // an upcoming block. - TransactionPool interface { - AddTransaction(txn types.Transaction) error - RecommendedFee() types.Currency - } - - // A Wallet provides addresses and funds and signs transactions. - Wallet interface { - Address() types.Address - SpendPolicy(types.Address) (types.SpendPolicy, bool) - FundTransaction(txn *types.Transaction, amount types.Currency, pool []types.Transaction) ([]types.ElementID, func(), error) - SignTransaction(cs consensus.State, txn *types.Transaction, toSign []types.ElementID) error - } -) diff --git a/v2/host/registry.go b/v2/host/registry.go deleted file mode 100644 index a9eee5f0..00000000 --- a/v2/host/registry.go +++ /dev/null @@ -1,64 +0,0 @@ -package host - -import ( - "errors" - "fmt" - "sync" - - "go.sia.tech/core/v2/net/rhp" - "go.sia.tech/core/v2/types" -) - -// A RegistryManager manages registry entries stored in a RegistryStore. -type RegistryManager struct { - hostID types.Hash256 - store RegistryStore - - // registry entries must be locked while they are being modified - mu sync.Mutex -} - -// Get returns the registry value for the provided key. -func (r *RegistryManager) Get(key types.Hash256) (rhp.RegistryValue, error) { - r.mu.Lock() - defer r.mu.Unlock() - return r.store.Get(key) -} - -// Put creates or updates the registry value for the provided key. If err is nil -// the new value is returned, otherwise the previous value is returned. -func (r *RegistryManager) Put(value rhp.RegistryValue, expirationHeight uint64) (rhp.RegistryValue, error) { - r.mu.Lock() - defer r.mu.Unlock() - - if err := rhp.ValidateRegistryEntry(value); err != nil { - return rhp.RegistryValue{}, fmt.Errorf("invalid registry entry: %w", err) - } - - // get the current value. 
- key := value.Key() - old, err := r.store.Get(key) - // if the key doesn't exist, we don't need to validate it further. - if errors.Is(err, ErrEntryNotFound) { - if _, err = r.store.Set(key, value, expirationHeight); err != nil { - return value, fmt.Errorf("failed to create registry key: %w", err) - } - return value, nil - } else if err != nil { - return old, fmt.Errorf("failed to get registry value: %w", err) - } - - if err := rhp.ValidateRegistryUpdate(old, value, r.hostID); err != nil { - return old, fmt.Errorf("invalid registry update: %w", err) - } - - return r.store.Set(key, value, expirationHeight) -} - -// NewRegistryManager returns a new registry manager. -func NewRegistryManager(privkey types.PrivateKey, store RegistryStore) *RegistryManager { - return &RegistryManager{ - hostID: rhp.RegistryHostID(privkey.PublicKey()), - store: store, - } -} diff --git a/v2/host/registry_test.go b/v2/host/registry_test.go deleted file mode 100644 index 8bcd97f1..00000000 --- a/v2/host/registry_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package host - -import ( - "errors" - "reflect" - "sync" - "testing" - - "go.sia.tech/core/v2/net/rhp" - "go.sia.tech/core/v2/types" - "lukechampine.com/frand" -) - -type ephemeralRegistryStore struct { - mu sync.Mutex - - cap uint64 - values map[types.Hash256]rhp.RegistryValue -} - -// Get returns the registry value for the given key. If the key is not found -// should return renterhost.ErrNotFound. -func (er *ephemeralRegistryStore) Get(key types.Hash256) (rhp.RegistryValue, error) { - er.mu.Lock() - defer er.mu.Unlock() - - val, exists := er.values[key] - if !exists { - return rhp.RegistryValue{}, ErrEntryNotFound - } - return val, nil -} - -// Set sets the registry value for the given key. 
-func (er *ephemeralRegistryStore) Set(key types.Hash256, value rhp.RegistryValue, expiration uint64) (rhp.RegistryValue, error) { - er.mu.Lock() - defer er.mu.Unlock() - - if _, exists := er.values[key]; !exists && uint64(len(er.values)) >= er.cap { - return rhp.RegistryValue{}, errors.New("capacity exceeded") - } - - er.values[key] = value - return value, nil -} - -// Len returns the number of entries in the registry. -func (er *ephemeralRegistryStore) Len() uint64 { - er.mu.Lock() - defer er.mu.Unlock() - - return uint64(len(er.values)) -} - -// Cap returns the maximum number of entries the registry can hold. -func (er *ephemeralRegistryStore) Cap() uint64 { - return er.cap -} - -func newEphemeralRegistryStore(limit uint64) *ephemeralRegistryStore { - return &ephemeralRegistryStore{ - cap: limit, - values: make(map[types.Hash256]rhp.RegistryValue), - } -} - -func randomRegistryValue(key types.PrivateKey) (value rhp.RegistryValue) { - value.Tweak = frand.Entropy256() - value.Data = frand.Bytes(32) - value.Type = rhp.EntryTypeArbitrary - value.PublicKey = key.PublicKey() - value.Signature = key.SignHash(value.Hash()) - return -} - -func testRegistry(priKey types.PrivateKey, limit uint64) *RegistryManager { - return NewRegistryManager(priKey, newEphemeralRegistryStore(limit)) -} - -func TestRegistryPut(t *testing.T) { - const registryCap = 10 - hostPriv := types.GeneratePrivateKey() - renterPriv := types.GeneratePrivateKey() - reg := testRegistry(hostPriv, registryCap) - - // store a random value in the registry - original := randomRegistryValue(renterPriv) - updated, err := reg.Put(original, registryCap) - if err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(original, updated) { - t.Fatal("expected returned value to match") - } - - // test storing the same value again; should fail and return the original - // value - updated, err = reg.Put(original, 10) - if err == nil { - t.Fatalf("expected validation error") - } else if !reflect.DeepEqual(original, 
updated) { - t.Fatal("expected returned value to match") - } - - // test updating the value's revision number and data; should succeed - value := rhp.RegistryValue{ - Tweak: original.Tweak, - Data: original.Data, - Revision: 1, - Type: rhp.EntryTypeArbitrary, - PublicKey: renterPriv.PublicKey(), - } - value.Signature = renterPriv.SignHash(value.Hash()) - updated, err = reg.Put(value, 10) - if err != nil { - t.Fatalf("expected update to succeed, got %s", err) - } else if !reflect.DeepEqual(value, updated) { - t.Fatal("expected returned value to match new value") - } - - // test updating the value's work; should succeed - value = rhp.RegistryValue{ - Tweak: original.Tweak, - Data: make([]byte, 32), - Revision: 1, - Type: rhp.EntryTypeArbitrary, - PublicKey: renterPriv.PublicKey(), - } - var i int - for i = 0; i < 1e6; i++ { - frand.Read(value.Data) - if value.Work().Cmp(updated.Work()) > 0 { - break - } - } - value.Signature = renterPriv.SignHash(value.Hash()) - updated, err = reg.Put(value, 10) - if err != nil { - t.Fatalf("expected update to succeed, got %s", err) - } else if !reflect.DeepEqual(value, updated) { - t.Fatal("expected returned value to match new value") - } - - // test setting the value to a primary value; should succeed - hostID := rhp.RegistryHostID(hostPriv.PublicKey()) - value = rhp.RegistryValue{ - Tweak: original.Tweak, - Data: append([]byte(hostID[:20]), updated.Data...), - Revision: 1, - Type: rhp.EntryTypePubKey, - PublicKey: renterPriv.PublicKey(), - } - value.Signature = renterPriv.SignHash(value.Hash()) - updated, err = reg.Put(value, 10) - if err != nil { - t.Fatalf("expected update to succeed, got %s", err) - } else if !reflect.DeepEqual(value, updated) { - t.Fatal("expected returned value to match new value") - } - - // fill the registry - for i := 0; i < registryCap-1; i++ { - _, err := reg.Put(randomRegistryValue(renterPriv), 10) - if err != nil { - t.Fatalf("failed on entry %d: %s", i, err) - } - } - - // test storing a value that 
would exceed the registry capacity; should fail - _, err = reg.Put(randomRegistryValue(renterPriv), 10) - if err == nil { - t.Fatalf("expected cap error") - } -} diff --git a/v2/internal/blake2b/blake2b.go b/v2/internal/blake2b/blake2b.go deleted file mode 100644 index 17041bd1..00000000 --- a/v2/internal/blake2b/blake2b.go +++ /dev/null @@ -1,48 +0,0 @@ -// Package blake2b implements the BLAKE2b cryptographic hash function, -// with optimized variants for hashing Merkle tree inputs. -package blake2b - -import ( - "unsafe" - - "golang.org/x/crypto/blake2b" -) - -// from RFC 6962 -const leafHashPrefix = 0 -const nodeHashPrefix = 1 - -// SumLeaf computes the Merkle tree leaf hash of a single leaf. -func SumLeaf(leaf *[64]byte) [32]byte { - return hashBlock(leaf, leafHashPrefix) -} - -// SumPair computes the Merkle root of a pair of node hashes. -func SumPair(left, right [32]byte) [32]byte { - return hashBlock((*[64]byte)(unsafe.Pointer(&[2][32]byte{left, right})), nodeHashPrefix) -} - -// SumLeaves computes the Merkle tree leaf hash of four leaves, storing the -// results in outs. -func SumLeaves(outs *[4][32]byte, leaves *[4][64]byte) { - hashBlocks(outs, leaves, leafHashPrefix) -} - -// SumNodes computes the Merkle roots of four pairs of node hashes, storing the -// results in outs. 
-func SumNodes(outs *[4][32]byte, nodes *[8][32]byte) { - hashBlocks(outs, (*[4][64]byte)(unsafe.Pointer(nodes)), nodeHashPrefix) -} - -func hashBlockGeneric(msg *[64]byte, prefix uint64) [32]byte { - var buf [65]byte - buf[0] = byte(prefix) - copy(buf[1:], msg[:]) - return blake2b.Sum256(buf[:]) -} - -func hashBlocksGeneric(outs *[4][32]byte, msgs *[4][64]byte, prefix uint64) { - for i := range msgs { - outs[i] = hashBlockGeneric(&msgs[i], prefix) - } -} diff --git a/v2/internal/blake2b/blake2b_amd64.go b/v2/internal/blake2b/blake2b_amd64.go deleted file mode 100644 index 32c4181a..00000000 --- a/v2/internal/blake2b/blake2b_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -package blake2b - -import "golang.org/x/sys/cpu" - -//go:generate go run gen.go -out blake2b_amd64.s - -func hashBlock(msg *[64]byte, prefix uint64) [32]byte { - // TODO: asm - return hashBlockGeneric(msg, prefix) -} - -//go:noescape -func hashBlocksAVX2(outs *[4][32]byte, msgs *[4][64]byte, prefix uint64) - -func hashBlocks(outs *[4][32]byte, msgs *[4][64]byte, prefix uint64) { - switch { - case cpu.X86.HasAVX2: - hashBlocksAVX2(outs, msgs, prefix) - default: - hashBlocksGeneric(outs, msgs, prefix) - } -} diff --git a/v2/internal/blake2b/blake2b_amd64.s b/v2/internal/blake2b/blake2b_amd64.s deleted file mode 100644 index c760e5bc..00000000 --- a/v2/internal/blake2b/blake2b_amd64.s +++ /dev/null @@ -1,1646 +0,0 @@ -// Code generated by command: go run gen.go -out blake2b_amd64.s. DO NOT EDIT. 
- -#include "textflag.h" - -DATA shuffle_rot16<>+0(SB)/8, $0x0100070605040302 -DATA shuffle_rot16<>+8(SB)/8, $0x09080f0e0d0c0b0a -DATA shuffle_rot16<>+16(SB)/8, $0x1110171615141312 -DATA shuffle_rot16<>+24(SB)/8, $0x19181f1e1d1c1b1a -GLOBL shuffle_rot16<>(SB), RODATA|NOPTR, $32 - -DATA shuffle_rot24<>+0(SB)/8, $0x0201000706050403 -DATA shuffle_rot24<>+8(SB)/8, $0x0a09080f0e0d0c0b -DATA shuffle_rot24<>+16(SB)/8, $0x1211101716151413 -DATA shuffle_rot24<>+24(SB)/8, $0x1a19181f1e1d1c1b -GLOBL shuffle_rot24<>(SB), RODATA|NOPTR, $32 - -DATA shuffle_rot32<>+0(SB)/8, $0x0302010007060504 -DATA shuffle_rot32<>+8(SB)/8, $0x0b0a09080f0e0d0c -DATA shuffle_rot32<>+16(SB)/8, $0x1312111017161514 -DATA shuffle_rot32<>+24(SB)/8, $0x1b1a19181f1e1d1c -GLOBL shuffle_rot32<>(SB), RODATA|NOPTR, $32 - -DATA shuffle_first_vector<>+0(SB)/8, $0x06050403020100ff -DATA shuffle_first_vector<>+8(SB)/8, $0x0e0d0c0b0a0908ff -DATA shuffle_first_vector<>+16(SB)/8, $0x16151413121110ff -DATA shuffle_first_vector<>+24(SB)/8, $0x1e1d1c1b1a1918ff -GLOBL shuffle_first_vector<>(SB), RODATA|NOPTR, $32 - -DATA shuffle_last_vector<>+0(SB)/8, $0xffffffffffffff07 -DATA shuffle_last_vector<>+8(SB)/8, $0xffffffffffffff0f -DATA shuffle_last_vector<>+16(SB)/8, $0xffffffffffffff17 -DATA shuffle_last_vector<>+24(SB)/8, $0xffffffffffffff1f -GLOBL shuffle_last_vector<>(SB), RODATA|NOPTR, $32 - -DATA stride_64<>+0(SB)/8, $0x0000000000000000 -DATA stride_64<>+8(SB)/8, $0x0000000000000040 -DATA stride_64<>+16(SB)/8, $0x0000000000000080 -DATA stride_64<>+24(SB)/8, $0x00000000000000c0 -GLOBL stride_64<>(SB), RODATA|NOPTR, $32 - -DATA init_state<>+0(SB)/8, $0x6a09e667f2bdc928 -DATA init_state<>+8(SB)/8, $0xbb67ae8584caa73b -DATA init_state<>+16(SB)/8, $0x3c6ef372fe94f82b -DATA init_state<>+24(SB)/8, $0xa54ff53a5f1d36f1 -DATA init_state<>+32(SB)/8, $0x510e527fade682d1 -DATA init_state<>+40(SB)/8, $0x9b05688c2b3e6c1f -DATA init_state<>+48(SB)/8, $0x1f83d9abfb41bd6b -DATA init_state<>+56(SB)/8, $0x5be0cd19137e2179 -DATA 
init_state<>+64(SB)/8, $0x6a09e667f3bcc908 -DATA init_state<>+72(SB)/8, $0xbb67ae8584caa73b -DATA init_state<>+80(SB)/8, $0x3c6ef372fe94f82b -DATA init_state<>+88(SB)/8, $0xa54ff53a5f1d36f1 -DATA init_state<>+96(SB)/8, $0x510e527fade68290 -DATA init_state<>+104(SB)/8, $0x9b05688c2b3e6c1f -DATA init_state<>+112(SB)/8, $0xe07c265404be4294 -DATA init_state<>+120(SB)/8, $0x5be0cd19137e2179 -GLOBL init_state<>(SB), RODATA|NOPTR, $128 - -// func hashBlocksAVX2(outs *[4][32]byte, msgs *[4][64]byte, prefix uint64) -// Requires: AVX, AVX2 -TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-24 - MOVQ msgs+8(FP), AX - MOVQ outs+0(FP), CX - - // Transpose message vectors into the stack - VMOVDQU stride_64<>+0(SB), Y15 - VPCMPEQD Y14, Y14, Y14 - VPGATHERQQ Y14, (AX)(Y15*1), Y0 - VPCMPEQD Y14, Y14, Y14 - VPGATHERQQ Y14, 7(AX)(Y15*1), Y1 - VPCMPEQD Y14, Y14, Y14 - VPGATHERQQ Y14, 15(AX)(Y15*1), Y2 - VPCMPEQD Y14, Y14, Y14 - VPGATHERQQ Y14, 23(AX)(Y15*1), Y3 - VPCMPEQD Y14, Y14, Y14 - VPGATHERQQ Y14, 31(AX)(Y15*1), Y4 - VPCMPEQD Y14, Y14, Y14 - VPGATHERQQ Y14, 39(AX)(Y15*1), Y5 - VPCMPEQD Y14, Y14, Y14 - VPGATHERQQ Y14, 47(AX)(Y15*1), Y6 - VPCMPEQD Y14, Y14, Y14 - VPGATHERQQ Y14, 55(AX)(Y15*1), Y7 - VPCMPEQD Y14, Y14, Y14 - VPGATHERQQ Y14, 56(AX)(Y15*1), Y8 - VPSHUFB shuffle_first_vector<>+0(SB), Y0, Y0 - VPBROADCASTQ prefix+16(FP), Y15 - VPOR Y0, Y15, Y0 - VPSHUFB shuffle_last_vector<>+0(SB), Y8, Y8 - VMOVDQU Y0, (SP) - VMOVDQU Y1, 32(SP) - VMOVDQU Y2, 64(SP) - VMOVDQU Y3, 96(SP) - VMOVDQU Y4, 128(SP) - VMOVDQU Y5, 160(SP) - VMOVDQU Y6, 192(SP) - VMOVDQU Y7, 224(SP) - VMOVDQU Y8, 256(SP) - - // Round setup - VPBROADCASTQ init_state<>+0(SB), Y0 - VPBROADCASTQ init_state<>+8(SB), Y1 - VPBROADCASTQ init_state<>+16(SB), Y2 - VPBROADCASTQ init_state<>+24(SB), Y3 - VPBROADCASTQ init_state<>+32(SB), Y4 - VPBROADCASTQ init_state<>+40(SB), Y5 - VPBROADCASTQ init_state<>+48(SB), Y6 - VPBROADCASTQ init_state<>+56(SB), Y7 - VPBROADCASTQ init_state<>+64(SB), Y8 - VPBROADCASTQ init_state<>+72(SB), Y9 - 
VPBROADCASTQ init_state<>+80(SB), Y10 - VPBROADCASTQ init_state<>+88(SB), Y11 - VPBROADCASTQ init_state<>+96(SB), Y12 - VPBROADCASTQ init_state<>+104(SB), Y13 - VPBROADCASTQ init_state<>+112(SB), Y14 - VPBROADCASTQ init_state<>+120(SB), Y15 - VMOVDQU Y8, 288(SP) - - // Round 1 - VPADDQ Y0, Y4, Y0 - VPADDQ (SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPADDQ 32(SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 64(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPADDQ 96(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPADDQ 128(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPADDQ 160(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPADDQ 192(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPADDQ 224(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPADDQ 256(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB 
shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 2 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 128(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPADDQ 
256(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPADDQ 192(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPADDQ 32(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPADDQ (SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPADDQ 64(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPADDQ 224(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 
- VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPADDQ 160(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPADDQ 96(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 3 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPADDQ 256(SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPADDQ (SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPADDQ 160(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPADDQ 64(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ 
$0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPADDQ 96(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPADDQ 192(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPADDQ 224(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPADDQ 32(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPADDQ 128(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 4 - VPADDQ Y0, Y4, Y0 - VPADDQ 224(SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - 
VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 96(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPADDQ 32(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPADDQ 64(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPADDQ 192(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPADDQ 160(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPADDQ 128(SP), Y2, Y2 - VPXOR Y13, Y2, 
Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPADDQ (SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPADDQ 256(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 5 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPADDQ (SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 160(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPADDQ 224(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPADDQ 64(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPADDQ 128(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - 
VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPADDQ 32(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPADDQ 192(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPADDQ 256(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPADDQ 96(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 6 - VPADDQ Y0, Y4, Y0 - VPADDQ 64(SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - 
VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 192(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPADDQ (SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPADDQ 256(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPADDQ 96(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPADDQ 128(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPADDQ 224(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - 
VPADDQ Y1, Y6, Y1 - VPADDQ 160(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPADDQ 32(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 7 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPADDQ 160(SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 32(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, 
Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPADDQ 128(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPADDQ (SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPADDQ 224(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPADDQ 192(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPADDQ 96(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPADDQ 64(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPADDQ 256(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - 
VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 8 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 224(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPADDQ 32(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPADDQ 96(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPADDQ 160(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPADDQ (SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR 
Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPADDQ 128(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPADDQ 256(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPADDQ 192(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPADDQ 64(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 9 - VPADDQ Y0, Y4, Y0 - VPADDQ 192(SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, 
Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPADDQ 96(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPADDQ (SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPADDQ 256(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPADDQ 64(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPADDQ 224(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPADDQ 32(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPADDQ 128(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB 
shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPADDQ 160(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 10 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPADDQ 64(SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 256(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPADDQ 128(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPADDQ 224(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPADDQ 192(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPADDQ 32(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPADDQ 160(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - 
VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPADDQ 96(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPADDQ (SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 11 - VPADDQ Y0, Y4, Y0 - VPADDQ (SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPADDQ 32(SP), Y0, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 64(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 
Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPADDQ 96(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPADDQ 128(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPADDQ 160(SP), Y2, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPADDQ 192(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPADDQ 224(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPADDQ 256(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 
- VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Round 12 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ 288(SP), Y12, Y8 - VPXOR Y4, Y8, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y0, Y4, Y0 - VPXOR Y12, Y0, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y8, Y12, Y8 - VPXOR Y4, Y8, Y4 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - VPADDQ Y1, Y5, Y1 - VPADDQ 128(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y1, Y5, Y1 - VPADDQ 256(SP), Y1, Y1 - VPXOR Y13, Y1, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y9, Y13, Y9 - VPXOR Y5, Y9, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y2, Y6, Y2 - VPXOR Y14, Y2, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y10, Y14, Y10 - VPXOR Y6, Y10, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y3, Y7, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y11, Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y3, Y7, Y3 - VPADDQ 192(SP), Y3, Y3 - VPXOR Y15, Y3, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y11, 
Y15, Y11 - VPXOR Y7, Y11, Y7 - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y0, Y5, Y0 - VPADDQ 32(SP), Y0, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot32<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSHUFB shuffle_rot24<>+0(SB), Y5, Y5 - VPADDQ Y0, Y5, Y0 - VPXOR Y15, Y0, Y15 - VPSHUFB shuffle_rot16<>+0(SB), Y15, Y15 - VPADDQ Y10, Y15, Y10 - VPXOR Y5, Y10, Y5 - VPSRLQ $0x3f, Y5, Y8 - VPSLLQ $0x01, Y5, Y5 - VPOR Y5, Y8, Y5 - VPADDQ Y1, Y6, Y1 - VPADDQ (SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot32<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSHUFB shuffle_rot24<>+0(SB), Y6, Y6 - VPADDQ Y1, Y6, Y1 - VPADDQ 64(SP), Y1, Y1 - VPXOR Y12, Y1, Y12 - VPSHUFB shuffle_rot16<>+0(SB), Y12, Y12 - VPADDQ Y11, Y12, Y11 - VPXOR Y6, Y11, Y6 - VPSRLQ $0x3f, Y6, Y8 - VPSLLQ $0x01, Y6, Y6 - VPOR Y6, Y8, Y6 - VPADDQ Y2, Y7, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot32<>+0(SB), Y13, Y13 - VPADDQ 288(SP), Y13, Y8 - VPXOR Y7, Y8, Y7 - VPSHUFB shuffle_rot24<>+0(SB), Y7, Y7 - VPADDQ Y2, Y7, Y2 - VPADDQ 224(SP), Y2, Y2 - VPXOR Y13, Y2, Y13 - VPSHUFB shuffle_rot16<>+0(SB), Y13, Y13 - VPADDQ Y8, Y13, Y8 - VPXOR Y7, Y8, Y7 - VMOVDQU Y8, 288(SP) - VPSRLQ $0x3f, Y7, Y8 - VPSLLQ $0x01, Y7, Y7 - VPOR Y7, Y8, Y7 - VPADDQ Y3, Y4, Y3 - VPADDQ 160(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot32<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSHUFB shuffle_rot24<>+0(SB), Y4, Y4 - VPADDQ Y3, Y4, Y3 - VPADDQ 96(SP), Y3, Y3 - VPXOR Y14, Y3, Y14 - VPSHUFB shuffle_rot16<>+0(SB), Y14, Y14 - VPADDQ Y9, Y14, Y9 - VPXOR Y4, Y9, Y4 - VPSRLQ $0x3f, Y4, Y8 - VPSLLQ $0x01, Y4, Y4 - VPOR Y4, Y8, Y4 - - // Finalize - VMOVDQU 288(SP), Y8 - VPXOR Y0, Y8, Y0 - VPBROADCASTQ init_state<>+0(SB), Y8 - VPXOR Y0, Y8, Y0 - VPXOR Y1, Y9, Y1 - VPBROADCASTQ init_state<>+8(SB), Y9 - VPXOR Y1, Y9, Y1 - VPXOR Y2, Y10, Y2 - VPBROADCASTQ init_state<>+16(SB), Y10 - VPXOR Y2, Y10, Y2 - VPXOR Y3, Y11, Y3 - VPBROADCASTQ 
init_state<>+24(SB), Y11 - VPXOR Y3, Y11, Y3 - - // Transpose state vectors into outs - VPUNPCKLQDQ Y1, Y0, Y4 - VPUNPCKHQDQ Y1, Y0, Y5 - VPUNPCKLQDQ Y3, Y2, Y6 - VPUNPCKHQDQ Y3, Y2, Y7 - VINSERTI128 $0x01, X6, Y4, Y0 - VINSERTI128 $0x01, X7, Y5, Y1 - VPERM2I128 $0x31, Y6, Y4, Y2 - VPERM2I128 $0x31, Y7, Y5, Y3 - VMOVDQU Y0, (CX) - VMOVDQU Y1, 32(CX) - VMOVDQU Y2, 64(CX) - VMOVDQU Y3, 96(CX) - RET diff --git a/v2/internal/blake2b/blake2b_generic.go b/v2/internal/blake2b/blake2b_generic.go deleted file mode 100644 index 0a696b3c..00000000 --- a/v2/internal/blake2b/blake2b_generic.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !amd64 -// +build !amd64 - -package blake2b - -func hashBlock(msg *[64]byte, prefix uint64) [32]byte { - return hashBlockGeneric(msg, prefix) -} - -func hashBlocks(outs *[4][32]byte, msgs *[4][64]byte, prefix uint64) { - hashBlocksGeneric(outs, msgs, prefix) -} diff --git a/v2/internal/blake2b/blake2b_test.go b/v2/internal/blake2b/blake2b_test.go deleted file mode 100644 index e1279cf8..00000000 --- a/v2/internal/blake2b/blake2b_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package blake2b - -import ( - "testing" - "unsafe" -) - -func TestBLAKE2b(t *testing.T) { - var leaves [4][64]byte - for i := range leaves { - for j := range leaves[i] { - leaves[i][j] = byte(i*64 + j + 7) - } - } - var refs [4][32]byte - hashBlocksGeneric(&refs, &leaves, 0) - var outs [4][32]byte - SumLeaves(&outs, &leaves) - for i := range outs { - if outs[i] != refs[i] { - t.Fatalf("mismatch %v:\nasm: %x\nref: %x", i, outs[i], refs[i]) - } - } - - parents := (*[8][32]byte)(unsafe.Pointer(&leaves)) - hashBlocksGeneric(&refs, &leaves, 1) - SumNodes(&outs, parents) - for i := range outs { - if outs[i] != refs[i] { - t.Fatalf("mismatch %v:\nasm: %x\nref: %x", i, outs[i], refs[i]) - } - } -} - -func BenchmarkBLAKE2b(b *testing.B) { - var leaves [4][64]byte - var nodes [8][32]byte - var outs [4][32]byte - b.Run("SumLeaves", func(b *testing.B) { - b.SetBytes(4 * 64) - for i := 0; i < 
b.N; i++ { - SumLeaves(&outs, &leaves) - } - }) - b.Run("SumNodes", func(b *testing.B) { - b.SetBytes(8 * 32) - for i := 0; i < b.N; i++ { - SumNodes(&outs, &nodes) - } - }) - b.Run("hashBlocksGeneric", func(b *testing.B) { - b.SetBytes(4 * 64) - for i := 0; i < b.N; i++ { - hashBlocksGeneric(&outs, &leaves, 0) - } - }) -} diff --git a/v2/internal/blake2b/gen.go b/v2/internal/blake2b/gen.go deleted file mode 100644 index 2eb01535..00000000 --- a/v2/internal/blake2b/gen.go +++ /dev/null @@ -1,317 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -import ( - "fmt" - "math/bits" -) - -// global vars -var ( - shuffleRot16 Mem - shuffleRot24 Mem - shuffleRot32 Mem - shuffleFirstVector Mem - shuffleLastVector Mem - stride64 Mem - initState Mem -) - -func main() { - genGlobals() - genHashBlocksAVX2() - - Generate() -} - -func genGlobals() { - // helper function for generating rotation shuffles - genRot := func(n int) (vals [4]U64) { - // in VPSHUFB, each byte specifies a destination index - u := bits.RotateLeft64(0x0706050403020100, n) - for i := range vals { - vals[i] = U64(u) - u += 0x0808080808080808 // add 8 to each byte - } - return - } - - shuffleRot16 = GLOBL("shuffle_rot16", RODATA|NOPTR) - for i, u := range genRot(-16) { - DATA(i*8, u) - } - shuffleRot24 = GLOBL("shuffle_rot24", RODATA|NOPTR) - for i, u := range genRot(-24) { - DATA(i*8, u) - } - shuffleRot32 = GLOBL("shuffle_rot32", RODATA|NOPTR) - for i, u := range genRot(-32) { - DATA(i*8, u) - } - - // For the first message vector, we want to shift in a zero byte: - // - // old: 0x0706050403020100 - // new: 0x0605040302010000 - shuffleFirstVector = GLOBL("shuffle_first_vector", RODATA|NOPTR) - for i, u := range genRot(8) { - // in VPSHUFB, FF means "zero this byte" - DATA(i*8, u|0x00000000000000FF) - } - // For the last vector, we want to keep only the last byte: - // - // old: 0x0706050403020100 - // new: 0x0000000000000007 - shuffleLastVector = GLOBL("shuffle_last_vector", RODATA|NOPTR) - for 
i, u := range genRot(8) { - DATA(i*8, u|0xFFFFFFFFFFFFFF00) - } - - // To load mesage vectors, we use VPGATHERQQ, which takes 4 memory offsets - // and loads a quadword (uint64) from each offset. Our messages are each 64 - // bytes long, so we use a stride of 64. - stride64 = GLOBL("stride_64", RODATA|NOPTR) - for i := 0; i < 4; i++ { - DATA(i*8, U64(i*64)) - } - - // All of our messages have the same length and consist of a single block. - // This means we can precompute the initial state for each hash. - initState = GLOBL("init_state", RODATA|NOPTR) - for i, v := range [16]U64{ - // h0 .. h7 - 0x6a09e667f2bdc928, - 0xbb67ae8584caa73b, - 0x3c6ef372fe94f82b, - 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, - 0x9b05688c2b3e6c1f, - 0x1f83d9abfb41bd6b, - 0x5be0cd19137e2179, - // iv ^ parameter block - 0x6a09e667f3bcc908, - 0xbb67ae8584caa73b, - 0x3c6ef372fe94f82b, - 0xa54ff53a5f1d36f1, - 0x510e527fade68290, // xor'd with 65 (input length) - 0x9b05688c2b3e6c1f, - 0xe07c265404be4294, // xor'd with ~0 (final block) - 0x5be0cd19137e2179, - } { - DATA(i*8, v) - } -} - -func genHashBlocksAVX2() { - TEXT("hashBlocksAVX2", NOSPLIT, "func(outs *[4][32]byte, msgs *[4][64]byte, prefix uint64)") - Pragma("noescape") - msgs := Mem{Base: Load(Param("msgs"), GP64())} - outs := Mem{Base: Load(Param("outs"), GP64())} - prefix, _ := Param("prefix").Resolve() - // We'll be using all 16 YMM registers, so allocate them all up-front so - // that we can manage them manually. - vs := [16]VecVirtual{ - YMM(), YMM(), YMM(), YMM(), - YMM(), YMM(), YMM(), YMM(), - YMM(), YMM(), YMM(), YMM(), - YMM(), YMM(), YMM(), YMM(), - } - - // Store transposed msgs on the stack. Since our messages are 65 bytes (64 - // plus a prefix byte), we need space for 9 transposed vectors. - block := AllocLocal(9 * 32) - - { - Comment("Transpose message vectors into the stack") - VMOVDQU(stride64, vs[15]) - for i := range vs[:9] { - // Since we need to prepend a byte, our loads are offset by 1. 
This - // means that we need to handle the first and last vectors - // differently in order to avoid out-of-bounds memory access. - var offset int - switch i { - case 0: - offset = 0 - default: - offset = i*8 - 1 - case 8: - offset = (i - 1) * 8 - } - strideMem := msgs.Offset(offset).Idx(vs[15], 1) - // The first argument to VPGATHERQQ is a mask that controls which - // offsets are read. In our case, we always want to read all - // offsets, so we set the mask to all ones. Annoyingly, VPGATHERQQ - // zeros this argument on completion, so we need to reset it each - // time. - VPCMPEQD(vs[14], vs[14], vs[14]) // fastest way to set all bits to 1 - VPGATHERQQ(vs[14], strideMem, vs[i]) - } - // Adjust the first and last vectors: the words in the first vector need - // to be prefixed, and the words in the last vector need to contain only - // their first byte. - VPSHUFB(shuffleFirstVector, vs[0], vs[0]) - VPBROADCASTQ(prefix.Addr, vs[15]) - VPOR(vs[0], vs[15], vs[0]) - VPSHUFB(shuffleLastVector, vs[8], vs[8]) - for i, v := range vs[:9] { - VMOVDQU(v, block.Offset(i*32)) - } - } - - { - Comment("Round setup") - for i := range vs { - VPBROADCASTQ(initState.Offset(i*8), vs[i]) - } - // There are 16 uint64s in each block, and we have 16 YMM registers, - // each of which holds 4 uint64s, so we can process 4 blocks at a time. - // Unfortunately, the G function involves a rotate by 63 bits, which - // requires a temporary register. To get around this, we "spill" one of - // the YMM registers to memory and only reload it when we need it, so - // that we can use that register for the rotate. - tmp := vs[8] - spillMem := AllocLocal(32) - VMOVDQU(vs[8], spillMem) - - for i := 0; i < 12; i++ { - Comment(fmt.Sprintf("Round %v", i+1)) - msgs := permutation(block, i) - round(vs, msgs, tmp, spillMem) - } - - Comment("Finalize") - VMOVDQU(spillMem, vs[8]) // reload spilled register - // Since we're outputting 32-byte hashes, we only need the first 4 - // registers. 
- for i := range vs[:4] { - VPXOR(vs[i], vs[i+8], vs[i]) - VPBROADCASTQ(initState.Offset(i*8), vs[i+8]) - VPXOR(vs[i], vs[i+8], vs[i]) - } - } - - { - Comment("Transpose state vectors into outs") - // AVX2 doesn't have a strided store ("scatter") instruction, so instead - // we transpose the registers manually and then write them out - // sequentially. We can transpose by interleaving each uint64, and then - // interleaving groups of two uint64s. - - // Interleave each uint64: - // - // v0: 0 1 2 3 - // v1: 4 5 6 7 - // v2: 8 9 10 11 - // v3: 12 13 14 15 - // -> - // v4: 0 4 2 6 - // v5: 1 5 3 7 - // v6: 8 12 10 14 - // v7: 9 13 11 15 - VPUNPCKLQDQ(vs[1], vs[0], vs[4]) - VPUNPCKHQDQ(vs[1], vs[0], vs[5]) - VPUNPCKLQDQ(vs[3], vs[2], vs[6]) - VPUNPCKHQDQ(vs[3], vs[2], vs[7]) - // Interleave groups of two uint64s: - // - // -> - // v0: 0 4 8 12 - // v1: 1 5 9 13 - // v2: 2 6 10 14 - // v3: 3 7 11 15 - VINSERTI128(Imm(1), vs[6].AsX(), vs[4], vs[0]) - VINSERTI128(Imm(1), vs[7].AsX(), vs[5], vs[1]) - VPERM2I128(Imm(0x31), vs[6], vs[4], vs[2]) - VPERM2I128(Imm(0x31), vs[7], vs[5], vs[3]) - for i, v := range vs[:4] { - VMOVDQU(v, outs.Offset(i*32)) - } - } - - RET() -} - -func round(sv [16]VecVirtual, mv [16]Mem, tmp VecVirtual, spillMem Mem) { - g(sv[0], sv[4], sv[8], sv[12], mv[0], mv[1], tmp, spillMem) - g(sv[1], sv[5], sv[9], sv[13], mv[2], mv[3], tmp, spillMem) - g(sv[2], sv[6], sv[10], sv[14], mv[4], mv[5], tmp, spillMem) - g(sv[3], sv[7], sv[11], sv[15], mv[6], mv[7], tmp, spillMem) - g(sv[0], sv[5], sv[10], sv[15], mv[8], mv[9], tmp, spillMem) - g(sv[1], sv[6], sv[11], sv[12], mv[10], mv[11], tmp, spillMem) - g(sv[2], sv[7], sv[8], sv[13], mv[12], mv[13], tmp, spillMem) - g(sv[3], sv[4], sv[9], sv[14], mv[14], mv[15], tmp, spillMem) -} - -func rotr63(v, tmp VecVirtual) { - VPSRLQ(Imm(63), v, tmp) - VPSLLQ(Imm(1), v, v) - VPOR(v, tmp, v) -} - -// zeroMem is a sentinel value that represents a message vector that contains -// all zeros. This lets us skip a VPADDQ in g. 
-var zeroMem Mem - -func g(a, b, c, d VecVirtual, mx, my Mem, tmp VecVirtual, spillMem Mem) { - VPADDQ(a, b, a) - if mx != zeroMem { - VPADDQ(mx, a, a) - } - VPXOR(d, a, d) - VPSHUFB(shuffleRot32, d, d) - // When the spilled register (v[8]) is used in g, it's always passed as the - // c parameter. If so, we need to reload the spill for this add. - if c == tmp { - VPADDQ(spillMem, d, c) - } else { - VPADDQ(c, d, c) - } - VPXOR(b, c, b) - VPSHUFB(shuffleRot24, b, b) - VPADDQ(a, b, a) - if my != zeroMem { - VPADDQ(my, a, a) - } - VPXOR(d, a, d) - VPSHUFB(shuffleRot16, d, d) - VPADDQ(c, d, c) - VPXOR(b, c, b) - // We're done with c (and we need the tmp register), so if we reloaded it, - // spill it now. - if c == tmp { - VMOVDQU(c, spillMem) - } - rotr63(b, tmp) -} - -// Each round uses a different permutation of the message vectors. Since we're -// inlining everything, we can permute the memory *locations* instead of the -// memory itself. -func permutation(msg Mem, n int) [16]Mem { - perms := [12][16]int{ - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3}, - {11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4}, - {7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8}, - {9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13}, - {2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9}, - {12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11}, - {13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10}, - {6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5}, - {10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0}, - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - {14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3}, - } - var m [16]Mem - for i, j := range perms[n] { - // Since our messages are 65 bytes, m9 through m15 are all zeros. 
- if j < 9 { - m[i] = msg.Offset(j * 32) - } else { - m[i] = zeroMem - } - } - return m -} diff --git a/v2/internal/chainutil/chainutil.go b/v2/internal/chainutil/chainutil.go deleted file mode 100644 index 0f699540..00000000 --- a/v2/internal/chainutil/chainutil.go +++ /dev/null @@ -1,238 +0,0 @@ -package chainutil - -import ( - "time" - - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/types" -) - -// FindBlockNonce finds a block nonce meeting the target. -func FindBlockNonce(cs consensus.State, h *types.BlockHeader, target types.BlockID) { - // ensure nonce meets factor requirement - for h.Nonce%cs.NonceFactor() != 0 { - h.Nonce++ - } - for !h.ID().MeetsTarget(target) { - h.Nonce += cs.NonceFactor() - } -} - -// JustHeaders renters only the headers of each block. -func JustHeaders(blocks []types.Block) []types.BlockHeader { - headers := make([]types.BlockHeader, len(blocks)) - for i := range headers { - headers[i] = blocks[i].Header - } - return headers -} - -// JustTransactions returns only the transactions of each block. -func JustTransactions(blocks []types.Block) [][]types.Transaction { - txns := make([][]types.Transaction, len(blocks)) - for i := range txns { - txns[i] = blocks[i].Transactions - } - return txns -} - -// JustTransactionIDs returns only the transaction ids included in each block. -func JustTransactionIDs(blocks []types.Block) [][]types.TransactionID { - txns := make([][]types.TransactionID, len(blocks)) - for i := range txns { - txns[i] = make([]types.TransactionID, len(blocks[i].Transactions)) - for j := range txns[i] { - txns[i][j] = blocks[i].Transactions[j].ID() - } - } - return txns -} - -// JustChainIndexes returns only the chain index of each block. -func JustChainIndexes(blocks []types.Block) []types.ChainIndex { - cis := make([]types.ChainIndex, len(blocks)) - for i := range cis { - cis[i] = blocks[i].Index() - } - return cis -} - -// ChainSim represents a simulation of a blockchain. 
-type ChainSim struct { - Genesis consensus.Checkpoint - Chain []types.Block - State consensus.State - - nonce uint64 // for distinguishing forks - - // for simulating transactions - pubkey types.PublicKey - privkey types.PrivateKey - outputs []types.SiacoinElement -} - -// Fork forks the current chain. -func (cs *ChainSim) Fork() *ChainSim { - cs2 := *cs - cs2.Chain = append([]types.Block(nil), cs2.Chain...) - cs2.outputs = append([]types.SiacoinElement(nil), cs2.outputs...) - cs.nonce += 1 << 48 - return &cs2 -} - -// MineBlockWithTxns mine a block with the given transaction. -func (cs *ChainSim) MineBlockWithTxns(txns ...types.Transaction) types.Block { - prev := cs.Genesis.Block.Header - if len(cs.Chain) > 0 { - prev = cs.Chain[len(cs.Chain)-1].Header - } - b := types.Block{ - Header: types.BlockHeader{ - Height: prev.Height + 1, - ParentID: prev.ID(), - Nonce: cs.nonce, - Timestamp: prev.Timestamp.Add(time.Second), - MinerAddress: types.VoidAddress, - }, - Transactions: txns, - } - b.Header.Commitment = cs.State.Commitment(b.Header.MinerAddress, b.Transactions) - FindBlockNonce(cs.State, &b.Header, types.HashRequiringWork(cs.State.Difficulty)) - - sau := consensus.ApplyBlock(cs.State, b) - cs.State = sau.State - cs.Chain = append(cs.Chain, b) - - // update our outputs - for i := range cs.outputs { - sau.UpdateElementProof(&cs.outputs[i].StateElement) - } - for _, out := range sau.NewSiacoinElements { - if out.Address == types.StandardAddress(cs.pubkey) { - cs.outputs = append(cs.outputs, out) - } - } - - return b -} - -// MineBlockWithSiacoinOutputs mines a block with a transaction containing the -// specified siacoin outputs. The ChainSim must have funds equal to or exceeding -// the sum of the outputs. 
-func (cs *ChainSim) MineBlockWithSiacoinOutputs(scos ...types.SiacoinOutput) types.Block { - txn := types.Transaction{ - SiacoinOutputs: scos, - MinerFee: types.NewCurrency64(cs.State.Index.Height), - } - - totalOut := txn.MinerFee - for _, b := range scos { - totalOut = totalOut.Add(b.Value) - } - - // select inputs and compute change output - var totalIn types.Currency - for i, out := range cs.outputs { - txn.SiacoinInputs = append(txn.SiacoinInputs, types.SiacoinInput{ - Parent: out, - SpendPolicy: types.PolicyPublicKey(cs.pubkey), - }) - totalIn = totalIn.Add(out.Value) - if totalIn.Cmp(totalOut) >= 0 { - cs.outputs = cs.outputs[i+1:] - break - } - } - - if totalIn.Cmp(totalOut) < 0 { - panic("insufficient funds") - } else if totalIn.Cmp(totalOut) > 0 { - // add change output - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Address: types.StandardAddress(cs.pubkey), - Value: totalIn.Sub(totalOut), - }) - } - - // sign and mine - sigHash := cs.State.InputSigHash(txn) - for i := range txn.SiacoinInputs { - txn.SiacoinInputs[i].Signatures = []types.Signature{cs.privkey.SignHash(sigHash)} - } - return cs.MineBlockWithTxns(txn) -} - -// MineBlock mine an empty block. 
-func (cs *ChainSim) MineBlock() types.Block { - // simulate chain activity by sending our existing outputs to new addresses - var txns []types.Transaction - for _, out := range cs.outputs { - txn := types.Transaction{ - SiacoinInputs: []types.SiacoinInput{{ - Parent: out, - SpendPolicy: types.PolicyPublicKey(cs.pubkey), - }}, - SiacoinOutputs: []types.SiacoinOutput{ - {Address: types.StandardAddress(cs.pubkey), Value: out.Value.Sub(types.NewCurrency64(cs.State.Index.Height + 1))}, - {Address: types.Address{byte(cs.nonce >> 48), byte(cs.nonce >> 56), 1, 2, 3}, Value: types.NewCurrency64(1)}, - }, - MinerFee: types.NewCurrency64(cs.State.Index.Height), - } - sigHash := cs.State.InputSigHash(txn) - for i := range txn.SiacoinInputs { - txn.SiacoinInputs[i].Signatures = []types.Signature{cs.privkey.SignHash(sigHash)} - } - - txns = append(txns, txn) - } - cs.outputs = cs.outputs[:0] - return cs.MineBlockWithTxns(txns...) -} - -// MineBlocks mine a number of blocks. -func (cs *ChainSim) MineBlocks(n int) []types.Block { - blocks := make([]types.Block, n) - for i := range blocks { - blocks[i] = cs.MineBlock() - } - return blocks -} - -// NewChainSim returns a new ChainSim useful for simulating forks. 
-func NewChainSim() *ChainSim { - // gift ourselves some coins in the genesis block - privkey := types.GeneratePrivateKey() - pubkey := privkey.PublicKey() - ourAddr := types.StandardAddress(pubkey) - gift := make([]types.SiacoinOutput, 10) - for i := range gift { - gift[i] = types.SiacoinOutput{ - Address: ourAddr, - Value: types.Siacoins(10 * uint32(i+1)), - } - } - genesisTxns := []types.Transaction{{SiacoinOutputs: gift}} - genesis := types.Block{ - Header: types.BlockHeader{ - Timestamp: time.Unix(734600000, 0).UTC(), - }, - Transactions: genesisTxns, - } - sau := consensus.GenesisUpdate(genesis, types.Work{NumHashes: [32]byte{31: 4}}) - var outputs []types.SiacoinElement - for _, out := range sau.NewSiacoinElements { - if out.Address == types.StandardAddress(pubkey) { - outputs = append(outputs, out) - } - } - return &ChainSim{ - Genesis: consensus.Checkpoint{ - Block: genesis, - State: sau.State, - }, - State: sau.State, - privkey: privkey, - pubkey: pubkey, - outputs: outputs, - } -} diff --git a/v2/internal/chainutil/chainutil_test.go b/v2/internal/chainutil/chainutil_test.go deleted file mode 100644 index 811bd655..00000000 --- a/v2/internal/chainutil/chainutil_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package chainutil - -import ( - "reflect" - "testing" - - "go.sia.tech/core/v2/types" - - "lukechampine.com/frand" -) - -func TestJust(t *testing.T) { - headers := []types.BlockHeader{{Height: 0}, {Height: 1}} - chainIndexes := []types.ChainIndex{headers[0].Index(), headers[1].Index()} - transactions := [][]types.Transaction{{{ArbitraryData: []byte("test")}}, {{MinerFee: types.NewCurrency64(1)}}} - transactionIDs := [][]types.TransactionID{{transactions[0][0].ID()}, {transactions[1][0].ID()}} - blocks := []types.Block{ - {Header: headers[0], Transactions: transactions[0]}, - {Header: headers[1], Transactions: transactions[1]}, - } - - if !reflect.DeepEqual(headers, JustHeaders(blocks)) { - t.Fatal("block header slice does not equal slice returned by 
JustHeaders") - } - if !reflect.DeepEqual(transactions, JustTransactions(blocks)) { - t.Fatal("transactions slice does not equal slice returned by JustTransactions") - } - if !reflect.DeepEqual(transactionIDs, JustTransactionIDs(blocks)) { - t.Fatal("transactionIDs slice does not equal slice returned by JustTransactionIDs") - } - if !reflect.DeepEqual(chainIndexes, JustChainIndexes(blocks)) { - t.Fatal("chainIndexes slice does not equal slice returned by JustChainIndexes") - } -} - -func TestChainSim(t *testing.T) { - sim := NewChainSim() - - for i := 0; i < 5; i++ { - height := uint64(i + 1) - block := sim.MineBlock() - - if height != block.Header.Height { - t.Fatalf("invalid block height: expected %d, got %d", height, block.Header.Height) - } - if block.Index() != sim.State.Index { - t.Fatalf("simulation index not updated, expected %v, got %v", block.Index(), sim.State.Index) - } - } - - var address types.Address - frand.Read(address[:]) - - // these amounts are below the gift amounts specified in NewChainSim - outputs := []types.SiacoinOutput{{Address: address, Value: types.NewCurrency64(1)}, {Address: address, Value: types.NewCurrency64(10)}} - block := sim.MineBlockWithSiacoinOutputs(outputs...) 
- found := false - for _, txn := range block.Transactions { - for _, output := range txn.SiacoinOutputs { - if output.Address == address { - found = true - break - } - } - } - if !found { - t.Fatal("siacoinoutputs to address were not found despite block being mined") - } - - fork := sim.Fork() - if sim.State.Index != fork.State.Index { - t.Fatalf("forked chain did not have same index as original chain, expected %v, got %v", sim.State.Index, fork.State.Index) - } - - lastIndex := sim.State.Index - sim.MineBlock() - if sim.State.Index == fork.State.Index { - t.Fatalf("fork incorrectly updated along with original chain, expected %v, got %v", lastIndex, fork.State.Index) - } - - lastIndex = sim.State.Index - fork.MineBlocks(2) - if sim.State.Index == fork.State.Index { - t.Fatalf("original chain incorrectly updated along with fork chain, expected %v, got %v", lastIndex, sim.State.Index) - } -} diff --git a/v2/internal/chainutil/store.go b/v2/internal/chainutil/store.go deleted file mode 100644 index df91644d..00000000 --- a/v2/internal/chainutil/store.go +++ /dev/null @@ -1,462 +0,0 @@ -package chainutil - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "path/filepath" - - "go.sia.tech/core/v2/chain" - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/merkle" - "go.sia.tech/core/v2/types" -) - -// EphemeralStore implements chain.ManagerStore in memory. -type EphemeralStore struct { - entries map[types.ChainIndex]consensus.Checkpoint - best []types.ChainIndex -} - -// AddCheckpoint implements chain.ManagerStore. -func (es *EphemeralStore) AddCheckpoint(c consensus.Checkpoint) error { - es.entries[c.State.Index] = c - return nil -} - -// Checkpoint implements chain.ManagerStore. -func (es *EphemeralStore) Checkpoint(index types.ChainIndex) (consensus.Checkpoint, error) { - e, ok := es.entries[index] - if !ok { - return consensus.Checkpoint{}, chain.ErrUnknownIndex - } - return e, nil -} - -// Header implements chain.ManagerStore. 
-func (es *EphemeralStore) Header(index types.ChainIndex) (types.BlockHeader, error) { - c, err := es.Checkpoint(index) - return c.Block.Header, err -} - -// ExtendBest implements chain.ManagerStore. -func (es *EphemeralStore) ExtendBest(index types.ChainIndex) error { - if _, ok := es.entries[index]; !ok { - panic("no entry for index") - } - es.best = append(es.best, index) - return nil -} - -// RewindBest implements chain.ManagerStore. -func (es *EphemeralStore) RewindBest() error { - es.best = es.best[:len(es.best)-1] - return nil -} - -// BestIndex implements chain.ManagerStore. -func (es *EphemeralStore) BestIndex(height uint64) (types.ChainIndex, error) { - baseHeight, tipHeight := es.best[0].Height, es.best[len(es.best)-1].Height - if !(baseHeight <= height && height <= tipHeight) { - return types.ChainIndex{}, chain.ErrUnknownIndex - } - return es.best[height-baseHeight], nil -} - -// Flush implements chain.ManagerStore. -func (es *EphemeralStore) Flush() error { return nil } - -// Close implements chain.ManagerStore. -func (es *EphemeralStore) Close() error { return nil } - -// NewEphemeralStore returns an in-memory chain.ManagerStore. -func NewEphemeralStore(c consensus.Checkpoint) *EphemeralStore { - return &EphemeralStore{ - entries: map[types.ChainIndex]consensus.Checkpoint{c.State.Index: c}, - best: []types.ChainIndex{c.State.Index}, - } -} - -type metadata struct { - indexSize int64 - entrySize int64 - tip types.ChainIndex -} - -// FlatStore implements chain.ManagerStore with persistent files. -type FlatStore struct { - indexFile *os.File - entryFile *os.File - bestFile *os.File - - meta metadata - metapath string - - base types.ChainIndex - offsets map[types.ChainIndex]int64 -} - -// AddCheckpoint implements chain.ManagerStore. 
-func (fs *FlatStore) AddCheckpoint(c consensus.Checkpoint) error { - offset, err := fs.entryFile.Seek(0, io.SeekEnd) - if err != nil { - return fmt.Errorf("failed to seek: %w", err) - } - if err := writeCheckpoint(fs.entryFile, c); err != nil { - return fmt.Errorf("failed to write checkpoint: %w", err) - } else if err := writeIndex(fs.indexFile, c.State.Index, offset); err != nil { - return fmt.Errorf("failed to write index: %w", err) - } - stat, err := fs.entryFile.Stat() - if err != nil { - return fmt.Errorf("failed to stat file: %w", err) - } - fs.offsets[c.State.Index] = offset - fs.meta.entrySize = stat.Size() - fs.meta.indexSize += indexSize - return nil -} - -// Checkpoint implements chain.ManagerStore. -func (fs *FlatStore) Checkpoint(index types.ChainIndex) (c consensus.Checkpoint, err error) { - if offset, ok := fs.offsets[index]; !ok { - return consensus.Checkpoint{}, chain.ErrUnknownIndex - } else if _, err := fs.entryFile.Seek(offset, io.SeekStart); err != nil { - return consensus.Checkpoint{}, fmt.Errorf("failed to seek entry file: %w", err) - } - err = readCheckpoint(bufio.NewReader(fs.entryFile), &c) - if err != nil { - return consensus.Checkpoint{}, fmt.Errorf("failed to read checkpoint: %w", err) - } - return -} - -// Header implements chain.ManagerStore. 
-func (fs *FlatStore) Header(index types.ChainIndex) (types.BlockHeader, error) { - b := make([]byte, 1+8+32+8+8+32+32) - if offset, ok := fs.offsets[index]; !ok { - return types.BlockHeader{}, chain.ErrUnknownIndex - } else if _, err := fs.entryFile.ReadAt(b, offset); err != nil { - return types.BlockHeader{}, fmt.Errorf("failed to read header at offset %v: %w", offset, err) - } - d := types.NewBufDecoder(b) - if version := d.ReadUint8(); version != 1 { - return types.BlockHeader{}, fmt.Errorf("unsupported block version (%v)", version) - } - var h types.BlockHeader - h.DecodeFrom(d) - if err := d.Err(); err != nil { - return types.BlockHeader{}, fmt.Errorf("failed to decode header: %w", err) - } - return h, nil -} - -// ExtendBest implements chain.ManagerStore. -func (fs *FlatStore) ExtendBest(index types.ChainIndex) error { - if err := writeBest(fs.bestFile, index); err != nil { - return fmt.Errorf("failed to write to store: %w", err) - } - fs.meta.tip = index - return nil -} - -// RewindBest implements chain.ManagerStore. -func (fs *FlatStore) RewindBest() error { - index, err := fs.BestIndex(fs.meta.tip.Height - 1) - if err != nil { - return fmt.Errorf("failed to get parent index %v: %w", fs.meta.tip.Height-1, err) - } else if off, err := fs.bestFile.Seek(-bestSize, io.SeekEnd); err != nil { - return fmt.Errorf("failed to seek best file: %w", err) - } else if err := fs.bestFile.Truncate(off); err != nil { - return fmt.Errorf("failed to truncate file: %w", err) - } - fs.meta.tip = index - return nil -} - -// BestIndex implements chain.ManagerStore. 
-func (fs *FlatStore) BestIndex(height uint64) (index types.ChainIndex, err error) { - if height < fs.base.Height { - return types.ChainIndex{}, chain.ErrPruned - } - offset := int64(height-fs.base.Height) * bestSize - buf := make([]byte, bestSize) - if _, err = fs.bestFile.ReadAt(buf, offset); err == io.EOF { - err = chain.ErrUnknownIndex - return - } - - d := types.NewBufDecoder(buf) - index.DecodeFrom(d) - if err = d.Err(); err != nil { - err = fmt.Errorf("failed to decode index: %w", err) - return - } - return index, nil -} - -// Flush implements chain.ManagerStore. -func (fs *FlatStore) Flush() error { - // TODO: also sync parent directory? - if err := fs.indexFile.Sync(); err != nil { - return fmt.Errorf("failed to sync index file: %w", err) - } else if err := fs.entryFile.Sync(); err != nil { - return fmt.Errorf("failed to sync entry file: %w", err) - } else if err := fs.bestFile.Sync(); err != nil { - return fmt.Errorf("failed to sync best file: %w", err) - } - - // atomically update metafile - f, err := os.OpenFile(fs.metapath+"_tmp", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0660) - if err != nil { - return fmt.Errorf("failed to open tmp file: %w", err) - } - defer f.Close() - if err := writeMeta(f, fs.meta); err != nil { - return fmt.Errorf("failed to write meta tmp: %w", err) - } else if f.Sync(); err != nil { - return fmt.Errorf("failed to sync meta tmp: %w", err) - } else if f.Close(); err != nil { - return fmt.Errorf("failed to close meta tmp: %w", err) - } else if err := os.Rename(fs.metapath+"_tmp", fs.metapath); err != nil { - return fmt.Errorf("failed to rename meta tmp: %w", err) - } - - return nil -} - -func (fs *FlatStore) recoverBest(tip types.ChainIndex) error { - // if the store is empty, wipe the bestFile too - if len(fs.offsets) == 0 { - if err := fs.bestFile.Truncate(0); err != nil { - return fmt.Errorf("failed to truncate best file: %w", err) - } - return nil - } - - // truncate to multiple of bestSize - if stat, err := fs.bestFile.Stat(); 
err != nil { - return fmt.Errorf("failed to stat best file: %w", err) - } else if n := stat.Size() / bestSize; n%bestSize != 0 { - if err := fs.bestFile.Truncate(n * bestSize); err != nil { - return fmt.Errorf("failed to truncate best file: %w", err) - } - } - - // initialize base - base, err := readBest(fs.bestFile) - if err != nil { - return fmt.Errorf("failed to initialize best index: %w", err) - } - fs.base = base - - // recover best chain by reading parents of tip, stopping when the index is - // also in bestFile - index := tip - var path []types.ChainIndex - for { - if bestIndex, err := fs.BestIndex(index.Height); err != nil && !errors.Is(err, chain.ErrUnknownIndex) { - return fmt.Errorf("failed to get index at %v: %w", index.Height, err) - } else if err == nil { - return nil - } else if bestIndex == index { - break - } - path = append(path, index) - h, err := fs.Header(index) - if err != nil { - return fmt.Errorf("failed to get block header %v: %w", index, err) - } - index = h.ParentIndex() - } - // truncate and extend - if err := fs.bestFile.Truncate(int64(index.Height-base.Height) * bestSize); err != nil { - return fmt.Errorf("failed to truncate best file (%v - %v): %w", index.Height, base.Height, err) - } - for i := len(path) - 1; i >= 0; i-- { - if err := fs.ExtendBest(path[i]); err != nil { - return fmt.Errorf("failed to extend best file %v: %w", path[i], err) - } - } - - return nil -} - -// Close closes the store. -func (fs *FlatStore) Close() (err error) { - errs := []error{ - fmt.Errorf("error closing index file: %w", fs.indexFile.Close()), - fmt.Errorf("error closing entry file: %w", fs.entryFile.Close()), - fmt.Errorf("error closing best file: %w", fs.bestFile.Close()), - } - for _, err := range errs { - if errors.Unwrap(err) != nil { - return err - } - } - return nil -} - -// NewFlatStore returns a FlatStore that stores data in the specified dir. 
-func NewFlatStore(dir string, c consensus.Checkpoint) (*FlatStore, consensus.Checkpoint, error) { - indexFile, err := os.OpenFile(filepath.Join(dir, "index.dat"), os.O_CREATE|os.O_RDWR, 0o660) - if err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("unable to open index file: %w", err) - } - entryFile, err := os.OpenFile(filepath.Join(dir, "entry.dat"), os.O_CREATE|os.O_RDWR, 0o660) - if err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("unable to open entry file: %w", err) - } - bestFile, err := os.OpenFile(filepath.Join(dir, "best.dat"), os.O_CREATE|os.O_RDWR, 0o660) - if err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("unable to open best file: %w", err) - } - - // trim indexFile and entryFile according to metadata - metapath := filepath.Join(dir, "meta.dat") - meta, err := readMetaFile(metapath) - if errors.Is(err, os.ErrNotExist) { - // initial metadata - meta = metadata{tip: c.State.Index} - } else if err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("unable to read meta file %s: %w", metapath, err) - } else if err := indexFile.Truncate(meta.indexSize); err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("failed to truncate meta index: %w", err) - } else if err := entryFile.Truncate(meta.entrySize); err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("failed to truncate meta entrry: %w", err) - } - - // read index entries into map - offsets := make(map[types.ChainIndex]int64) - for { - index, offset, err := readIndex(indexFile) - if err == io.EOF { - break - } else if err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("failed to read index: %w", err) - } - offsets[index] = offset - } - - fs := &FlatStore{ - indexFile: indexFile, - entryFile: entryFile, - bestFile: bestFile, - - meta: meta, - metapath: metapath, - - base: c.State.Index, - offsets: offsets, - } - - // recover bestFile, if necessary - if err := fs.recoverBest(meta.tip); err != nil { - return nil, 
consensus.Checkpoint{}, fmt.Errorf("unable to recover best at %v: %w", meta.tip, err) - } - if _, err := fs.bestFile.Seek(0, io.SeekEnd); err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("unable to seek to end of best file: %w", err) - } - - // if store is empty, write base entry - if len(fs.offsets) == 0 { - if err := fs.AddCheckpoint(c); err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("unable to write checkpoint for %v: %w", c.State.Index, err) - } else if err := fs.ExtendBest(c.State.Index); err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("failed to extend best for %v: %w", c.State.Index, err) - } - return fs, c, nil - } - - c, err = fs.Checkpoint(meta.tip) - if err != nil { - return nil, consensus.Checkpoint{}, fmt.Errorf("unable to get checkpoint %v: %w", meta.tip, err) - } - return fs, c, nil -} - -const ( - bestSize = 40 - indexSize = 48 - metaSize = 56 -) - -func bufferedDecoder(r io.Reader, size int) (*types.Decoder, error) { - buf := make([]byte, size) - _, err := io.ReadFull(r, buf) - return types.NewBufDecoder(buf), err -} - -func writeMeta(w io.Writer, meta metadata) error { - e := types.NewEncoder(w) - e.WriteUint64(uint64(meta.indexSize)) - e.WriteUint64(uint64(meta.entrySize)) - meta.tip.EncodeTo(e) - return e.Flush() -} - -func readMeta(r io.Reader) (meta metadata, err error) { - d, err := bufferedDecoder(r, metaSize) - meta.indexSize = int64(d.ReadUint64()) - meta.entrySize = int64(d.ReadUint64()) - meta.tip.DecodeFrom(d) - return -} - -func readMetaFile(path string) (meta metadata, err error) { - f, err := os.Open(path) - if err != nil { - return metadata{}, fmt.Errorf("unable to open metafile %s: %w", path, err) - } - defer f.Close() - meta, err = readMeta(f) - if err != nil { - err = fmt.Errorf("unable to read meta file %s: %w", path, err) - } - return -} - -func writeBest(w io.Writer, index types.ChainIndex) error { - e := types.NewEncoder(w) - index.EncodeTo(e) - return e.Flush() -} - -func readBest(r 
io.Reader) (index types.ChainIndex, err error) { - d, err := bufferedDecoder(r, bestSize) - index.DecodeFrom(d) - return -} - -func writeIndex(w io.Writer, index types.ChainIndex, offset int64) error { - e := types.NewEncoder(w) - index.EncodeTo(e) - e.WriteUint64(uint64(offset)) - return e.Flush() -} - -func readIndex(r io.Reader) (index types.ChainIndex, offset int64, err error) { - d, err := bufferedDecoder(r, indexSize) - index.DecodeFrom(d) - offset = int64(d.ReadUint64()) - return -} - -func writeCheckpoint(w io.Writer, c consensus.Checkpoint) error { - e := types.NewEncoder(w) - (merkle.CompressedBlock)(c.Block).EncodeTo(e) - c.State.EncodeTo(e) - return e.Flush() -} - -func readCheckpoint(r io.Reader, c *consensus.Checkpoint) error { - d := types.NewDecoder(io.LimitedReader{ - R: r, - N: 10e6, // a checkpoint should never be anywhere near this large - }) - (*merkle.CompressedBlock)(&c.Block).DecodeFrom(d) - c.State.DecodeFrom(d) - return d.Err() -} diff --git a/v2/internal/chainutil/store_test.go b/v2/internal/chainutil/store_test.go deleted file mode 100644 index ce429760..00000000 --- a/v2/internal/chainutil/store_test.go +++ /dev/null @@ -1,153 +0,0 @@ -package chainutil - -import ( - "io" - "os" - "testing" - - "go.sia.tech/core/v2/chain" - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/types" -) - -func TestFlatStoreRecovery(t *testing.T) { - dir := t.TempDir() - sim := NewChainSim() - fs, _, err := NewFlatStore(dir, sim.Genesis) - if err != nil { - t.Fatal(err) - } - - invalidIndex := types.ChainIndex{Height: 9999} - if _, err := fs.Checkpoint(invalidIndex); err != chain.ErrUnknownIndex { - t.Fatal("Checkpoint returned no error for an invalid index") - } - if _, err := fs.BestIndex(invalidIndex.Height); err != chain.ErrUnknownIndex { - t.Fatal("BestIndex returned no error for an invalid block height") - } - - // add some blocks and flush meta - blocks := sim.MineBlocks(5) - for _, block := range blocks { - if err := 
fs.AddCheckpoint(consensus.Checkpoint{ - Block: block, - State: sim.State, - }); err != nil { - t.Fatal(err) - } else if err := fs.ExtendBest(sim.State.Index); err != nil { - t.Fatal(err) - } - } - if fs.Flush(); err != nil { - t.Fatal(err) - } - - // compare tips - if fs.meta.tip != sim.State.Index { - t.Fatal("meta tip mismatch", fs.meta.tip, sim.State.Index) - } else if index, err := fs.BestIndex(fs.meta.tip.Height); err != nil || index != fs.meta.tip { - t.Fatal("tip mismatch", index, fs.meta.tip) - } - goodTip := fs.meta.tip - - // add more blocks, then close without flushing - blocks = sim.MineBlocks(5) - for _, block := range blocks { - if err := fs.AddCheckpoint(consensus.Checkpoint{ - Block: block, - State: sim.State, - }); err != nil { - t.Fatal(err) - } else if err := fs.ExtendBest(sim.State.Index); err != nil { - t.Fatal(err) - } - } - - // simulate write failure by corrupting index, entry, and best files - for _, f := range []*os.File{fs.indexFile, fs.entryFile, fs.bestFile} { - f.Seek(-10, io.SeekEnd) - f.WriteString("garbagegarbage") - } - if index, err := fs.BestIndex(fs.meta.tip.Height); err != nil { - t.Fatal(err) - } else if index == fs.meta.tip { - t.Fatal("tip should not match after corruption") - } - - // reload fs; should recover to last good state - fs.indexFile.Close() - fs.entryFile.Close() - fs.bestFile.Close() - fs, tip, err := NewFlatStore(dir, sim.Genesis) - if err != nil { - t.Fatal(err) - } - if tip.State.Index != goodTip || fs.meta.tip != goodTip { - t.Fatal("tip mismatch", tip.State.Index, fs.meta.tip, goodTip) - } else if index, err := fs.BestIndex(goodTip.Height); err != nil || index != goodTip { - t.Fatal("tip mismatch", index, goodTip) - } - fs.Close() -} - -func TestEphemeralStore(t *testing.T) { - sim := NewChainSim() - es := NewEphemeralStore(sim.Genesis) - - // add some blocks - blocks := sim.MineBlocks(5) - for _, block := range blocks { - if err := es.AddCheckpoint(consensus.Checkpoint{ - Block: block, - State: sim.State, 
- }); err != nil { - t.Fatal(err) - } else if err := es.ExtendBest(sim.State.Index); err != nil { - t.Fatal(err) - } - } - // ephemeral store flush should always return nil - if err := es.Flush(); err != nil { - t.Fatal(err) - } - - tip, err := es.Header(sim.State.Index) - if err != nil { - t.Fatal(err) - } - // compare tips - if tip.Index() != sim.State.Index { - t.Fatal("tip mismatch", tip.Index(), sim.State.Index) - } else if index, err := es.BestIndex(tip.Height); err != nil || index != tip.Index() { - t.Fatal("tip mismatch", index, tip) - } - - invalidIndex := types.ChainIndex{Height: 9999} - if _, err := es.Checkpoint(invalidIndex); err != chain.ErrUnknownIndex { - t.Fatal("Checkpoint returned no error for an invalid index") - } - if _, err := es.BestIndex(invalidIndex.Height); err != chain.ErrUnknownIndex { - t.Fatal("BestIndex returned no error for an invalid block height") - } -} - -func BenchmarkFlatStore(b *testing.B) { - fs, _, err := NewFlatStore(b.TempDir(), consensus.Checkpoint{}) - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - b.ReportAllocs() - - cp := consensus.Checkpoint{ - Block: types.Block{ - Transactions: make([]types.Transaction, 10), - }, - } - - for i := 0; i < b.N; i++ { - if err := fs.AddCheckpoint(cp); err != nil { - b.Fatal(err) - } - } -} diff --git a/v2/merkle/accumulator.go b/v2/merkle/accumulator.go deleted file mode 100644 index 8a483c67..00000000 --- a/v2/merkle/accumulator.go +++ /dev/null @@ -1,511 +0,0 @@ -package merkle - -import ( - "encoding/binary" - "encoding/json" - "errors" - "math/bits" - "sort" - "sync" - - "go.sia.tech/core/v2/types" -) - -// Pool for reducing heap allocations when hashing. This is only necessary -// because blake2b.New256 returns a hash.Hash interface, which prevents the -// compiler from doing escape analysis. Can be removed if we switch to an -// implementation whose constructor returns a concrete type. 
-var hasherPool = &sync.Pool{New: func() interface{} { return types.NewHasher() }} - -// An ElementLeaf represents a leaf in the ElementAccumulator Merkle tree. -type ElementLeaf struct { - types.StateElement - ElementHash types.Hash256 - Spent bool -} - -// Hash returns the leaf's hash, for direct use in the Merkle tree. -func (l ElementLeaf) Hash() types.Hash256 { - buf := make([]byte, 1+32+8+1) - buf[0] = leafHashPrefix - copy(buf[1:], l.ElementHash[:]) - binary.LittleEndian.PutUint64(buf[33:], l.LeafIndex) - if l.Spent { - buf[41] = 1 - } - return types.HashBytes(buf) -} - -// ProofRoot returns the root obtained from the leaf and its proof.. -func (l ElementLeaf) ProofRoot() types.Hash256 { - return ProofRoot(l.Hash(), l.LeafIndex, l.MerkleProof) -} - -// SiacoinLeaf returns the ElementLeaf for a SiacoinElement. -func SiacoinLeaf(e types.SiacoinElement, spent bool) ElementLeaf { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - h.E.WriteString("sia/leaf/siacoin") - e.ID.EncodeTo(h.E) - e.SiacoinOutput.EncodeTo(h.E) - h.E.WriteUint64(e.MaturityHeight) - return ElementLeaf{ - StateElement: e.StateElement, - ElementHash: h.Sum(), - Spent: spent, - } -} - -// SiafundLeaf returns the ElementLeaf for a SiafundElement. -func SiafundLeaf(e types.SiafundElement, spent bool) ElementLeaf { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - h.E.WriteString("sia/leaf/siafund") - e.ID.EncodeTo(h.E) - e.SiafundOutput.EncodeTo(h.E) - e.ClaimStart.EncodeTo(h.E) - return ElementLeaf{ - StateElement: e.StateElement, - ElementHash: h.Sum(), - Spent: spent, - } -} - -// FileContractLeaf returns the ElementLeaf for a FileContractElement. 
-func FileContractLeaf(e types.FileContractElement, spent bool) ElementLeaf { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - h.E.WriteString("sia/leaf/filecontract") - e.ID.EncodeTo(h.E) - e.FileContract.EncodeTo(h.E) - return ElementLeaf{ - StateElement: e.StateElement, - ElementHash: h.Sum(), - Spent: spent, - } -} - -// An Accumulator tracks the state of an unbounded number of leaves without -// storing the leaves themselves. -type Accumulator struct { - // A set of perfect Merkle trees, containing at most one tree at each - // height. Only the root of each tree is stored. - Trees [64]types.Hash256 - NumLeaves uint64 -} - -// hasTreeAtHeight returns true if the Accumulator contains a tree root at the -// specified height. -func (acc *Accumulator) hasTreeAtHeight(height int) bool { - return acc.NumLeaves&(1< startOfNewTree && j >= 0; j-- { - leaves[j].MerkleProof = append(leaves[j].MerkleProof, oldRoot) - } - for ; j > startOfOldTree && j >= 0; j-- { - leaves[j].MerkleProof = append(leaves[j].MerkleProof, h) - } - // Record the left- and right-hand roots in treeGrowth, where - // applicable. - curTreeIndex := (acc.NumLeaves + 1) - 1<= curTreeIndex { - treeGrowth[bit] = append(treeGrowth[bit], oldRoot) - } else if treeStartIndex >= prevTreeIndex { - treeGrowth[bit] = append(treeGrowth[bit], h) - } - } - // Merge with the existing tree at this height. Since we're always - // adding leaves on the right-hand side of the tree, the existing - // root is always the left-hand sibling. - h = NodeHash(oldRoot, h) - } - } - return treeGrowth -} - -// updateLeaves overwrites the specified leaves in the accumulator. It updates -// the Merkle proofs of each leaf, and returns the leaves (grouped by tree) for -// later use. 
-func (acc *ElementAccumulator) updateLeaves(leaves []ElementLeaf) [64][]ElementLeaf { - var recompute func(i, j uint64, leaves []ElementLeaf) types.Hash256 - recompute = func(i, j uint64, leaves []ElementLeaf) types.Hash256 { - height := bits.TrailingZeros64(j - i) // equivalent to log2(j-i), as j-i is always a power of two - if len(leaves) == 1 && height == 0 { - return leaves[0].Hash() - } - mid := (i + j) / 2 - left, right := splitLeaves(leaves, mid) - var leftRoot, rightRoot types.Hash256 - if len(left) == 0 { - leftRoot = right[0].MerkleProof[height-1] - } else { - leftRoot = recompute(i, mid, left) - for i := range right { - right[i].MerkleProof[height-1] = leftRoot - } - } - if len(right) == 0 { - rightRoot = left[0].MerkleProof[height-1] - } else { - rightRoot = recompute(mid, j, right) - for i := range left { - left[i].MerkleProof[height-1] = rightRoot - } - } - return NodeHash(leftRoot, rightRoot) - } - - // Group leaves by tree, and sort them by leaf index. - var trees [64][]ElementLeaf - sort.Slice(leaves, func(i, j int) bool { - if len(leaves[i].MerkleProof) != len(leaves[j].MerkleProof) { - return len(leaves[i].MerkleProof) < len(leaves[j].MerkleProof) - } - return leaves[i].LeafIndex < leaves[j].LeafIndex - }) - for len(leaves) > 0 { - i := 0 - for i < len(leaves) && len(leaves[i].MerkleProof) == len(leaves[0].MerkleProof) { - i++ - } - trees[len(leaves[0].MerkleProof)] = leaves[:i] - leaves = leaves[i:] - } - - // Recompute the root of each tree with updated leaves, and fill in the - // proof of each leaf. - for height, leaves := range &trees { - if len(leaves) == 0 { - continue - } - // Determine the range of leaf indices that comprise this tree. We can - // compute this efficiently by zeroing the least-significant bits of - // NumLeaves. (Zeroing these bits is equivalent to subtracting the - // number of leaves in all trees smaller than this one.) 
- start := clearBits(acc.NumLeaves, height+1) - end := start + 1<= eru.numLeaves { - panic("cannot update an element that is not present in the accumulator") - } - if mh := mergeHeight(eru.numLeaves, e.LeafIndex); mh <= len(e.MerkleProof) { - e.MerkleProof = e.MerkleProof[:mh-1] - } - updateProof(e, &eru.updated) -} - -func historyLeafHash(index types.ChainIndex) types.Hash256 { - buf := make([]byte, 1+8+32) - buf[0] = leafHashPrefix - binary.LittleEndian.PutUint64(buf[1:], index.Height) - copy(buf[9:], index.ID[:]) - return types.HashBytes(buf) -} - -func historyProofRoot(index types.ChainIndex, proof []types.Hash256) types.Hash256 { - return ProofRoot(historyLeafHash(index), index.Height, proof) -} - -// A HistoryAccumulator tracks the state of all ChainIndexs in a chain without -// storing the full sequence of indexes itself. -type HistoryAccumulator struct { - Accumulator -} - -// Contains returns true if the accumulator contains the given index. -func (acc *HistoryAccumulator) Contains(index types.ChainIndex, proof []types.Hash256) bool { - return acc.hasTreeAtHeight(len(proof)) && acc.Trees[len(proof)] == historyProofRoot(index, proof) -} - -// ApplyBlock integrates a ChainIndex into the accumulator, producing a -// HistoryApplyUpdate. -func (acc *HistoryAccumulator) ApplyBlock(index types.ChainIndex) (hau HistoryApplyUpdate) { - h := historyLeafHash(index) - i := 0 - for ; acc.hasTreeAtHeight(i); i++ { - hau.proof = append(hau.proof, acc.Trees[i]) - hau.growth = append(hau.growth, h) - h = NodeHash(acc.Trees[i], h) - } - acc.Trees[i] = h - acc.NumLeaves++ - return -} - -// RevertBlock produces a HistoryRevertUpdate from a ChainIndex. -func (acc *HistoryAccumulator) RevertBlock(index types.ChainIndex) HistoryRevertUpdate { - return HistoryRevertUpdate{index} -} - -// A HistoryApplyUpdate reflects the changes to a HistoryAccumulator resulting -// from the application of a block. 
-type HistoryApplyUpdate struct { - proof []types.Hash256 - growth []types.Hash256 -} - -// HistoryProof returns a history proof for the applied block. To prevent -// aliasing, it always returns new memory. -func (hau *HistoryApplyUpdate) HistoryProof() []types.Hash256 { - return append([]types.Hash256(nil), hau.proof...) -} - -// UpdateProof updates the supplied history proof to incorporate changes made to -// the chain history. The proof must be up-to-date; if it is not, UpdateProof -// may panic. -func (hau *HistoryApplyUpdate) UpdateProof(proof *[]types.Hash256) { - if len(hau.growth) > len(*proof) { - *proof = append(*proof, hau.growth[len(*proof)]) - *proof = append(*proof, hau.proof[len(*proof):]...) - } -} - -// UpdateWindowProof updates the supplied storage proof to incorporate changes -// made to the chain history. The proof must be up-to-date; if it is not, -// UpdateWindowProof may panic. -func (hau *HistoryApplyUpdate) UpdateWindowProof(sp *types.StorageProof) { - hau.UpdateProof(&sp.WindowProof) -} - -// A HistoryRevertUpdate reflects the changes to a HistoryAccumulator resulting -// from the removal of a block. -type HistoryRevertUpdate struct { - index types.ChainIndex -} - -// UpdateProof updates the supplied history proof to incorporate the changes -// made to the chain history. The proof must be up-to-date; if it is not, -// UpdateWindowProof may panic. -func (hru *HistoryRevertUpdate) UpdateProof(height uint64, proof *[]types.Hash256) { - if mh := mergeHeight(hru.index.Height, height); mh <= len(*proof) { - *proof = (*proof)[:mh-1] - } -} - -// UpdateWindowProof updates the supplied storage proof to incorporate the -// changes made to the chain history. The proof must be up-to-date; if it is -// not, UpdateWindowProof may panic. 
-func (hru *HistoryRevertUpdate) UpdateWindowProof(sp *types.StorageProof) { - hru.UpdateProof(sp.WindowStart.Height, &sp.WindowProof) -} diff --git a/v2/merkle/accumulator_test.go b/v2/merkle/accumulator_test.go deleted file mode 100644 index 249f75d0..00000000 --- a/v2/merkle/accumulator_test.go +++ /dev/null @@ -1,464 +0,0 @@ -package merkle - -import ( - "reflect" - "testing" - - "go.sia.tech/core/v2/types" - - "lukechampine.com/frand" -) - -func TestUpdateLeavesSiacoin(t *testing.T) { - outputs := make([]types.SiacoinElement, 8) - leaves := make([]ElementLeaf, len(outputs)) - for i := range outputs { - leaves[i] = SiacoinLeaf(outputs[i], false) - } - var acc ElementAccumulator - acc.addLeaves(leaves) - for i := range outputs { - outputs[i].StateElement = leaves[i].StateElement - } - for _, leaf := range leaves { - if !acc.containsLeaf(leaf) { - t.Fatal("accumulator missing leaf that was added to it") - } - } - - updated := []ElementLeaf{ - SiacoinLeaf(outputs[0], true), - SiacoinLeaf(outputs[2], true), - SiacoinLeaf(outputs[3], true), - SiacoinLeaf(outputs[5], true), - SiacoinLeaf(outputs[6], true), - } - - acc.updateLeaves(updated) - - var acc2 Accumulator - addOutput := func(o types.SiacoinElement, spent bool) { - // seek to first open slot, merging nodes as we go - root := SiacoinLeaf(o, spent).Hash() - i := 0 - for ; acc2.hasTreeAtHeight(i); i++ { - root = NodeHash(acc2.Trees[i], root) - } - acc2.Trees[i] = root - acc2.NumLeaves++ - } - for i, o := range outputs { - switch i { - case 0, 2, 3, 5, 6: - addOutput(o, true) - if !acc.ContainsSpentSiacoinElement(o) { - t.Fatal("accumulator missing spent siacoin element") - } - default: - addOutput(o, false) - if acc.ContainsSpentSiacoinElement(o) { - t.Fatal("accumulator missing unspent siacoin element") - } - } - } - for i := range acc2.Trees { - if acc2.hasTreeAtHeight(i) { - if !acc2.hasTreeAtHeight(i) { - t.Fatal("mismatch") - } - if acc2.Trees[i] != acc.Trees[i] { - t.Fatal("mismatch") - } - } - } -} - 
-func TestUpdateLeavesSiafund(t *testing.T) { - outputs := make([]types.SiafundElement, 8) - leaves := make([]ElementLeaf, len(outputs)) - for i := range outputs { - leaves[i] = SiafundLeaf(outputs[i], false) - } - var acc ElementAccumulator - acc.addLeaves(leaves) - for i := range outputs { - outputs[i].StateElement = leaves[i].StateElement - } - for _, leaf := range leaves { - if !acc.containsLeaf(leaf) { - t.Fatal("accumulator missing leaf that was added to it") - } - } - - updated := []ElementLeaf{ - SiafundLeaf(outputs[0], true), - SiafundLeaf(outputs[2], true), - SiafundLeaf(outputs[3], true), - SiafundLeaf(outputs[5], true), - SiafundLeaf(outputs[6], true), - } - - acc.updateLeaves(updated) - - var acc2 Accumulator - addOutput := func(o types.SiafundElement, spent bool) { - // seek to first open slot, merging nodes as we go - root := SiafundLeaf(o, spent).Hash() - i := 0 - for ; acc2.hasTreeAtHeight(i); i++ { - root = NodeHash(acc2.Trees[i], root) - } - acc2.Trees[i] = root - acc2.NumLeaves++ - } - for i, o := range outputs { - switch i { - case 0, 2, 3, 5, 6: - addOutput(o, true) - if !acc.ContainsSpentSiafundElement(o) { - t.Fatal("accumulator missing spent siafund element") - } - default: - addOutput(o, false) - if acc.ContainsSpentSiafundElement(o) { - t.Fatal("accumulator missing unspent siafund element") - } - } - } - for i := range acc2.Trees { - if acc2.hasTreeAtHeight(i) { - if !acc2.hasTreeAtHeight(i) { - t.Fatal("mismatch") - } - if acc2.Trees[i] != acc.Trees[i] { - t.Fatal("mismatch") - } - } - } -} - -func TestApplyBlock(t *testing.T) { - // create some elements and add them to the initial accumulator - sces := make([]types.SiacoinElement, 7) - sfes := make([]types.SiafundElement, 7) - fces := make([]types.FileContractElement, 7) - leaves := make([]ElementLeaf, 0, len(sces)+len(sfes)+len(fces)) - for i := range sces { - sces[i].ID.Index = uint64(len(leaves)) - leaves = append(leaves, SiacoinLeaf(sces[i], false)) - } - for i := range sfes { - 
sfes[i].ID.Index = uint64(len(leaves)) - leaves = append(leaves, SiafundLeaf(sfes[i], false)) - } - for i := range fces { - fces[i].ID.Index = uint64(len(leaves)) - leaves = append(leaves, FileContractLeaf(fces[i], false)) - } - var acc ElementAccumulator - acc.NumLeaves = 6 - acc.ApplyBlock(nil, leaves) - for i := range sces { - sces[i].StateElement = leaves[i].StateElement - } - for i := range sfes { - sfes[i].StateElement = leaves[len(sces)+i].StateElement - } - for i := range fces { - fces[i].StateElement = leaves[len(sces)+len(sfes)+i].StateElement - } - // all leaves should be present in the accumulator - for _, sce := range sces { - if !acc.ContainsUnspentSiacoinElement(sce) || acc.ContainsSpentSiacoinElement(sce) { - t.Fatal("unspent siacoin element should be reflected in accumulator") - } - } - for _, sfe := range sfes { - if !acc.ContainsUnspentSiafundElement(sfe) || acc.ContainsSpentSiafundElement(sfe) { - t.Fatal("unspent siafund element should be reflected in accumulator") - } - } - for _, fce := range fces { - if !acc.ContainsUnresolvedFileContractElement(fce) || acc.ContainsResolvedFileContractElement(fce) { - t.Fatal("unresolved file contract should be reflected in accumulator") - } - } - - // mark some of the leaves as spent - spent := []ElementLeaf{ - SiacoinLeaf(sces[0], true), - SiafundLeaf(sfes[0], true), - FileContractLeaf(fces[0], true), - } - // acc and elements will be modified; save copies for later - oldAcc := acc - oldSpent := append([]ElementLeaf(nil), spent...) - for i := range oldSpent { - oldSpent[i].MerkleProof = append([]types.Hash256(nil), oldSpent[i].MerkleProof...) 
- } - eau := acc.ApplyBlock(spent, nil) - // update proofs - for i := range sces { - eau.UpdateElementProof(&sces[i].StateElement) - } - for i := range sfes { - eau.UpdateElementProof(&sfes[i].StateElement) - } - for i := range fces { - eau.UpdateElementProof(&fces[i].StateElement) - } - // the spent leaves should be marked as such in the accumulator - if !acc.ContainsSpentSiacoinElement(sces[0]) || acc.ContainsUnspentSiacoinElement(sces[0]) { - t.Fatal("spent siacoin element should be reflected in accumulator") - } - if !acc.ContainsSpentSiafundElement(sfes[0]) || acc.ContainsUnspentSiafundElement(sfes[0]) { - t.Fatal("spent siafund element should be reflected in accumulator") - } - if !acc.ContainsResolvedFileContractElement(fces[0]) || acc.ContainsUnresolvedFileContractElement(fces[0]) { - t.Fatal("resolved file contract should be reflected in accumulator") - } - // other leaves should still be unspent - for _, sce := range sces[1:] { - if !acc.ContainsUnspentSiacoinElement(sce) || acc.ContainsSpentSiacoinElement(sce) { - t.Fatal("unspent siacoin element should be reflected in accumulator") - } - } - for _, sfe := range sfes[1:] { - if !acc.ContainsUnspentSiafundElement(sfe) || acc.ContainsSpentSiafundElement(sfe) { - t.Fatal("unspent siafund element should be reflected in accumulator") - } - } - for _, fce := range fces[1:] { - if !acc.ContainsUnresolvedFileContractElement(fce) || acc.ContainsResolvedFileContractElement(fce) { - t.Fatal("unresolved file contract should be reflected in accumulator") - } - } - - // restore old copies and revert the block - acc = oldAcc - spent = oldSpent - for i := range spent { - spent[i].Spent = false - } - eru := acc.RevertBlock(spent) - // update proofs - for i := range sces { - eru.UpdateElementProof(&sces[i].StateElement) - } - for i := range sfes { - eru.UpdateElementProof(&sfes[i].StateElement) - } - for i := range fces { - eru.UpdateElementProof(&fces[i].StateElement) - } - - // all leaves should be unspent again - for 
_, sce := range sces { - if !acc.ContainsUnspentSiacoinElement(sce) || acc.ContainsSpentSiacoinElement(sce) { - t.Fatal("unspent siacoin element should be reflected in accumulator") - } - } - for _, sfe := range sfes { - if !acc.ContainsUnspentSiafundElement(sfe) || acc.ContainsSpentSiafundElement(sfe) { - t.Fatal("unspent siafund element should be reflected in accumulator") - } - } - for _, fce := range fces { - if !acc.ContainsUnresolvedFileContractElement(fce) || acc.ContainsResolvedFileContractElement(fce) { - t.Fatal("unresolved file contract should be reflected in accumulator") - } - } -} - -func TestHistoryAccumulator(t *testing.T) { - blocks := make([]types.ChainIndex, 16) - for i := range blocks { - blocks[i].Height = uint64(i) - frand.Read(blocks[i].ID[:]) - } - - // test every subset of blocks 0..n - for n := 1; n < len(blocks); n++ { - // insert blocks into accumulator - var acc HistoryAccumulator - var accs []HistoryAccumulator - proofs := make([][]types.Hash256, n) - for i, index := range blocks[:n] { - accs = append(accs, acc) - hau := acc.ApplyBlock(index) - proofs[i] = hau.HistoryProof() - for j := 0; j < i; j++ { - hau.UpdateProof(&proofs[j]) - } - } - // check that all blocks are present - for i, index := range blocks[:n] { - if !acc.Contains(index, proofs[i]) { - t.Fatal("history accumulator missing block") - } - } - // check that using the wrong proof doesn't work - for _, proof := range proofs[1:] { - if acc.Contains(blocks[0], proof) { - t.Fatal("history accumulator claims to contain block with wrong proof") - } - } - - // revert each block - for i := n - 1; i >= 0; i-- { - // revert latest block - acc := accs[i] - eru := acc.RevertBlock(blocks[i]) - // update proofs of remaining blocks - for j := 0; j < i; j++ { - eru.UpdateProof(uint64(j), &proofs[j]) - } - // check that blocks < i are still present, and blocks >= i are not - for j, index := range blocks[:n] { - if j < i && !acc.Contains(index, proofs[j]) { - t.Fatal("history accumulator 
missing block") - } else if acc.Contains(index, proofs[i]) { - t.Fatal("history accumulator contains reverted block") - } - } - } - } -} - -func TestMarshalJSON(t *testing.T) { - eq := func(a, b HistoryAccumulator) bool { - if a.NumLeaves != b.NumLeaves { - return false - } - for i := range a.Trees { - if a.hasTreeAtHeight(i) && a.Trees[i] != b.Trees[i] { - return false - } - } - return true - } - var ha HistoryAccumulator - for i := 0; i < 16; i++ { - ha.ApplyBlock(types.ChainIndex{Height: uint64(i)}) - js, _ := ha.MarshalJSON() - var ha2 HistoryAccumulator - if err := ha2.UnmarshalJSON(js); err != nil { - t.Fatal(err) - } else if !eq(ha, ha2) { - t.Fatal("accumulator marshal/unmarshal failed") - } - } -} - -func TestMultiproof(t *testing.T) { - outputs := make([]types.SiacoinElement, 8) - leaves := make([]types.Hash256, len(outputs)) - for i := range outputs { - outputs[i].LeafIndex = uint64(i) - outputs[i].ID.Index = uint64(i) - leaves[i] = SiacoinLeaf(outputs[i], false).Hash() - } - node01 := NodeHash(leaves[0], leaves[1]) - node23 := NodeHash(leaves[2], leaves[3]) - node45 := NodeHash(leaves[4], leaves[5]) - node67 := NodeHash(leaves[6], leaves[7]) - node03 := NodeHash(node01, node23) - node47 := NodeHash(node45, node67) - outputs[0].MerkleProof = []types.Hash256{leaves[1], node23, node47} - outputs[1].MerkleProof = []types.Hash256{leaves[0], node23, node47} - outputs[2].MerkleProof = []types.Hash256{leaves[3], node01, node47} - outputs[3].MerkleProof = []types.Hash256{leaves[2], node01, node47} - outputs[4].MerkleProof = []types.Hash256{leaves[5], node67, node03} - outputs[5].MerkleProof = []types.Hash256{leaves[4], node67, node03} - outputs[6].MerkleProof = []types.Hash256{leaves[7], node45, node03} - outputs[7].MerkleProof = []types.Hash256{leaves[6], node45, node03} - - tests := []struct { - inputs []int - proof []types.Hash256 - }{ - { - inputs: []int{0}, - proof: []types.Hash256{leaves[1], node23, node47}, - }, - { - inputs: []int{1, 2, 3}, - proof: 
[]types.Hash256{leaves[0], node47}, - }, - { - inputs: []int{7, 6, 0, 2, 3}, - proof: []types.Hash256{leaves[1], node45}, - }, - { - inputs: []int{7, 6, 5, 4, 3, 2, 1, 0}, - proof: nil, - }, - } - for _, test := range tests { - txns := []types.Transaction{{SiacoinInputs: make([]types.SiacoinInput, len(test.inputs))}} - for i, j := range test.inputs { - txns[0].SiacoinInputs[i].Parent = outputs[j] - } - - old := txns[0].DeepCopy() - // compute multiproof - proof := ComputeMultiproof(txns) - if !reflect.DeepEqual(proof, test.proof) { - t.Error("wrong proof generated") - } - for _, txn := range txns { - for i := range txn.SiacoinInputs { - txn.SiacoinInputs[i].Parent.MerkleProof = make([]types.Hash256, len(txn.SiacoinInputs[i].Parent.MerkleProof)) - } - } - // expand multiproof and check roundtrip - ExpandMultiproof(txns, proof) - if !reflect.DeepEqual(txns[0], old) { - t.Fatal("\n", txns[0], "\n", old) - } - } -} - -func BenchmarkSiacoinLeafHash(b *testing.B) { - var o types.SiacoinElement - for i := 0; i < b.N; i++ { - SiacoinLeaf(o, false).Hash() - } -} - -func BenchmarkUpdateExistingObjects(b *testing.B) { - outputs := make([]types.SiacoinElement, 1000) - leaves := make([]ElementLeaf, len(outputs)) - for i := range outputs { - leaves[i] = SiacoinLeaf(outputs[i], false) - } - var acc ElementAccumulator - acc.addLeaves(leaves) - for i := range outputs { - outputs[i].StateElement = leaves[i].StateElement - } - - proofs := make([][]types.Hash256, len(outputs)) - for i := range proofs { - proofs[i] = append([]types.Hash256(nil), outputs[i].MerkleProof...) 
- } - indices := frand.Perm(len(outputs))[:len(outputs)/2] - updated := make([]ElementLeaf, len(indices)) - for i, j := range indices { - updated[i] = SiacoinLeaf(outputs[j], true) - } - - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - // reset everything - b.StopTimer() - acc2 := acc - for i, j := range indices { - copy(updated[i].MerkleProof, proofs[j]) - } - b.StartTimer() - - acc2.updateLeaves(updated) - } -} diff --git a/v2/merkle/merkle.go b/v2/merkle/merkle.go deleted file mode 100644 index cf77a30a..00000000 --- a/v2/merkle/merkle.go +++ /dev/null @@ -1,56 +0,0 @@ -package merkle - -import ( - "math/bits" - - "go.sia.tech/core/v2/internal/blake2b" - "go.sia.tech/core/v2/types" -) - -// from RFC 6961 -const leafHashPrefix = 0x00 -const nodeHashPrefix = 0x01 - -// mergeHeight returns the height at which the proof paths of x and y merge. -func mergeHeight(x, y uint64) int { return bits.Len64(x ^ y) } - -// clearBits clears the n least significant bits of x. -func clearBits(x uint64, n int) uint64 { return x &^ (1<= mid }) - return ls[:split], ls[split:] -} - -func leavesByTree(txns []types.Transaction) [64][]ElementLeaf { - var trees [64][]ElementLeaf - addLeaf := func(l ElementLeaf) { - trees[len(l.MerkleProof)] = append(trees[len(l.MerkleProof)], l) - } - for _, txn := range txns { - for _, in := range txn.SiacoinInputs { - if in.Parent.LeafIndex != types.EphemeralLeafIndex { - addLeaf(SiacoinLeaf(in.Parent, false)) - } - } - for _, in := range txn.SiafundInputs { - addLeaf(SiafundLeaf(in.Parent, false)) - } - for _, rev := range txn.FileContractRevisions { - addLeaf(FileContractLeaf(rev.Parent, false)) - } - for _, res := range txn.FileContractResolutions { - addLeaf(FileContractLeaf(res.Parent, false)) - } - } - for _, leaves := range trees { - sort.Slice(leaves, func(i, j int) bool { - return leaves[i].LeafIndex < leaves[j].LeafIndex - }) - } - return trees -} - -// MultiproofSize computes the size of a multiproof for the given 
transactions. -func MultiproofSize(txns []types.Transaction) int { - var proofSize func(i, j uint64, leaves []ElementLeaf) int - proofSize = func(i, j uint64, leaves []ElementLeaf) int { - height := bits.TrailingZeros64(j - i) - if len(leaves) == 0 { - return 1 - } else if height == 0 { - return 0 - } - mid := (i + j) / 2 - left, right := splitLeaves(leaves, mid) - return proofSize(i, mid, left) + proofSize(mid, j, right) - } - - size := 0 - for height, leaves := range leavesByTree(txns) { - if len(leaves) == 0 { - continue - } - start := clearBits(leaves[0].LeafIndex, height+1) - end := start + 1<= 64 { - d.SetErr(errors.New("impossibly-large MerkleProof")) - } -} - -type compressedSiacoinElement types.SiacoinElement - -func (sce compressedSiacoinElement) EncodeTo(e *types.Encoder) { - (compressedStateElement)(sce.StateElement).EncodeTo(e) - sce.SiacoinOutput.EncodeTo(e) - e.WriteUint64(sce.MaturityHeight) -} - -func (sce *compressedSiacoinElement) DecodeFrom(d *types.Decoder) { - (*compressedStateElement)(&sce.StateElement).DecodeFrom(d) - sce.SiacoinOutput.DecodeFrom(d) - sce.MaturityHeight = d.ReadUint64() -} - -type compressedSiacoinInput types.SiacoinInput - -func (in compressedSiacoinInput) EncodeTo(e *types.Encoder) { - (compressedSiacoinElement)(in.Parent).EncodeTo(e) - in.SpendPolicy.EncodeTo(e) - e.WritePrefix(len(in.Signatures)) - for _, sig := range in.Signatures { - sig.EncodeTo(e) - } -} - -func (in *compressedSiacoinInput) DecodeFrom(d *types.Decoder) { - (*compressedSiacoinElement)(&in.Parent).DecodeFrom(d) - in.SpendPolicy.DecodeFrom(d) - in.Signatures = make([]types.Signature, d.ReadPrefix()) - for i := range in.Signatures { - in.Signatures[i].DecodeFrom(d) - } -} - -type compressedSiafundElement types.SiafundElement - -func (sfe compressedSiafundElement) EncodeTo(e *types.Encoder) { - (compressedStateElement)(sfe.StateElement).EncodeTo(e) - sfe.SiafundOutput.EncodeTo(e) - sfe.ClaimStart.EncodeTo(e) -} - -func (sfe *compressedSiafundElement) 
DecodeFrom(d *types.Decoder) { - (*compressedStateElement)(&sfe.StateElement).DecodeFrom(d) - sfe.SiafundOutput.DecodeFrom(d) - sfe.ClaimStart.DecodeFrom(d) -} - -type compressedSiafundInput types.SiafundInput - -func (in compressedSiafundInput) EncodeTo(e *types.Encoder) { - (compressedSiafundElement)(in.Parent).EncodeTo(e) - in.ClaimAddress.EncodeTo(e) - in.SpendPolicy.EncodeTo(e) - e.WritePrefix(len(in.Signatures)) - for _, sig := range in.Signatures { - sig.EncodeTo(e) - } -} - -func (in *compressedSiafundInput) DecodeFrom(d *types.Decoder) { - (*compressedSiafundElement)(&in.Parent).DecodeFrom(d) - in.ClaimAddress.DecodeFrom(d) - in.SpendPolicy.DecodeFrom(d) - in.Signatures = make([]types.Signature, d.ReadPrefix()) - for i := range in.Signatures { - in.Signatures[i].DecodeFrom(d) - } -} - -type compressedFileContractElement types.FileContractElement - -func (fce compressedFileContractElement) EncodeTo(e *types.Encoder) { - (compressedStateElement)(fce.StateElement).EncodeTo(e) - fce.FileContract.EncodeTo(e) -} - -func (fce *compressedFileContractElement) DecodeFrom(d *types.Decoder) { - (*compressedStateElement)(&fce.StateElement).DecodeFrom(d) - fce.FileContract.DecodeFrom(d) -} - -type compressedFileContractRevision types.FileContractRevision - -func (rev compressedFileContractRevision) EncodeTo(e *types.Encoder) { - (compressedFileContractElement)(rev.Parent).EncodeTo(e) - rev.Revision.EncodeTo(e) -} - -func (rev *compressedFileContractRevision) DecodeFrom(d *types.Decoder) { - (*compressedFileContractElement)(&rev.Parent).DecodeFrom(d) - rev.Revision.DecodeFrom(d) -} - -type compressedFileContractResolution types.FileContractResolution - -func (res compressedFileContractResolution) EncodeTo(e *types.Encoder) { - (compressedFileContractElement)(res.Parent).EncodeTo(e) - var fields uint8 - for i, b := range [...]bool{ - (*types.FileContractResolution)(&res).HasRenewal(), - (*types.FileContractResolution)(&res).HasStorageProof(), - 
(*types.FileContractResolution)(&res).HasFinalization(), - } { - if b { - fields |= 1 << i - } - } - e.WriteUint8(fields) - if fields&(1<<0) != 0 { - res.Renewal.EncodeTo(e) - } - if fields&(1<<1) != 0 { - res.StorageProof.EncodeTo(e) - } - if fields&(1<<2) != 0 { - res.Finalization.EncodeTo(e) - } -} - -func (res *compressedFileContractResolution) DecodeFrom(d *types.Decoder) { - (*compressedFileContractElement)(&res.Parent).DecodeFrom(d) - fields := d.ReadUint8() - if fields&(1<<0) != 0 { - res.Renewal.DecodeFrom(d) - } - if fields&(1<<1) != 0 { - res.StorageProof.DecodeFrom(d) - } - if fields&(1<<2) != 0 { - res.Finalization.DecodeFrom(d) - } -} - -type compressedTransaction types.Transaction - -func (txn compressedTransaction) EncodeTo(e *types.Encoder) { - const version = 1 - e.WriteUint8(version) - - var fields uint64 - for i, b := range [...]bool{ - len(txn.SiacoinInputs) != 0, - len(txn.SiacoinOutputs) != 0, - len(txn.SiafundInputs) != 0, - len(txn.SiafundOutputs) != 0, - len(txn.FileContracts) != 0, - len(txn.FileContractRevisions) != 0, - len(txn.FileContractResolutions) != 0, - len(txn.Attestations) != 0, - len(txn.ArbitraryData) != 0, - txn.NewFoundationAddress != types.VoidAddress, - !txn.MinerFee.IsZero(), - } { - if b { - fields |= 1 << i - } - } - e.WriteUint64(fields) - - if fields&(1<<0) != 0 { - e.WritePrefix(len(txn.SiacoinInputs)) - for _, in := range txn.SiacoinInputs { - (compressedSiacoinInput)(in).EncodeTo(e) - } - } - if fields&(1<<1) != 0 { - e.WritePrefix(len(txn.SiacoinOutputs)) - for _, out := range txn.SiacoinOutputs { - out.EncodeTo(e) - } - } - if fields&(1<<2) != 0 { - e.WritePrefix(len(txn.SiafundInputs)) - for _, in := range txn.SiafundInputs { - (compressedSiafundInput)(in).EncodeTo(e) - } - } - if fields&(1<<3) != 0 { - e.WritePrefix(len(txn.SiafundOutputs)) - for _, out := range txn.SiafundOutputs { - out.EncodeTo(e) - } - } - if fields&(1<<4) != 0 { - e.WritePrefix(len(txn.FileContracts)) - for _, fc := range 
txn.FileContracts { - fc.EncodeTo(e) - } - } - if fields&(1<<5) != 0 { - e.WritePrefix(len(txn.FileContractRevisions)) - for _, rev := range txn.FileContractRevisions { - (compressedFileContractRevision)(rev).EncodeTo(e) - } - } - if fields&(1<<6) != 0 { - e.WritePrefix(len(txn.FileContractResolutions)) - for _, res := range txn.FileContractResolutions { - (compressedFileContractResolution)(res).EncodeTo(e) - } - } - if fields&(1<<7) != 0 { - e.WritePrefix(len(txn.Attestations)) - for _, a := range txn.Attestations { - a.EncodeTo(e) - } - } - if fields&(1<<8) != 0 { - e.WriteBytes(txn.ArbitraryData) - } - if fields&(1<<9) != 0 { - txn.NewFoundationAddress.EncodeTo(e) - } - if fields&(1<<10) != 0 { - txn.MinerFee.EncodeTo(e) - } -} - -func (txn *compressedTransaction) DecodeFrom(d *types.Decoder) { - if version := d.ReadUint8(); version != 1 { - d.SetErr(fmt.Errorf("unsupported transaction version (%v)", version)) - return - } - - fields := d.ReadUint64() - - if fields&(1<<0) != 0 { - txn.SiacoinInputs = make([]types.SiacoinInput, d.ReadPrefix()) - for i := range txn.SiacoinInputs { - (*compressedSiacoinInput)(&txn.SiacoinInputs[i]).DecodeFrom(d) - } - } - if fields&(1<<1) != 0 { - txn.SiacoinOutputs = make([]types.SiacoinOutput, d.ReadPrefix()) - for i := range txn.SiacoinOutputs { - txn.SiacoinOutputs[i].DecodeFrom(d) - } - } - if fields&(1<<2) != 0 { - txn.SiafundInputs = make([]types.SiafundInput, d.ReadPrefix()) - for i := range txn.SiafundInputs { - (*compressedSiafundInput)(&txn.SiafundInputs[i]).DecodeFrom(d) - } - } - if fields&(1<<3) != 0 { - txn.SiafundOutputs = make([]types.SiafundOutput, d.ReadPrefix()) - for i := range txn.SiafundOutputs { - txn.SiafundOutputs[i].DecodeFrom(d) - } - } - if fields&(1<<4) != 0 { - txn.FileContracts = make([]types.FileContract, d.ReadPrefix()) - for i := range txn.FileContracts { - txn.FileContracts[i].DecodeFrom(d) - } - } - if fields&(1<<5) != 0 { - txn.FileContractRevisions = make([]types.FileContractRevision, 
d.ReadPrefix()) - for i := range txn.FileContractRevisions { - (*compressedFileContractRevision)(&txn.FileContractRevisions[i]).DecodeFrom(d) - } - } - if fields&(1<<6) != 0 { - txn.FileContractResolutions = make([]types.FileContractResolution, d.ReadPrefix()) - for i := range txn.FileContractResolutions { - (*compressedFileContractResolution)(&txn.FileContractResolutions[i]).DecodeFrom(d) - } - } - if fields&(1<<7) != 0 { - txn.Attestations = make([]types.Attestation, d.ReadPrefix()) - for i := range txn.Attestations { - txn.Attestations[i].DecodeFrom(d) - } - } - if fields&(1<<8) != 0 { - txn.ArbitraryData = d.ReadBytes() - } - if fields&(1<<9) != 0 { - txn.NewFoundationAddress.DecodeFrom(d) - } - if fields&(1<<10) != 0 { - txn.MinerFee.DecodeFrom(d) - } -} diff --git a/v2/merkle/multiproof_test.go b/v2/merkle/multiproof_test.go deleted file mode 100644 index 648ad838..00000000 --- a/v2/merkle/multiproof_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package merkle_test - -import ( - "bytes" - "io" - "math" - "math/rand" - "reflect" - "testing" - - "go.sia.tech/core/v2/internal/chainutil" - "go.sia.tech/core/v2/merkle" - "go.sia.tech/core/v2/types" -) - -func TestEncoding(t *testing.T) { - // NOTE: Multiproof encoding only works with "real" blocks -- we can't - // generate fake Merkle proofs randomly, because they won't share nodes with - // each other the way they should. 
- - sim := chainutil.NewChainSim() - block := sim.MineBlocks(100)[99] - - // to prevent nil/[] mismatches, roundtrip each transaction with standard encoding - for i := range block.Transactions { - var buf bytes.Buffer - e := types.NewEncoder(&buf) - block.Transactions[i].EncodeTo(e) - e.Flush() - d := types.NewDecoder(io.LimitedReader{R: &buf, N: math.MaxInt64}) - block.Transactions[i].DecodeFrom(d) - } - - var buf bytes.Buffer - e := types.NewEncoder(&buf) - d := types.NewDecoder(io.LimitedReader{R: &buf, N: math.MaxInt64}) - (*merkle.CompressedBlock)(&block).EncodeTo(e) - e.Flush() - - var read types.Block - (*merkle.CompressedBlock)(&read).DecodeFrom(d) - if err := d.Err(); err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(block, read) { - t.Fatalf("CompressedBlock did not survive roundtrip: expected %v, got %v", block, read) - } -} - -func TestBlockCompression(t *testing.T) { - ratio := func(b types.Block) float64 { - var buf bytes.Buffer - e := types.NewEncoder(&buf) - e.WriteUint8(1) // version - b.Header.EncodeTo(e) - e.WritePrefix(len(b.Transactions)) - for i := range b.Transactions { - b.Transactions[i].EncodeTo(e) - } - e.Flush() - uncompressed := buf.Len() - - buf.Reset() - (*merkle.CompressedBlock)(&b).EncodeTo(e) - e.Flush() - compressed := buf.Len() - - return float64(compressed) / float64(uncompressed) - } - - // empty block - b := types.Block{} - if r := ratio(b); r != 1 { - t.Errorf("empty block compression ratio: expected %.3g, got %.3g", 1.0, r) - } - - // 10 empty transactions - b = types.Block{Transactions: make([]types.Transaction, 10)} - if r := ratio(b); r != 1 { - t.Errorf("empty txns compression ratio: expected %.3g, got %.3g", 1.0, r) - } - - // 10 random transactions - rng := rand.New(rand.NewSource(0)) - sim := chainutil.NewChainSim() - for i, minedBlock := range sim.MineBlocks(10) { - b.Transactions[i] = minedBlock.Transactions[rng.Intn(len(minedBlock.Transactions))] - } - if r := ratio(b); r >= 0.9 { - t.Errorf("random txns 
compression ratio: expected <%.3g, got %.3g", 0.9, r) - } - - // a simulated block - b = sim.MineBlock() - if r := ratio(b); r >= 0.9 { - t.Errorf("simulated block compression ratio: expected <%.3g, got %.3g", 0.9, r) - } -} diff --git a/v2/net/gateway/peer.go b/v2/net/gateway/peer.go deleted file mode 100644 index e5a9c1fd..00000000 --- a/v2/net/gateway/peer.go +++ /dev/null @@ -1,149 +0,0 @@ -package gateway - -import ( - "errors" - "fmt" - "net" - - "go.sia.tech/core/v2/net/rpc" - "go.sia.tech/core/v2/types" - - "go.sia.tech/mux" - "lukechampine.com/frand" -) - -const protocolVersion = 2 - -// A UniqueID is a randomly-generated nonce that helps prevent self-connections -// and double-connections. -type UniqueID [8]byte - -// GenerateUniqueID returns a random UniqueID. -func GenerateUniqueID() (id UniqueID) { - frand.Read(id[:]) - return -} - -type rpcHeader struct { - GenesisID types.BlockID - UniqueID [8]byte -} - -func validateHeader(ours, theirs rpcHeader) error { - if theirs.GenesisID != ours.GenesisID { - return errors.New("peer has different genesis block") - } else if theirs.UniqueID == ours.UniqueID { - return errors.New("peer has same unique ID as us") - } - return nil -} - -func (h *rpcHeader) EncodeTo(e *types.Encoder) { - h.GenesisID.EncodeTo(e) - e.Write(h.UniqueID[:]) -} - -func (h *rpcHeader) DecodeFrom(d *types.Decoder) { - h.GenesisID.DecodeFrom(d) - d.Read(h.UniqueID[:]) -} - -func (h *rpcHeader) MaxLen() int { - return 1024 // arbitrary -} - -// A Session is an ongoing exchange of RPCs via the gateway protocol. -type Session struct { - *mux.Mux - RemoteAddr string - RemoteID UniqueID -} - -// DialSession initiates the gateway handshake with a peer, establishing a -// Session. 
-func DialSession(conn net.Conn, genesisID types.BlockID, uid UniqueID) (_ *Session, err error) { - m, err := mux.DialAnonymous(conn) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - m.Close() - } - }() - s := m.DialStream() - defer s.Close() - - // exchange versions - var buf [1]byte - if _, err := s.Write([]byte{protocolVersion}); err != nil { - return nil, fmt.Errorf("could not write our version: %w", err) - } else if _, err := s.Read(buf[:]); err != nil { - return nil, fmt.Errorf("could not read peer version: %w", err) - } else if version := buf[0]; version != protocolVersion { - return nil, fmt.Errorf("incompatible versions (ours = %v, theirs = %v)", protocolVersion, version) - } - - // exchange headers - ourHeader := rpcHeader{genesisID, uid} - var peerHeader rpcHeader - if err := rpc.WriteObject(s, &ourHeader); err != nil { - return nil, fmt.Errorf("could not write our header: %w", err) - } else if err := rpc.ReadObject(s, &peerHeader); err != nil { - return nil, fmt.Errorf("could not read peer's header: %w", err) - } else if err := validateHeader(ourHeader, peerHeader); err != nil { - return nil, fmt.Errorf("unacceptable header: %w", err) - } - - return &Session{ - Mux: m, - RemoteAddr: conn.RemoteAddr().String(), - RemoteID: peerHeader.UniqueID, - }, nil -} - -// AcceptSession reciprocates the gateway handshake with a peer, establishing a -// Session. 
-func AcceptSession(conn net.Conn, genesisID types.BlockID, uid UniqueID) (_ *Session, err error) { - m, err := mux.AcceptAnonymous(conn) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - m.Close() - } - }() - s, err := m.AcceptStream() - if err != nil { - return nil, err - } - defer s.Close() - - // exchange versions - var buf [1]byte - if _, err := s.Read(buf[:]); err != nil { - return nil, fmt.Errorf("could not read peer version: %w", err) - } else if _, err := s.Write([]byte{protocolVersion}); err != nil { - return nil, fmt.Errorf("could not write our version: %w", err) - } else if version := buf[0]; version != protocolVersion { - return nil, fmt.Errorf("incompatible versions (ours = %v, theirs = %v)", protocolVersion, version) - } - - // exchange headers - ourHeader := rpcHeader{genesisID, uid} - var peerHeader rpcHeader - if err := rpc.ReadObject(s, &peerHeader); err != nil { - return nil, fmt.Errorf("could not read peer's header: %w", err) - } else if err := rpc.WriteObject(s, &ourHeader); err != nil { - return nil, fmt.Errorf("could not write our header: %w", err) - } else if err := validateHeader(ourHeader, peerHeader); err != nil { - return nil, fmt.Errorf("unacceptable header: %w", err) - } - - return &Session{ - Mux: m, - RemoteAddr: conn.RemoteAddr().String(), - RemoteID: peerHeader.UniqueID, - }, nil -} diff --git a/v2/net/gateway/peer_test.go b/v2/net/gateway/peer_test.go deleted file mode 100644 index 72cf4e47..00000000 --- a/v2/net/gateway/peer_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package gateway - -import ( - "errors" - "net" - "testing" - - "go.sia.tech/core/v2/net/rpc" - "go.sia.tech/core/v2/types" -) - -type objString string - -func (s *objString) EncodeTo(e *types.Encoder) { e.WriteString(string(*s)) } -func (s *objString) DecodeFrom(d *types.Decoder) { *s = objString(d.ReadString()) } -func (s *objString) MaxLen() int { return 100 } - -func TestHandshake(t *testing.T) { - genesisID := (&types.Block{}).ID() - 
rpcGreet := rpc.NewSpecifier("greet") - - // initialize peer - l, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatal(err) - } - defer l.Close() - peerErr := make(chan error, 1) - go func() { - peerErr <- func() error { - conn, err := l.Accept() - if err != nil { - return err - } - defer conn.Close() - sess, err := AcceptSession(conn, genesisID, UniqueID{0}) - if err != nil { - return err - } - defer sess.Close() - stream, err := sess.AcceptStream() - if err != nil { - return err - } - defer stream.Close() - id, err := rpc.ReadID(stream) - if err != nil { - return err - } else if id != rpcGreet { - return errors.New("unexpected RPC ID") - } - var name objString - if err := rpc.ReadRequest(stream, &name); err != nil { - return err - } - greeting := "Hello, " + name - if err := rpc.WriteResponse(stream, &greeting); err != nil { - return err - } - return nil - }() - }() - - // connect to peer - conn, err := net.Dial("tcp", l.Addr().String()) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - sess, err := DialSession(conn, genesisID, UniqueID{1}) - if err != nil { - t.Fatal(err) - } - defer sess.Close() - stream := sess.DialStream() - defer stream.Close() - - name := objString("foo") - var greeting objString - if err := rpc.WriteRequest(stream, rpcGreet, &name); err != nil { - t.Fatal(err) - } else if err := rpc.ReadResponse(stream, &greeting); err != nil { - t.Fatal(err) - } else if greeting != "Hello, foo" { - t.Fatal("unexpected greeting:", greeting) - } - if err := <-peerErr; err != nil { - t.Fatal(err) - } -} diff --git a/v2/net/gateway/rpc.go b/v2/net/gateway/rpc.go deleted file mode 100644 index d2fb6eec..00000000 --- a/v2/net/gateway/rpc.go +++ /dev/null @@ -1,268 +0,0 @@ -package gateway - -import ( - "fmt" - - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/merkle" - "go.sia.tech/core/v2/net/rpc" - "go.sia.tech/core/v2/types" -) - -const defaultMaxLen = 10e3 -const largeMaxLen = 1e6 - -// MaxRPCPeersLen is the maximum number of peers 
that RPCPeers can return. -const MaxRPCPeersLen = 100 - -// RPC IDs -var ( - RPCPeersID = rpc.NewSpecifier("Peers") - RPCHeadersID = rpc.NewSpecifier("Headers") - RPCBlocksID = rpc.NewSpecifier("Blocks") - RPCCheckpointID = rpc.NewSpecifier("Checkpoint") - RPCRelayBlockID = rpc.NewSpecifier("RelayBlock") - RPCRelayTxnID = rpc.NewSpecifier("RelayTxn") -) - -// RPC request/response objects -type ( - // RPCPeersRequest contains the request parameters for the Peers RPC. - RPCPeersRequest struct{} - - // RPCHeadersRequest contains the request parameters for the Headers RPC. - RPCHeadersRequest struct { - History []types.ChainIndex - } - - // RPCHeadersResponse contains the response data for the Headers RPC. - RPCHeadersResponse struct { - Headers []types.BlockHeader - } - - // RPCBlocksRequest contains the request parameters for the Blocks RPC. - RPCBlocksRequest struct { - Blocks []types.ChainIndex - } - - // RPCBlocksResponse contains the response data for the Blocks RPC. - RPCBlocksResponse struct { - Blocks []types.Block - } - - // RPCCheckpointRequest contains the request parameters for the Checkpoint RPC. - RPCCheckpointRequest struct { - Index types.ChainIndex - } - - // RPCCheckpointResponse contains the response data for the Checkpoint RPC. - RPCCheckpointResponse struct { - // NOTE: we don't use a consensus.Checkpoint, because a Checkpoint.State - // is the *child* state for the block, not its parent state. - Block types.Block - ParentState consensus.State - } - - // RPCRelayBlockRequest contains the request parameters for the RelayBlock RPC. - RPCRelayBlockRequest struct { - Block types.Block - } - - // RPCRelayTxnRequest contains the request parameters for the RelayTxn RPC. - RPCRelayTxnRequest struct { - Transaction types.Transaction - DependsOn []types.Transaction - } -) - -// IsRelayRPC returns true for request objects that should be relayed. 
-func IsRelayRPC(msg rpc.Object) bool { - switch msg.(type) { - case *RPCHeadersRequest, - *RPCPeersRequest, - *RPCBlocksRequest, - *RPCCheckpointRequest: - return false - case *RPCRelayBlockRequest, - *RPCRelayTxnRequest: - return true - default: - panic(fmt.Sprintf("unhandled type %T", msg)) - } -} - -// rpc.Object implementations - -// EncodeTo implements rpc.Object. -func (RPCPeersRequest) EncodeTo(e *types.Encoder) {} - -// DecodeFrom implements rpc.Object. -func (RPCPeersRequest) DecodeFrom(d *types.Decoder) {} - -// MaxLen implements rpc.Object. -func (RPCPeersRequest) MaxLen() int { return 0 } - -// EncodeTo implements rpc.Object. -func (r *RPCHeadersRequest) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.History)) - for i := range r.History { - r.History[i].EncodeTo(e) - } -} - -// DecodeFrom implements rpc.Object. -func (r *RPCHeadersRequest) DecodeFrom(d *types.Decoder) { - r.History = make([]types.ChainIndex, d.ReadPrefix()) - for i := range r.History { - r.History[i].DecodeFrom(d) - } -} - -// MaxLen implements rpc.Object. -func (RPCHeadersRequest) MaxLen() int { return defaultMaxLen } - -// EncodeTo implements rpc.Object. -func (r *RPCHeadersResponse) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.Headers)) - for i := range r.Headers { - r.Headers[i].EncodeTo(e) - } -} - -// DecodeFrom implements rpc.Object. -func (r *RPCHeadersResponse) DecodeFrom(d *types.Decoder) { - r.Headers = make([]types.BlockHeader, d.ReadPrefix()) - for i := range r.Headers { - r.Headers[i].DecodeFrom(d) - } -} - -// MaxLen implements rpc.Object. -func (RPCHeadersResponse) MaxLen() int { return largeMaxLen } - -// RPCPeersResponse contains the response data for the Peers RPC. -type RPCPeersResponse []string - -// EncodeTo implements rpc.Object. -func (r *RPCPeersResponse) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(*r)) - for i := range *r { - e.WriteString((*r)[i]) - } -} - -// DecodeFrom implements rpc.Object. 
-func (r *RPCPeersResponse) DecodeFrom(d *types.Decoder) { - *r = make([]string, d.ReadPrefix()) - for i := range *r { - (*r)[i] = d.ReadString() - } -} - -// MaxLen implements rpc.Object. -func (RPCPeersResponse) MaxLen() int { - const maxDomainLen = 256 // See https://www.freesoft.org/CIE/RFC/1035/9.htm - return 8 + MaxRPCPeersLen*maxDomainLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCBlocksRequest) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.Blocks)) - for i := range r.Blocks { - r.Blocks[i].EncodeTo(e) - } -} - -// DecodeFrom implements rpc.Object. -func (r *RPCBlocksRequest) DecodeFrom(d *types.Decoder) { - r.Blocks = make([]types.ChainIndex, d.ReadPrefix()) - for i := range r.Blocks { - r.Blocks[i].DecodeFrom(d) - } -} - -// MaxLen implements rpc.Object. -func (RPCBlocksRequest) MaxLen() int { return defaultMaxLen } - -// EncodeTo implements rpc.Object. -func (r *RPCBlocksResponse) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.Blocks)) - for i := range r.Blocks { - merkle.CompressedBlock(r.Blocks[i]).EncodeTo(e) - } -} - -// DecodeFrom implements rpc.Object. -func (r *RPCBlocksResponse) DecodeFrom(d *types.Decoder) { - r.Blocks = make([]types.Block, d.ReadPrefix()) - for i := range r.Blocks { - (*merkle.CompressedBlock)(&r.Blocks[i]).DecodeFrom(d) - } -} - -// MaxLen implements rpc.Object. -func (RPCBlocksResponse) MaxLen() int { - return 100e6 // arbitrary -} - -// EncodeTo implements rpc.Object. -func (r *RPCCheckpointRequest) EncodeTo(e *types.Encoder) { - r.Index.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCCheckpointRequest) DecodeFrom(d *types.Decoder) { - r.Index.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (RPCCheckpointRequest) MaxLen() int { return 40 } - -// EncodeTo implements rpc.Object. -func (r *RPCCheckpointResponse) EncodeTo(e *types.Encoder) { - merkle.CompressedBlock(r.Block).EncodeTo(e) - r.ParentState.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. 
-func (r *RPCCheckpointResponse) DecodeFrom(d *types.Decoder) { - (*merkle.CompressedBlock)(&r.Block).DecodeFrom(d) - r.ParentState.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (RPCCheckpointResponse) MaxLen() int { return largeMaxLen } - -// EncodeTo implements rpc.Object. -func (r *RPCRelayBlockRequest) EncodeTo(e *types.Encoder) { - merkle.CompressedBlock(r.Block).EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCRelayBlockRequest) DecodeFrom(d *types.Decoder) { - (*merkle.CompressedBlock)(&r.Block).DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (RPCRelayBlockRequest) MaxLen() int { return defaultMaxLen } - -// EncodeTo implements rpc.Object. -func (r *RPCRelayTxnRequest) EncodeTo(e *types.Encoder) { - r.Transaction.EncodeTo(e) - e.WritePrefix(len(r.DependsOn)) - for i := range r.DependsOn { - r.DependsOn[i].EncodeTo(e) - } -} - -// DecodeFrom implements rpc.Object. -func (r *RPCRelayTxnRequest) DecodeFrom(d *types.Decoder) { - r.Transaction.DecodeFrom(d) - r.DependsOn = make([]types.Transaction, d.ReadPrefix()) - for i := range r.DependsOn { - r.DependsOn[i].DecodeFrom(d) - } -} - -// MaxLen implements rpc.Object. -func (RPCRelayTxnRequest) MaxLen() int { return defaultMaxLen } diff --git a/v2/net/rhp/builder.go b/v2/net/rhp/builder.go deleted file mode 100644 index 96155c85..00000000 --- a/v2/net/rhp/builder.go +++ /dev/null @@ -1,218 +0,0 @@ -package rhp - -import ( - "bytes" - "errors" - "fmt" - - "go.sia.tech/core/v2/types" -) - -// A ProgramBuilder constructs MDM programs for the renter to execute on a host. 
-type ProgramBuilder struct { - instructions []Instruction - requiresFinalization bool - requiresContract bool - encoder *types.Encoder - data *bytes.Buffer - offset uint64 - usage ResourceUsage - - duration uint64 - settings HostSettings -} - -func (pb *ProgramBuilder) addUsage(usage ResourceUsage) { - pb.usage = pb.usage.Add(usage) -} - -func (pb *ProgramBuilder) appendInstruction(instr Instruction) { - pb.requiresContract = pb.requiresContract || InstructionRequiresContract(instr) - pb.requiresFinalization = pb.requiresFinalization || InstructionRequiresFinalization(instr) - pb.instructions = append(pb.instructions, instr) -} - -// AddAppendSectorInstruction adds an append sector instruction to the program. -func (pb *ProgramBuilder) AddAppendSectorInstruction(sector *[SectorSize]byte, proof bool) { - instr := &InstrAppendSector{ - SectorDataOffset: pb.offset, - ProofRequired: proof, - } - pb.encoder.Write(sector[:]) - pb.offset += SectorSize - pb.appendInstruction(instr) - pb.addUsage(AppendSectorCost(pb.settings, pb.duration)) -} - -// AddUpdateSectorInstruction adds an update sector instruction to the program. -func (pb *ProgramBuilder) AddUpdateSectorInstruction(offset uint64, data []byte, proof bool) error { - l := uint64(len(data)) - if offset+l > SectorSize { - return errors.New("update offset + length exceeds sector size") - } - - instr := &InstrUpdateSector{ - Offset: offset, - Length: l, - DataOffset: pb.offset, - ProofRequired: proof, - } - pb.encoder.Write(data) - pb.offset += l - pb.appendInstruction(instr) - pb.addUsage(UpdateSectorCost(pb.settings, l)) - return nil -} - -// AddDropSectorInstruction adds a drop sector instruction to the program. 
-func (pb *ProgramBuilder) AddDropSectorInstruction(sectors uint64, proof bool) { - instr := &InstrDropSectors{ - SectorCountOffset: pb.offset, - ProofRequired: proof, - } - pb.encoder.WriteUint64(sectors) - pb.offset += 8 - pb.appendInstruction(instr) - pb.addUsage(DropSectorsCost(pb.settings, sectors)) -} - -// AddHasSectorInstruction adds a has sector instruction to the program. -func (pb *ProgramBuilder) AddHasSectorInstruction(root types.Hash256) { - instr := &InstrHasSector{ - SectorRootOffset: pb.offset, - } - root.EncodeTo(pb.encoder) - pb.offset += 32 - pb.appendInstruction(instr) - pb.addUsage(HasSectorCost(pb.settings)) -} - -// AddReadSectorInstruction adds a read sector instruction to the program. -func (pb *ProgramBuilder) AddReadSectorInstruction(root types.Hash256, offset uint64, length uint64, proof bool) error { - if offset+length > SectorSize { - return errors.New("read offset + length exceeds sector size") - } - - instr := &InstrReadSector{ - RootOffset: pb.offset, - SectorOffset: pb.offset + 32, - LengthOffset: pb.offset + 40, - ProofRequired: proof, - } - root.EncodeTo(pb.encoder) - pb.encoder.WriteUint64(offset) - pb.encoder.WriteUint64(length) - pb.offset += 48 - pb.appendInstruction(instr) - pb.addUsage(ReadCost(pb.settings, length)) - return nil -} - -// AddReadOffsetInstruction adds a read offset instruction to the program. -func (pb *ProgramBuilder) AddReadOffsetInstruction(offset, length uint64, proof bool) { - instr := &InstrReadOffset{ - DataOffset: pb.offset, - LengthOffset: pb.offset + 8, - ProofRequired: proof, - } - pb.encoder.WriteUint64(offset) - pb.encoder.WriteUint64(length) - pb.offset += 16 - pb.appendInstruction(instr) - pb.addUsage(ReadCost(pb.settings, length)) -} - -// AddDropSectorsInstruction adds a drop sectors instruction to the program. 
-func (pb *ProgramBuilder) AddDropSectorsInstruction(sectors uint64, proof bool) { - instr := &InstrDropSectors{ - SectorCountOffset: pb.offset, - ProofRequired: proof, - } - pb.encoder.WriteUint64(sectors) - pb.offset += 8 - pb.appendInstruction(instr) - pb.addUsage(DropSectorsCost(pb.settings, sectors)) -} - -// AddSectorRootsInstruction adds a contract roots instruction to the program, -// returning the Merkle root of each sector stored by the contract. -func (pb *ProgramBuilder) AddSectorRootsInstruction(sectors uint64) { - pb.appendInstruction(&InstrSectorRoots{}) - pb.addUsage(SectorRootsCost(pb.settings, sectors)) -} - -// AddRevisionInstruction adds a revision instruction to the program. -func (pb *ProgramBuilder) AddRevisionInstruction() { - pb.appendInstruction(&InstrContractRevision{}) - pb.addUsage(RevisionCost(pb.settings)) -} - -// AddSwapSectorInstruction adds a swap sector instruction to the program. -func (pb *ProgramBuilder) AddSwapSectorInstruction(i, j uint64, proof bool) { - instr := &InstrSwapSector{ - RootAOffset: pb.offset, - RootBOffset: pb.offset + 32, - ProofRequired: proof, - } - pb.encoder.WriteUint64(i) - pb.encoder.WriteUint64(j) - pb.offset += 64 - pb.appendInstruction(instr) - pb.addUsage(SwapSectorCost(pb.settings)) -} - -// AddUpdateRegistryInstruction adds an update registry instruction to the program. -func (pb *ProgramBuilder) AddUpdateRegistryInstruction(value RegistryValue) { - instr := &InstrUpdateRegistry{ - EntryOffset: pb.offset, - } - value.EncodeTo(pb.encoder) - // TODO: ? - if err := pb.encoder.Flush(); err != nil { - panic(err) // should never happen - } - pb.offset = uint64(pb.data.Len()) - pb.appendInstruction(instr) - pb.addUsage(UpdateRegistryCost(pb.settings)) -} - -// AddReadRegistryInstruction adds a read registry instruction to the program. 
-func (pb *ProgramBuilder) AddReadRegistryInstruction(pub types.PublicKey, tweak types.Hash256) { - instr := &InstrReadRegistry{ - PublicKeyOffset: pb.offset, - TweakOffset: pb.offset + 32, - } - pub.EncodeTo(pb.encoder) - tweak.EncodeTo(pb.encoder) - pb.offset += 64 - pb.appendInstruction(instr) - pb.addUsage(ReadRegistryCost(pb.settings)) -} - -// Cost returns the estimated cost of executing the program, excluding bandwidth -// usage. -func (pb *ProgramBuilder) Cost() ResourceUsage { - // use the initial cost as a base, then add the running total from the - // program builder. - return ExecutionCost(pb.settings, pb.offset, uint64(len(pb.instructions)), pb.requiresFinalization).Add(pb.usage) -} - -// Program returns the program's instructions and a bool indicating if the -// program is read-only. -func (pb *ProgramBuilder) Program() (instructions []Instruction, requiresContract, requiresFinalization bool, err error) { - if err := pb.encoder.Flush(); err != nil { - return nil, false, false, fmt.Errorf("failed to flush program data: %w", err) - } - return pb.instructions, pb.requiresContract, pb.requiresFinalization, nil -} - -// NewProgramBuilder initializes a new empty ProgramBuilder. 
-func NewProgramBuilder(settings HostSettings, data *bytes.Buffer, duration uint64) *ProgramBuilder { - return &ProgramBuilder{ - encoder: types.NewEncoder(data), - data: data, - offset: uint64(data.Len()), - duration: duration, - settings: settings, - } -} diff --git a/v2/net/rhp/builder_test.go b/v2/net/rhp/builder_test.go deleted file mode 100644 index 68c2d380..00000000 --- a/v2/net/rhp/builder_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package rhp - -import ( - "bytes" - "io" - "reflect" - "testing" - - "go.sia.tech/core/v2/types" - - "lukechampine.com/frand" -) - -var testSettings = HostSettings{ - AcceptingContracts: true, - ContractFee: types.Siacoins(1), - Collateral: types.Siacoins(1).Div64(1 << 22).Div64(4320), // 1 SC per sector per block per month - MaxCollateral: types.Siacoins(5000), - MaxDuration: 4960, - StoragePrice: types.Siacoins(1).Div64(1 << 22).Div64(4320), // 1 SC per sector per block per month - DownloadBandwidthPrice: types.Siacoins(1).Div64(1 << 22), // 1 SC per sector - UploadBandwidthPrice: types.Siacoins(1).Div64(1 << 22), // 1 SC per sector - SectorSize: 1 << 22, - WindowSize: 144, - - RPCFundAccountCost: types.NewCurrency64(1), - RPCAccountBalanceCost: types.NewCurrency64(1), - RPCRenewContractCost: types.NewCurrency64(1), - RPCHostSettingsCost: types.NewCurrency64(1), - RPCLatestRevisionCost: types.NewCurrency64(1), -} - -func TestAppendProgram(t *testing.T) { - var sector [SectorSize]byte - frand.Read(sector[:128]) - - buf := bytes.NewBuffer(nil) - builder := NewProgramBuilder(testSettings, buf, 10) - builder.AddAppendSectorInstruction(§or, true) - - instructions, requiresContract, requiresFinalization, err := builder.Program() - switch { - case err != nil: - t.Fatal(err) - case len(instructions) != 1: - t.Fatal("wrong number of instructions") - case !requiresContract: - t.Fatal("program should require a contract") - case !requiresFinalization: - t.Fatal("program should require finalization") - case !bytes.Equal(buf.Bytes(), 
sector[:]): - t.Fatal("wrong data") - } - - if _, ok := instructions[0].(*InstrAppendSector); !ok { - t.Fatal("expected append sector instruction") - } -} - -func TestUpdateProgram(t *testing.T) { - offset := frand.Uint64n(SectorSize - 128) - data := make([]byte, 128) - frand.Read(data) - - buf := bytes.NewBuffer(nil) - builder := NewProgramBuilder(testSettings, buf, 10) - builder.AddUpdateSectorInstruction(offset, data, true) - - instructions, requiresContract, requiresFinalization, err := builder.Program() - switch { - case err != nil: - t.Fatal(err) - case len(instructions) != 1: - t.Fatal("wrong number of instructions") - case !requiresContract: - t.Fatal("program should require a contract") - case !requiresFinalization: - t.Fatal("program should require finalization") - case instructions[0].(*InstrUpdateSector).Offset != offset: - t.Fatalf("invalid sector offset got %v, expected %v", instructions[0].(*InstrUpdateSector).Offset, offset) - case instructions[0].(*InstrUpdateSector).DataOffset != 0: - t.Fatalf("invalid data offset got %v, expected %v", instructions[0].(*InstrUpdateSector).DataOffset, 0) - case !bytes.Equal(buf.Bytes(), data): - t.Fatal("wrong data") - } -} - -func TestReadSectorProgram(t *testing.T) { - var sector [SectorSize]byte - frand.Read(sector[:128]) - root := SectorRoot(§or) - offset := frand.Uint64n(100) - length := frand.Uint64n(100) - - buf := bytes.NewBuffer(nil) - builder := NewProgramBuilder(testSettings, buf, 10) - - if err := builder.AddReadSectorInstruction(root, offset, length, true); err != nil { - t.Fatal(err) - } - - instructions, requiresContract, requiresFinalization, err := builder.Program() - switch { - case err != nil: - t.Fatal(err) - case len(instructions) != 1: - t.Fatal("wrong number of instructions") - case requiresContract: - t.Fatal("program should not require a contract") - case requiresFinalization: - t.Fatal("program should not require finalization") - case buf.Len() != 32+8+8: - t.Fatalf("wrong data length 
expected %v, got %v", 32+8+8, buf.Len()) - } - - decoder := types.NewDecoder(io.LimitedReader{R: buf, N: 32 + 8 + 8}) - - var encodedRoot types.Hash256 - encodedRoot.DecodeFrom(decoder) - if encodedRoot != root { - t.Fatalf("wrong root expected %v, got %v", root, encodedRoot) - } - - encodedOffset := decoder.ReadUint64() - if encodedOffset != offset { - t.Fatalf("wrong offset expected %v, got %v", offset, encodedOffset) - } - - encodedLength := decoder.ReadUint64() - if encodedLength != length { - t.Fatalf("wrong length expected %v, got %v", length, encodedLength) - } - - if _, ok := instructions[0].(*InstrReadSector); !ok { - t.Fatal("expected append sector instruction") - } -} - -func randomRegistryValue(key types.PrivateKey) (value RegistryValue) { - value.Tweak = frand.Entropy256() - value.Data = frand.Bytes(32) - value.Type = EntryTypeArbitrary - value.PublicKey = key.PublicKey() - value.Signature = key.SignHash(value.Hash()) - return -} - -func TestRegistryProgram(t *testing.T) { - key := types.GeneratePrivateKey() - value := randomRegistryValue(key) - value2 := randomRegistryValue(key) - - buf := bytes.NewBuffer(nil) - builder := NewProgramBuilder(testSettings, buf, 10) - builder.AddReadRegistryInstruction(value.PublicKey, value.Tweak) - builder.AddUpdateRegistryInstruction(value) - builder.AddReadRegistryInstruction(value2.PublicKey, value2.Tweak) - - instructions, requiresContract, requiresFinalization, err := builder.Program() - switch { - case err != nil: - t.Fatal(err) - case len(instructions) != 3: - t.Fatal("wrong number of instructions") - case requiresContract: - t.Fatal("program should not require a contract") - case requiresFinalization: - t.Fatal("program should not require finalization") - } - - r := bytes.NewReader(buf.Bytes()) - dec := types.NewDecoder(io.LimitedReader{R: r, N: int64(buf.Len())}) - - readInstr, ok := instructions[0].(*InstrReadRegistry) - if !ok { - t.Fatal("expected read registry instruction") - } else if 
readInstr.PublicKeyOffset != 0 { - t.Fatal("wrong public key offset") - } else if readInstr.TweakOffset != 32 { - t.Fatalf("wrong tweak offset %v, expected %v", readInstr.TweakOffset, 8) - } - - var dataPubKey types.PublicKey - r.Seek(int64(readInstr.PublicKeyOffset), io.SeekStart) - dataPubKey.DecodeFrom(dec) - if dataPubKey != value.PublicKey { - t.Fatal("wrong public key") - } - - var dataTweak types.Hash256 - r.Seek(int64(readInstr.TweakOffset), io.SeekStart) - dataTweak.DecodeFrom(dec) - if dataTweak != value.Tweak { - t.Fatal("wrong tweak") - } - - updateInstr, ok := instructions[1].(*InstrUpdateRegistry) - if !ok { - t.Fatal("expected read registry instruction") - } else if updateInstr.EntryOffset != 64 { - t.Fatal("wrong value offset") - } - - var dataValue RegistryValue - r.Seek(int64(updateInstr.EntryOffset), io.SeekStart) - dataValue.DecodeFrom(dec) - if !reflect.DeepEqual(dataValue, value) { - t.Fatal("wrong encoded value") - } - - readInstr, ok = instructions[2].(*InstrReadRegistry) - if !ok { - t.Fatal("expected read registry instruction") - } else if readInstr.PublicKeyOffset != uint64(buf.Len())-64 { - t.Fatal("wrong public key offset") - } else if readInstr.TweakOffset != uint64(buf.Len())-32 { - t.Fatalf("wrong tweak offset %v, expected %v", readInstr.TweakOffset, 8) - } - - r.Seek(int64(readInstr.PublicKeyOffset), io.SeekStart) - dataPubKey.DecodeFrom(dec) - if dataPubKey != value2.PublicKey { - t.Fatal("wrong public key") - } - - r.Seek(int64(readInstr.TweakOffset), io.SeekStart) - dataTweak.DecodeFrom(dec) - if dataTweak != value2.Tweak { - t.Fatal("wrong tweak") - } -} - -func BenchmarkProgramBuilder(b *testing.B) { - var sector [SectorSize]byte - frand.Read(sector[:128]) - - buf := bytes.NewBuffer(make([]byte, 0, SectorSize*b.N)) - builder := NewProgramBuilder(testSettings, buf, 10) - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(SectorSize)) - for i := 0; i < b.N; i++ { - builder.AddAppendSectorInstruction(§or, true) - } -} diff 
--git a/v2/net/rhp/contracts.go b/v2/net/rhp/contracts.go deleted file mode 100644 index 0caf8f43..00000000 --- a/v2/net/rhp/contracts.go +++ /dev/null @@ -1,244 +0,0 @@ -package rhp - -import ( - "errors" - - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/types" -) - -var ( - // ErrInvalidRenterSignature is returned when a contract's renter signature is invalid. - ErrInvalidRenterSignature = errors.New("invalid renter signature") - - // ErrInvalidHostSignature is returned when a contract's host signature is invalid. - ErrInvalidHostSignature = errors.New("invalid host signature") -) - -// A Contract pairs a file contract with its ID. -type Contract struct { - ID types.ElementID - Revision types.FileContract -} - -// EncodeTo implements types.EncoderTo. -func (c *Contract) EncodeTo(enc *types.Encoder) { - c.ID.EncodeTo(enc) - c.Revision.EncodeTo(enc) -} - -// DecodeFrom implements types.DecoderFrom. -func (c *Contract) DecodeFrom(dec *types.Decoder) { - c.ID.DecodeFrom(dec) - c.Revision.DecodeFrom(dec) -} - -// MaxLen implements rpc.Object. -func (c *Contract) MaxLen() uint64 { - return 10e3 -} - -// PaymentRevision returns a new file contract revision with the specified -// amount moved from the renter's payout to the host's payout (both valid and -// missed). The revision number is incremented. -func PaymentRevision(fc types.FileContract, amount types.Currency) (types.FileContract, error) { - if fc.RenterOutput.Value.Cmp(amount) < 0 { - return fc, errors.New("insufficient funds") - } - fc.RevisionNumber++ - fc.RenterOutput.Value = fc.RenterOutput.Value.Sub(amount) - fc.HostOutput.Value = fc.HostOutput.Value.Add(amount) - fc.MissedHostValue = fc.MissedHostValue.Add(amount) - return fc, nil -} - -// FinalizeProgramRevision returns a new file contract revision with the burn -// amount subtracted from the host output. The revision number is incremented. 
-func FinalizeProgramRevision(fc types.FileContract, burn types.Currency) (types.FileContract, error) { - if fc.MissedHostValue.Cmp(burn) < 0 { - return fc, errors.New("insufficient funds") - } - fc.RevisionNumber++ - fc.MissedHostValue = fc.MissedHostValue.Sub(burn) - return fc, nil -} - -// ValidateContractSignatures validates a contract's renter and host signatures. -func ValidateContractSignatures(cs consensus.State, fc types.FileContract) (err error) { - hash := cs.ContractSigHash(fc) - if !fc.RenterPublicKey.VerifyHash(hash, fc.RenterSignature) { - return ErrInvalidRenterSignature - } else if !fc.HostPublicKey.VerifyHash(hash, fc.HostSignature) { - return ErrInvalidHostSignature - } - return nil -} - -// ValidateContractFormation verifies that the new contract is valid given the -// host's settings. -func ValidateContractFormation(fc types.FileContract, currentHeight uint64, settings HostSettings) error { - switch { - case fc.Filesize != 0: - return errors.New("initial filesize should be 0") - case fc.RevisionNumber != 0: - return errors.New("initial revision number should be 0") - case fc.FileMerkleRoot != types.Hash256{}: - return errors.New("initial Merkle root should be empty") - case fc.WindowStart < currentHeight+settings.WindowSize: - return errors.New("contract ends too soon to safely submit the contract transaction") - case fc.WindowStart > currentHeight+settings.MaxDuration: - return errors.New("contract duration is too long") - case fc.WindowEnd < fc.WindowStart+settings.WindowSize: - return errors.New("proof window is too small") - case fc.HostOutput.Address != settings.Address: - return errors.New("wrong address for host valid output") - case fc.HostOutput.Value != fc.MissedHostValue: - return errors.New("host valid output value does not equal missed value") - case fc.HostOutput.Value != settings.ContractFee.Add(fc.TotalCollateral): - return errors.New("wrong initial host output value") - case fc.TotalCollateral.Cmp(settings.MaxCollateral) > 0: - 
return errors.New("excessive initial collateral") - } - return nil -} - -// ValidateContractRenewal verifies that the renewed contract is valid given the -// old contract. A renewal is valid if the contract fields match and the -// revision number is 0. -func ValidateContractRenewal(existing, renewal types.FileContract, currentHeight uint64, settings HostSettings) error { - switch { - case renewal.HostPublicKey != existing.HostPublicKey: - return errors.New("host public key must not change") - case renewal.RenterPublicKey != existing.RenterPublicKey: - return errors.New("renter public key must not change") - case renewal.RevisionNumber != 0: - return errors.New("revision number must be zero") - case renewal.Filesize != existing.Filesize: - return errors.New("filesize must not change") - case renewal.FileMerkleRoot != existing.FileMerkleRoot: - return errors.New("file Merkle root must not change") - case renewal.WindowEnd < existing.WindowEnd: - return errors.New("renewal window must not end before current window") - case renewal.WindowStart < currentHeight+settings.WindowSize: - return errors.New("contract ends too soon to safely submit the contract transaction") - case renewal.WindowStart > currentHeight+settings.MaxDuration: - return errors.New("contract duration is too long") - case renewal.WindowEnd < renewal.WindowStart+settings.WindowSize: - return errors.New("proof window is too small") - case renewal.HostOutput.Address != settings.Address: - return errors.New("wrong address for host output") - case renewal.HostOutput.Value.Cmp(settings.ContractFee.Add(renewal.TotalCollateral)) < 0: - return errors.New("insufficient initial host payout") - case renewal.TotalCollateral.Cmp(settings.MaxCollateral) > 0: - return errors.New("excessive initial collateral") - } - return nil -} - -// ValidateContractFinalization verifies that the revision locks the current -// contract by setting its revision number to the maximum legal value. No other -// fields should change. 
Signatures are not validated. -func ValidateContractFinalization(current, final types.FileContract) error { - switch { - case current.Filesize != final.Filesize: - return errors.New("file size must not change") - case current.FileMerkleRoot != final.FileMerkleRoot: - return errors.New("file merkle root must not change") - case current.WindowStart != final.WindowStart: - return errors.New("window start must not change") - case current.WindowEnd != final.WindowEnd: - return errors.New("window end must not change") - case current.RenterOutput != final.RenterOutput: - return errors.New("renter output must not change") - case current.HostOutput != final.HostOutput: - return errors.New("valid host output must not change") - case current.MissedHostValue != final.MissedHostValue: - return errors.New("missed host payout must not change") - case current.TotalCollateral != final.TotalCollateral: - return errors.New("total collateral must not change") - case current.RenterPublicKey != final.RenterPublicKey: - return errors.New("renter public key must not change") - case current.HostPublicKey != final.HostPublicKey: - return errors.New("host public key must not change") - case final.RevisionNumber != types.MaxRevisionNumber: - return errors.New("revision number must be max value") - } - return nil -} - -func validateStdRevision(current, revision types.FileContract) error { - switch { - case revision.RevisionNumber <= current.RevisionNumber: - return errors.New("revision number must increase") - case revision.WindowStart != current.WindowStart: - return errors.New("window start must not change") - case revision.WindowEnd != current.WindowEnd: - return errors.New("window end must not change") - case revision.RenterPublicKey != current.RenterPublicKey: - return errors.New("renter public key must not change") - case revision.HostPublicKey != current.HostPublicKey: - return errors.New("host public key must not change") - case revision.RenterOutput.Address != 
current.RenterOutput.Address: - return errors.New("renter address must not change") - case revision.HostOutput.Address != current.HostOutput.Address: - return errors.New("host address must not change") - case revision.TotalCollateral != current.TotalCollateral: - return errors.New("total collateral must not change") - } - return nil -} - -// ValidateProgramRevision verifies that a contract program revision is valid -// and only the missed host output value is modified by the expected burn amount -// all other usage will have been paid for by the RPC budget. Signatures are not -// validated. -func ValidateProgramRevision(current, revision types.FileContract, additionalStorage, additionalCollateral types.Currency) error { - // verify the new revision is valid given the existing revision and the - // public keys have not changed - if err := validateStdRevision(current, revision); err != nil { - return err - } - - expectedBurn := additionalStorage.Add(additionalCollateral) - if expectedBurn.Cmp(current.MissedHostValue) > 0 { - return errors.New("expected burn amount is greater than the missed host output value") - } - missedHostValue := current.MissedHostValue.Sub(expectedBurn) - - switch { - case revision.MissedHostValue != missedHostValue: - return errors.New("revision has incorrect collateral transfer") - case revision.RenterOutput != current.RenterOutput: - return errors.New("renter output should not change") - case revision.HostOutput != current.HostOutput: - return errors.New("host valid output should not change") - } - return nil -} - -// ValidatePaymentRevision verifies that a payment revision is valid and the -// amount is properly deducted from both renter outputs and added to both host -// outputs. Signatures are not validated. -func ValidatePaymentRevision(current, revision types.FileContract, amount types.Currency) error { - // verify the new revision is valid given the existing revision and the - // public keys have not changed. 
- if err := validateStdRevision(current, revision); err != nil { - return err - } - - // validate that all fields are consistent with only transferring the amount - // from the renter payouts to the host payouts. - switch { - case revision.FileMerkleRoot != current.FileMerkleRoot: - return errors.New("file merkle root must not change") - case revision.Filesize != current.Filesize: - return errors.New("file size must not change") - case revision.RenterOutput.Value.Add(amount) != current.RenterOutput.Value: - return errors.New("renter output value should decrease by the amount") - case revision.HostOutput.Value != current.HostOutput.Value.Add(amount): - return errors.New("host output value should increase by the amount") - case revision.MissedHostValue != current.MissedHostValue.Add(amount): - return errors.New("host missed output value should increase by the amount") - } - return nil -} diff --git a/v2/net/rhp/contracts_test.go b/v2/net/rhp/contracts_test.go deleted file mode 100644 index 79d94eef..00000000 --- a/v2/net/rhp/contracts_test.go +++ /dev/null @@ -1,554 +0,0 @@ -package rhp - -import ( - "encoding/binary" - "reflect" - "testing" - - "go.sia.tech/core/v2/consensus" - "go.sia.tech/core/v2/types" -) - -func outputValue(amount types.Currency) types.SiacoinOutput { - return types.SiacoinOutput{Value: amount} -} - -func testingKeypair(seed uint64) (types.PublicKey, types.PrivateKey) { - var b [32]byte - binary.LittleEndian.PutUint64(b[:], seed) - privkey := types.NewPrivateKeyFromSeed(b[:]) - return privkey.PublicKey(), privkey -} - -func TestPaymentRevision(t *testing.T) { - amount := types.Siacoins(10) - fc := types.FileContract{ - RenterOutput: outputValue(amount.Mul64(1)), - HostOutput: outputValue(amount.Mul64(2)), - MissedHostValue: amount.Mul64(3), - RevisionNumber: 5, - } - rev, err := PaymentRevision(fc, amount) - if err != nil { - t.Fatal(err) - } - - expected := types.FileContract{ - RenterOutput: outputValue(types.Siacoins(0)), - HostOutput: 
outputValue(amount.Mul64(3)), - MissedHostValue: amount.Mul64(4), - RevisionNumber: 6, - } - if !reflect.DeepEqual(rev, expected) { - t.Fatalf("expected %v got %v", expected, rev) - } - - if _, err := PaymentRevision(fc, amount.Mul64(20)); err == nil { - t.Fatal("expected insufficient funds error") - } -} - -func TestFinalizeProgramRevision(t *testing.T) { - amount := types.Siacoins(10) - fc := types.FileContract{ - MissedHostValue: amount.Mul64(3), - RevisionNumber: 5, - } - rev, err := FinalizeProgramRevision(fc, amount) - if err != nil { - t.Fatal(err) - } - - expected := types.FileContract{ - MissedHostValue: amount.Mul64(2), - RevisionNumber: 6, - } - if !reflect.DeepEqual(rev, expected) { - t.Fatalf("expected %v got %v", expected, rev) - } - - if _, err := FinalizeProgramRevision(fc, amount.Mul64(20)); err == nil { - t.Fatal("expected insufficient funds error") - } -} - -func TestValidateContractSignatures(t *testing.T) { - var cs consensus.State - renterPubkey, renterPrivkey := testingKeypair(0) - hostPubkey, hostPrivkey := testingKeypair(0) - - amount := types.Siacoins(10) - fc := types.FileContract{ - RenterOutput: outputValue(amount.Mul64(1)), - HostOutput: outputValue(amount.Mul64(2)), - MissedHostValue: amount.Mul64(3), - RevisionNumber: 5, - HostPublicKey: hostPubkey, - RenterPublicKey: renterPubkey, - } - hash := cs.ContractSigHash(fc) - - fc.HostSignature = hostPrivkey.SignHash(hash) - fc.RenterSignature = renterPrivkey.SignHash(hash) - - if err := ValidateContractSignatures(cs, fc); err != nil { - t.Fatal(err) - } - - fc.HostSignature[0] ^= 255 - - if err := ValidateContractSignatures(cs, fc); err != ErrInvalidHostSignature { - t.Fatalf("expected %v, got %v", ErrInvalidHostSignature, err) - } - - // fix host signature - fc.HostSignature[0] ^= 255 - - fc.RenterSignature[0] ^= 255 - if err := ValidateContractSignatures(cs, fc); err != ErrInvalidRenterSignature { - t.Fatalf("expected %v, got %v", ErrInvalidRenterSignature, err) - } -} - -func 
TestValidateContractRenewalFinalization(t *testing.T) { - currentHeight := uint64(5) - settings := HostSettings{ - WindowSize: 10, - MaxDuration: 100, - ContractFee: types.Siacoins(500), - MaxCollateral: types.Siacoins(1000), - } - renterPubkey, _ := testingKeypair(0) - hostPubkey, _ := testingKeypair(0) - - windowStart := currentHeight + settings.WindowSize - windowEnd := windowStart + settings.WindowSize - totalCollateral := types.Siacoins(700) - fc := types.FileContract{ - WindowStart: windowStart, - WindowEnd: windowEnd, - TotalCollateral: totalCollateral, - MissedHostValue: settings.ContractFee.Add(totalCollateral), - HostOutput: outputValue(settings.ContractFee.Add(totalCollateral)), - RenterOutput: outputValue(types.Siacoins(10)), - HostPublicKey: hostPubkey, - RenterPublicKey: renterPubkey, - } - - if err := ValidateContractFormation(fc, currentHeight, settings); err != nil { - t.Fatal(err) - } - - formChanges := []struct { - corrupt func(fc *types.FileContract) - desc string - }{ - { - func(fc *types.FileContract) { - fc.Filesize = 5 - }, - "initial filesize should be 0", - }, - { - func(fc *types.FileContract) { - fc.RevisionNumber = 5 - }, - "initial revision number should be 0", - }, - { - func(fc *types.FileContract) { - fc.FileMerkleRoot = types.Hash256{31: 1} - }, - "initial Merkle root should be empty", - }, - { - func(fc *types.FileContract) { - fc.WindowStart = windowStart / 2 - }, - "contract ends too soon to safely submit the contract transaction", - }, - { - func(fc *types.FileContract) { - fc.WindowStart = currentHeight + 2*settings.MaxDuration - }, - "contract duration is too long", - }, - { - func(fc *types.FileContract) { - fc.WindowEnd = windowStart + settings.WindowSize/2 - }, - "proof window is too small", - }, - { - func(fc *types.FileContract) { - fc.HostOutput.Address = types.Address{31: 1} - }, - "wrong address for host valid output", - }, - { - func(fc *types.FileContract) { - fc.MissedHostValue = fc.HostOutput.Value.Mul64(2) - }, - 
"host valid output value does not equal missed value", - }, - { - func(fc *types.FileContract) { - fc.HostOutput.Value = fc.HostOutput.Value.Mul64(1000) - fc.MissedHostValue = fc.HostOutput.Value - }, - "wrong initial host output value", - }, - { - func(fc *types.FileContract) { - fc.TotalCollateral = settings.MaxCollateral.Mul64(1000) - fc.HostOutput.Value = settings.ContractFee.Add(fc.TotalCollateral) - fc.MissedHostValue = fc.HostOutput.Value - }, - "excessive initial collateral", - }, - } - - for _, change := range formChanges { - fcCopy := fc - change.corrupt(&fcCopy) - if err := ValidateContractFormation(fcCopy, currentHeight, settings); err.Error() != change.desc { - t.Fatalf("expected error %s, got %s", change.desc, err.Error()) - } - } - - currentHeight = windowEnd - - renewal := fc - renewal.WindowEnd += 3 * settings.WindowSize - renewal.WindowStart = fc.WindowEnd + settings.WindowSize - - if err := ValidateContractRenewal(fc, renewal, currentHeight, settings); err != nil { - t.Fatal(err) - } - - renewalChanges := []struct { - corrupt func(existing, renewal *types.FileContract) - desc string - }{ - { - func(existing, renewal *types.FileContract) { - renewal.HostPublicKey[0] ^= 255 - }, - "host public key must not change", - }, - { - func(existing, renewal *types.FileContract) { - renewal.RenterPublicKey[0] ^= 255 - }, - "renter public key must not change", - }, - { - func(existing, renewal *types.FileContract) { - renewal.RevisionNumber++ - }, - "revision number must be zero", - }, - { - func(existing, renewal *types.FileContract) { - renewal.Filesize = existing.Filesize + 5 - }, - "filesize must not change", - }, - { - func(existing, renewal *types.FileContract) { - renewal.FileMerkleRoot[0] ^= 255 - }, - "file Merkle root must not change", - }, - { - func(existing, renewal *types.FileContract) { - renewal.WindowEnd = existing.WindowEnd - 5 - }, - "renewal window must not end before current window", - }, - { - func(existing, renewal *types.FileContract) 
{ - renewal.WindowStart = currentHeight - }, - "contract ends too soon to safely submit the contract transaction", - }, - { - func(existing, renewal *types.FileContract) { - renewal.WindowStart = currentHeight + 2*settings.MaxDuration - }, - "contract duration is too long", - }, - { - func(existing, renewal *types.FileContract) { - renewal.WindowEnd = renewal.WindowStart + settings.WindowSize/2 - }, - "proof window is too small", - }, - { - func(existing, renewal *types.FileContract) { - renewal.HostOutput.Value = renewal.HostOutput.Value.Sub(settings.ContractFee) - }, - "insufficient initial host payout", - }, - { - func(existing, renewal *types.FileContract) { - renewal.TotalCollateral = settings.MaxCollateral.Mul64(1000) - renewal.HostOutput.Value = settings.ContractFee.Add(renewal.TotalCollateral) - }, - "excessive initial collateral", - }, - } - - for _, change := range renewalChanges { - renewCopy := renewal - change.corrupt(&fc, &renewCopy) - if err := ValidateContractRenewal(fc, renewCopy, currentHeight, settings); err.Error() != change.desc { - t.Fatalf("expected error %s, got %s", change.desc, err.Error()) - } - } - - final := renewal - final.RevisionNumber = types.MaxRevisionNumber - - if err := ValidateContractFinalization(renewal, final); err != nil { - t.Fatal(err) - } - - finalChanges := []struct { - corrupt func(current, final *types.FileContract) - desc string - }{ - { - func(current, final *types.FileContract) { - final.Filesize++ - }, - "file size must not change", - }, - { - func(current, final *types.FileContract) { - final.FileMerkleRoot[0] ^= 255 - }, - "file merkle root must not change", - }, - { - func(current, final *types.FileContract) { - final.WindowStart++ - }, - "window start must not change", - }, - { - func(current, final *types.FileContract) { - final.WindowEnd++ - }, - "window end must not change", - }, - { - func(current, final *types.FileContract) { - final.RenterOutput.Value = final.RenterOutput.Value.Add(types.Siacoins(1)) - 
}, - "renter output must not change", - }, - { - func(current, final *types.FileContract) { - final.HostOutput.Value = final.HostOutput.Value.Add(types.Siacoins(1)) - }, - "valid host output must not change", - }, - { - func(current, final *types.FileContract) { - final.MissedHostValue = final.MissedHostValue.Add(types.Siacoins(1)) - }, - "missed host payout must not change", - }, - { - func(current, final *types.FileContract) { - final.TotalCollateral = final.TotalCollateral.Add(types.Siacoins(1)) - }, - "total collateral must not change", - }, - { - func(current, final *types.FileContract) { - final.RenterPublicKey[0] ^= 255 - }, - "renter public key must not change", - }, - { - func(current, final *types.FileContract) { - final.HostPublicKey[0] ^= 255 - }, - "host public key must not change", - }, - { - func(current, final *types.FileContract) { - final.RevisionNumber = types.MaxRevisionNumber / 2 - }, - "revision number must be max value", - }, - } - - for _, change := range finalChanges { - finalCopy := final - change.corrupt(&renewal, &finalCopy) - if err := ValidateContractFinalization(renewal, finalCopy); err.Error() != change.desc { - t.Fatalf("expected error %s, got %s", change.desc, err.Error()) - } - } -} - -func TestValidateContractRevision(t *testing.T) { - currentHeight := uint64(5) - settings := HostSettings{ - WindowSize: 10, - MaxDuration: 100, - ContractFee: types.Siacoins(500), - MaxCollateral: types.Siacoins(1000), - } - renterPubkey, _ := testingKeypair(0) - hostPubkey, _ := testingKeypair(0) - - windowStart := currentHeight + settings.WindowSize - windowEnd := windowStart + settings.WindowSize - totalCollateral := types.Siacoins(700) - fc := types.FileContract{ - WindowStart: windowStart, - WindowEnd: windowEnd, - TotalCollateral: totalCollateral, - MissedHostValue: settings.ContractFee.Add(totalCollateral), - HostOutput: outputValue(settings.ContractFee.Add(totalCollateral)), - RenterOutput: outputValue(types.Siacoins(10)), - HostPublicKey: 
hostPubkey, - RenterPublicKey: renterPubkey, - } - - if err := ValidateContractFormation(fc, currentHeight, settings); err != nil { - t.Fatal(err) - } - - revision := fc - revision.RevisionNumber++ - - if err := ValidateProgramRevision(fc, revision, types.ZeroCurrency, types.ZeroCurrency); err != nil { - t.Fatal(err) - } - - revisionChanges := []struct { - corrupt func(current, revision *types.FileContract) - desc string - }{ - { - func(current, revision *types.FileContract) { - revision.RevisionNumber = current.RevisionNumber - }, - "revision number must increase", - }, - { - func(current, revision *types.FileContract) { - revision.WindowStart++ - }, - "window start must not change", - }, - { - func(current, revision *types.FileContract) { - revision.WindowEnd++ - }, - "window end must not change", - }, - { - func(current, revision *types.FileContract) { - revision.RenterPublicKey[0] ^= 255 - }, - "renter public key must not change", - }, - { - func(current, revision *types.FileContract) { - revision.HostPublicKey[0] ^= 255 - }, - "host public key must not change", - }, - { - func(current, revision *types.FileContract) { - revision.RenterOutput.Address[0] ^= 255 - }, - "renter address must not change", - }, - { - func(current, revision *types.FileContract) { - revision.RenterOutput.Value = revision.RenterOutput.Value.Add(types.Siacoins(1)) - }, - "renter output should not change", - }, - { - func(current, revision *types.FileContract) { - revision.HostOutput.Value = revision.HostOutput.Value.Add(types.Siacoins(1)) - }, - "host valid output should not change", - }, - { - func(current, revision *types.FileContract) { - revision.HostOutput.Address[0] ^= 255 - }, - "host address must not change", - }, - { - func(current, revision *types.FileContract) { - revision.TotalCollateral = current.TotalCollateral.Add(types.Siacoins(1)) - }, - "total collateral must not change", - }, - } - - for _, change := range revisionChanges { - revCopy := revision - change.corrupt(&fc, 
&revCopy) - if err := ValidateProgramRevision(fc, revCopy, types.ZeroCurrency, types.ZeroCurrency); err.Error() != change.desc { - t.Fatalf("expected error %s, got %s", change.desc, err.Error()) - } - } - - if err := ValidatePaymentRevision(fc, revision, types.ZeroCurrency); err != nil { - t.Fatal(err) - } - - paymentRevisionChanges := []struct { - corrupt func(revision *types.FileContract) - desc string - }{ - { - func(revision *types.FileContract) { - revision.FileMerkleRoot[0] ^= 255 - }, - "file merkle root must not change", - }, - { - func(revision *types.FileContract) { - revision.Filesize++ - }, - "file size must not change", - }, - { - func(revision *types.FileContract) { - revision.RenterOutput.Value = revision.RenterOutput.Value.Add(types.Siacoins(1)) - }, - "renter output value should decrease by the amount", - }, - { - func(revision *types.FileContract) { - revision.HostOutput.Value = revision.HostOutput.Value.Sub(types.Siacoins(1)) - }, - "host output value should increase by the amount", - }, - { - func(revision *types.FileContract) { - revision.MissedHostValue = revision.MissedHostValue.Sub(types.Siacoins(1)) - }, - "host missed output value should increase by the amount", - }} - - for _, change := range paymentRevisionChanges { - revCopy := revision - change.corrupt(&revCopy) - if err := ValidatePaymentRevision(fc, revCopy, types.ZeroCurrency); err.Error() != change.desc { - t.Fatalf("expected error %s, got %s", change.desc, err.Error()) - } - } -} diff --git a/v2/net/rhp/mdm.go b/v2/net/rhp/mdm.go deleted file mode 100644 index 7a893bf6..00000000 --- a/v2/net/rhp/mdm.go +++ /dev/null @@ -1,517 +0,0 @@ -package rhp - -import ( - "go.sia.tech/core/v2/net/rpc" - "go.sia.tech/core/v2/types" -) - -const ( - blocksPerYear = 144 * 365 -) - -// Specifiers for MDM instructions -var ( - SpecInstrAppendSector = rpc.NewSpecifier("AppendSector") - SpecInstrUpdateSector = rpc.NewSpecifier("UpdateSector") - SpecInstrDropSectors = rpc.NewSpecifier("DropSectors") - 
SpecInstrHasSector = rpc.NewSpecifier("HasSector") - SpecInstrReadOffset = rpc.NewSpecifier("ReadOffset") - SpecInstrReadSector = rpc.NewSpecifier("ReadSector") - SpecInstrContractRevision = rpc.NewSpecifier("Revision") - SpecInstrSectorRoots = rpc.NewSpecifier("SectorRoots") - SpecInstrSwapSector = rpc.NewSpecifier("SwapSector") - SpecInstrUpdateRegistry = rpc.NewSpecifier("UpdateRegistry") - SpecInstrReadRegistry = rpc.NewSpecifier("ReadRegistry") - SpecInstrReadRegistrySID = rpc.NewSpecifier("ReadRegistrySID") -) - -// An Instruction is a single instruction in an MDM program. -type Instruction interface { - isInstruction() - rpc.Object -} - -func (InstrAppendSector) isInstruction() {} -func (InstrUpdateSector) isInstruction() {} -func (InstrContractRevision) isInstruction() {} -func (InstrSectorRoots) isInstruction() {} -func (InstrDropSectors) isInstruction() {} -func (InstrHasSector) isInstruction() {} -func (InstrReadOffset) isInstruction() {} -func (InstrReadRegistry) isInstruction() {} -func (InstrReadSector) isInstruction() {} -func (InstrSwapSector) isInstruction() {} -func (InstrUpdateRegistry) isInstruction() {} - -// InstructionRequiresContract returns true if the instruction requires a -// contract to be locked. -func InstructionRequiresContract(i Instruction) bool { - switch i.(type) { - case *InstrAppendSector, - *InstrUpdateSector, - *InstrContractRevision, - *InstrSectorRoots, - *InstrDropSectors, - *InstrSwapSector: - return true - case *InstrHasSector, - *InstrReadOffset, - *InstrReadRegistry, - *InstrReadSector, - *InstrUpdateRegistry: - return false - } - panic("unahndled instruction") -} - -// InstructionRequiresFinalization returns true if the instruction results need -// to be committed to a contract. 
-func InstructionRequiresFinalization(i Instruction) bool { - switch i.(type) { - case *InstrAppendSector, - *InstrUpdateSector, - *InstrDropSectors, - *InstrSwapSector: - return true - case *InstrContractRevision, - *InstrSectorRoots, - *InstrHasSector, - *InstrReadOffset, - *InstrReadRegistry, - *InstrReadSector, - *InstrUpdateRegistry: - return false - } - panic("unahndled instruction") -} - -// InstrAppendSector uploads and appends a new sector to a contract -type InstrAppendSector struct { - SectorDataOffset uint64 - ProofRequired bool -} - -// MaxLen implements rpc.Object -func (i *InstrAppendSector) MaxLen() int { - return 9 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrAppendSector) EncodeTo(e *types.Encoder) { - e.WriteUint64(i.SectorDataOffset) - e.WriteBool(i.ProofRequired) -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrAppendSector) DecodeFrom(d *types.Decoder) { - i.SectorDataOffset = d.ReadUint64() - i.ProofRequired = d.ReadBool() -} - -// InstrUpdateSector uploads and appends a new sector to a contract -type InstrUpdateSector struct { - Offset uint64 - Length uint64 - DataOffset uint64 - ProofRequired bool -} - -// MaxLen implements rpc.Object -func (i *InstrUpdateSector) MaxLen() int { - return 25 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrUpdateSector) EncodeTo(e *types.Encoder) { - e.WriteUint64(i.Offset) - e.WriteUint64(i.Length) - e.WriteUint64(i.DataOffset) - e.WriteBool(i.ProofRequired) -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrUpdateSector) DecodeFrom(d *types.Decoder) { - i.Offset = d.ReadUint64() - i.Length = d.ReadUint64() - i.DataOffset = d.ReadUint64() - i.ProofRequired = d.ReadBool() -} - -// InstrContractRevision returns the latest revision of the program's contract. 
-type InstrContractRevision struct { -} - -// MaxLen implements rpc.Object -func (i *InstrContractRevision) MaxLen() int { - return 0 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrContractRevision) EncodeTo(e *types.Encoder) { -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrContractRevision) DecodeFrom(d *types.Decoder) { -} - -// InstrSectorRoots returns the program's sector roots -type InstrSectorRoots struct { -} - -// MaxLen implements rpc.Object -func (i *InstrSectorRoots) MaxLen() int { - return 0 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrSectorRoots) EncodeTo(e *types.Encoder) { -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrSectorRoots) DecodeFrom(d *types.Decoder) { -} - -// InstrDropSectors deletes a number of sectors from the end of the contract. -type InstrDropSectors struct { - SectorCountOffset uint64 - ProofRequired bool -} - -// MaxLen implements rpc.Object -func (i *InstrDropSectors) MaxLen() int { - return 9 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrDropSectors) EncodeTo(e *types.Encoder) { - e.WriteUint64(i.SectorCountOffset) - e.WriteBool(i.ProofRequired) -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrDropSectors) DecodeFrom(d *types.Decoder) { - i.SectorCountOffset = d.ReadUint64() - i.ProofRequired = d.ReadBool() -} - -// InstrHasSector returns true if the host has the given sector. -type InstrHasSector struct { - SectorRootOffset uint64 -} - -// MaxLen implements rpc.Object -func (i *InstrHasSector) MaxLen() int { - return 8 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. 
-func (i *InstrHasSector) EncodeTo(e *types.Encoder) { - e.WriteUint64(i.SectorRootOffset) -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrHasSector) DecodeFrom(d *types.Decoder) { - i.SectorRootOffset = d.ReadUint64() -} - -// InstrReadOffset reads len bytes from the contract at the given offset. -type InstrReadOffset struct { - DataOffset uint64 - LengthOffset uint64 - ProofRequired bool -} - -// MaxLen implements rpc.Object -func (i *InstrReadOffset) MaxLen() int { - return 17 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrReadOffset) EncodeTo(e *types.Encoder) { - e.WriteUint64(i.DataOffset) - e.WriteUint64(i.LengthOffset) - e.WriteBool(i.ProofRequired) -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrReadOffset) DecodeFrom(d *types.Decoder) { - i.DataOffset = d.ReadUint64() - i.LengthOffset = d.ReadUint64() - i.ProofRequired = d.ReadBool() -} - -// InstrReadRegistry reads the given registry key from the contract. -type InstrReadRegistry struct { - PublicKeyOffset uint64 - TweakOffset uint64 -} - -// MaxLen implements rpc.Object -func (i *InstrReadRegistry) MaxLen() int { - return 16 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrReadRegistry) EncodeTo(e *types.Encoder) { - e.WriteUint64(i.PublicKeyOffset) - e.WriteUint64(i.TweakOffset) -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrReadRegistry) DecodeFrom(d *types.Decoder) { - i.PublicKeyOffset = d.ReadUint64() - i.TweakOffset = d.ReadUint64() -} - -// InstrReadSector reads offset and len bytes of the sector. 
-type InstrReadSector struct { - RootOffset uint64 - SectorOffset uint64 - LengthOffset uint64 - ProofRequired bool -} - -// MaxLen implements rpc.Object -func (i *InstrReadSector) MaxLen() int { - return 25 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrReadSector) EncodeTo(e *types.Encoder) { - e.WriteUint64(i.RootOffset) - e.WriteUint64(i.SectorOffset) - e.WriteUint64(i.LengthOffset) - e.WriteBool(i.ProofRequired) -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrReadSector) DecodeFrom(d *types.Decoder) { - i.RootOffset = d.ReadUint64() - i.SectorOffset = d.ReadUint64() - i.LengthOffset = d.ReadUint64() - i.ProofRequired = d.ReadBool() -} - -// InstrSwapSector swaps two sectors by root in the contract. -type InstrSwapSector struct { - RootAOffset uint64 - RootBOffset uint64 - ProofRequired bool -} - -// MaxLen implements rpc.Object -func (i *InstrSwapSector) MaxLen() int { - return 17 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrSwapSector) EncodeTo(e *types.Encoder) { - e.WriteUint64(i.RootAOffset) - e.WriteUint64(i.RootBOffset) - e.WriteBool(i.ProofRequired) -} - -// DecodeFrom decodes an instruction from the provided decoder. Implements -// rpc.Object. -func (i *InstrSwapSector) DecodeFrom(d *types.Decoder) { - i.RootAOffset = d.ReadUint64() - i.RootBOffset = d.ReadUint64() - i.ProofRequired = d.ReadBool() -} - -// InstrUpdateRegistry updates a registry entry. -type InstrUpdateRegistry struct { - EntryOffset uint64 -} - -// MaxLen implements rpc.Object -func (i *InstrUpdateRegistry) MaxLen() int { - return 8 -} - -// EncodeTo encodes an instruction to the provided encoder. Implements -// rpc.Object. -func (i *InstrUpdateRegistry) EncodeTo(e *types.Encoder) { - e.WriteUint64(i.EntryOffset) -} - -// DecodeFrom decodes an instruction from the provided decoder. 
Implements -// rpc.Object. -func (i *InstrUpdateRegistry) DecodeFrom(d *types.Decoder) { - i.EntryOffset = d.ReadUint64() -} - -// ResourceUsage is the associated costs of executing an instruction set or -// individual instruction. -type ResourceUsage struct { - // BaseCost is the cost to execute the instruction and includes - // resource costs like memory and time. - BaseCost types.Currency - // StorageCost cost is charged after successful completion - // of the instruction and should be refunded if the program fails. - StorageCost types.Currency - // AdditionalCollateral cost is the additional collateral the host should - // add during program finalization - AdditionalCollateral types.Currency - - Memory uint64 - Time uint64 -} - -// Add returns the sum of r and b. -func (r ResourceUsage) Add(b ResourceUsage) (c ResourceUsage) { - c.BaseCost = r.BaseCost.Add(b.BaseCost) - c.StorageCost = r.StorageCost.Add(b.StorageCost) - c.AdditionalCollateral = r.AdditionalCollateral.Add(b.AdditionalCollateral) - - c.Memory += b.Memory - c.Time += b.Time - return c -} - -// resourceCost returns the cost of a program with the given data and time -func resourceCost(settings HostSettings, memory, time uint64) types.Currency { - return settings.ProgMemoryTimeCost.Mul64(memory * time) -} - -// writeCost returns the cost of writing the instructions data to disk. -func writeCost(settings HostSettings, n uint64) types.Currency { - // Atomic write size for modern disks is 4kib so we round up. - atomicWriteSize := uint64(1 << 12) - if mod := n % atomicWriteSize; mod != 0 { - n += (atomicWriteSize - mod) - } - return settings.ProgWriteCost.Mul64(n) -} - -// initCost returns the cost of initializing a program. 
-func initCost(settings HostSettings, data, instructions uint64) (costs ResourceUsage) { - time := 1 + instructions - costs.BaseCost = settings.ProgInitBaseCost.Add(resourceCost(settings, data, time)) - costs.Memory = 1 << 20 - return -} - -// finalizationCost returns the cost of finalizing a program. -// -// note: siad's finalize cost uses the program's total memory usage, but memory -// cost is already included in the instruction's base cost. -func finalizationCost(settings HostSettings) (costs ResourceUsage) { - costs.Memory = 1000 - costs.Time = 50000 - costs.BaseCost = resourceCost(settings, costs.Memory, costs.Time) - return -} - -// ExecutionCost returns the cost of initializing and, optionally, finalizing a -// program. -func ExecutionCost(settings HostSettings, data, instructions uint64, requiresFinalization bool) (costs ResourceUsage) { - costs = initCost(settings, data, instructions) - if requiresFinalization { - costs = costs.Add(finalizationCost(settings)) - } - return -} - -// AppendSectorCost returns the cost of the append sector instruction. -func AppendSectorCost(settings HostSettings, duration uint64) (costs ResourceUsage) { - costs.Memory = SectorSize - costs.Time = 10000 - - // base cost is cost of writing 1 sector and storing 1 sector in memory. - // note: in siad the memory cost is calculated using the program's total - // memory, here I've opted to use only the instruction's memory. - costs.BaseCost = settings.InstrAppendSectorBaseCost.Add(writeCost(settings, SectorSize)).Add(resourceCost(settings, costs.Memory, costs.Time)) - // storage cost is the cost of storing 1 sector for the remaining duration. - costs.StorageCost = settings.StoragePrice.Mul64(SectorSize * duration) - // additional collateral is the collateral the host is expected to put up - // per sector per block. - // note: in siad the additional collateral does not consider remaining - // duration. 
- costs.AdditionalCollateral = settings.Collateral.Mul64(SectorSize * duration) - return -} - -// UpdateSectorCost returns the cost of the update instruction. -func UpdateSectorCost(settings HostSettings, l uint64) (costs ResourceUsage) { - costs.Memory = l + SectorSize - costs.Time = 10000 - - // base cost is cost of reading and writing 1 sector - costs = ReadCost(settings, SectorSize) - costs.BaseCost = costs.BaseCost.Add(settings.InstrUpdateSectorBaseCost).Add(writeCost(settings, SectorSize)).Add(resourceCost(settings, costs.Memory, costs.Time)) - return -} - -// DropSectorsCost returns the cost of the drop sectors instruction. -func DropSectorsCost(settings HostSettings, n uint64) (costs ResourceUsage) { - costs.BaseCost = settings.InstrDropSectorsUnitCost.Mul64(n).Add(settings.InstrDropSectorsBaseCost) - return -} - -// HasSectorCost returns the cost of the has sector instruction. -func HasSectorCost(settings HostSettings) (costs ResourceUsage) { - costs.BaseCost = settings.InstrHasSectorBaseCost - return -} - -// ReadCost returns the cost of the read instruction. -func ReadCost(settings HostSettings, l uint64) (costs ResourceUsage) { - costs.BaseCost = settings.ProgReadCost.Mul64(l).Add(settings.InstrReadBaseCost) - return -} - -// RevisionCost returns the cost of the revision instruction. -func RevisionCost(settings HostSettings) (costs ResourceUsage) { - costs.BaseCost = settings.InstrRevisionBaseCost - return -} - -// SectorRootsCost returns the cost of executing the contract roots instruction. -func SectorRootsCost(settings HostSettings, sectors uint64) (costs ResourceUsage) { - costs.Memory = 32 * sectors - costs.Time = 10000 - costs.BaseCost = settings.InstrSectorRootsBaseCost.Add(resourceCost(settings, costs.Memory, costs.Time)) - return -} - -// SwapSectorCost returns the cost of the swap sector instruction. 
-func SwapSectorCost(settings HostSettings) (costs ResourceUsage) { - costs.BaseCost = settings.InstrSwapSectorBaseCost - return -} - -// UpdateRegistryCost returns the cost of the update registry instruction. -func UpdateRegistryCost(settings HostSettings) (costs ResourceUsage) { - costs.BaseCost = writeCost(settings, 256).Add(settings.InstrUpdateRegistryBaseCost) - // storing 256 bytes for 5 years - costs.StorageCost = settings.StoragePrice.Mul64(256 * 5 * blocksPerYear) - return -} - -// ReadRegistryCost returns the cost of the read registry instruction. -func ReadRegistryCost(settings HostSettings) (costs ResourceUsage) { - costs.BaseCost = writeCost(settings, 256).Add(settings.InstrReadRegistryBaseCost) - // storing 256 bytes for 10 years - costs.StorageCost = settings.StoragePrice.Mul64(256 * 10 * blocksPerYear) - return -} diff --git a/v2/net/rhp/merkle.go b/v2/net/rhp/merkle.go deleted file mode 100644 index 3547acb7..00000000 --- a/v2/net/rhp/merkle.go +++ /dev/null @@ -1,412 +0,0 @@ -package rhp - -import ( - "bytes" - "errors" - "io" - "math" - "math/bits" - "unsafe" - - "go.sia.tech/core/v2/internal/blake2b" - "go.sia.tech/core/v2/types" -) - -// Most of these algorithms are derived from "Streaming Merkle Proofs within -// Binary Numeral Trees", available at https://eprint.iacr.org/2021/038 - -const ( - // SectorSize is the size of one sector in bytes. - SectorSize = 1 << 22 // 4 MiB - - // LeafSize is the size of one leaf in bytes. - LeafSize = 64 - - // LeavesPerSector is the number of leaves in one sector. - LeavesPerSector = SectorSize / LeafSize -) - -// Check that LeafSize == len(types.StorageProof{}.Leaf). We *could* define -// LeafSize = len(types.StorageProof{}.Leaf), but then it would be an int -// instead of a an untyped constant. -var _ [LeafSize]byte = [len(types.StorageProof{}.Leaf)]byte{} - -// A proofAccumulator is a specialized accumulator for building and verifying -// Merkle proofs. 
-type proofAccumulator struct { - trees [64]types.Hash256 - numLeaves uint64 -} - -func (pa *proofAccumulator) hasNodeAtHeight(height int) bool { - return pa.numLeaves&(1<>2)&(1<<(len(sa.trees)-i-1)) != 0 -} - -func (sa *sectorAccumulator) appendNode(h types.Hash256) { - sa.nodeBuf[sa.numLeaves%4] = h - sa.numLeaves++ - if sa.numLeaves%4 == 0 { - sa.numLeaves -= 4 // hack: offset mergeNodeBuf adding 4 - sa.mergeNodeBuf() - } -} - -func (sa *sectorAccumulator) appendLeaves(leaves []byte) { - if len(leaves)%LeafSize != 0 { - panic("appendLeaves: illegal input size") - } - rem := len(leaves) % (LeafSize * 4) - for i := 0; i < len(leaves)-rem; i += LeafSize * 4 { - blake2b.SumLeaves(&sa.nodeBuf, (*[4][64]byte)(unsafe.Pointer(&leaves[i]))) - sa.mergeNodeBuf() - } - for i := len(leaves) - rem; i < len(leaves); i += LeafSize { - sa.appendNode(blake2b.SumLeaf((*[64]byte)(unsafe.Pointer(&leaves[i])))) - } -} - -func (sa *sectorAccumulator) mergeNodeBuf() { - // same as in proofAccumulator, except that we operate on 8 nodes at a time, - // exploiting the fact that the two groups of 4 are contiguous in memory - nodes := &sa.nodeBuf - i := len(sa.trees) - 1 - for ; sa.hasNodeAtHeight(i); i-- { - blake2b.SumNodes(&sa.trees[i], (*[8][32]byte)(unsafe.Pointer(&sa.trees[i]))) - nodes = &sa.trees[i] - } - sa.trees[i] = *nodes - sa.numLeaves += 4 -} - -func (sa *sectorAccumulator) root() types.Hash256 { - if sa.numLeaves == 0 { - return types.Hash256{} - } - - // helper function for computing the root of four subtrees - root4 := func(nodes [4][32]byte) types.Hash256 { - // NOTE: it would be more efficient to mutate sa.trees directly, but - // that would make root non-idempotent - in := (*[8][32]byte)(unsafe.Pointer(&[2][4][32]byte{0: nodes})) - out := (*[4][32]byte)(unsafe.Pointer(in)) - blake2b.SumNodes(out, in) - blake2b.SumNodes(out, in) - return out[0] - } - - i := len(sa.trees) - 1 - bits.TrailingZeros32(sa.numLeaves>>2) - var root types.Hash256 - switch sa.numLeaves % 4 { - 
case 0: - root = root4(sa.trees[i]) - i-- - case 1: - root = sa.nodeBuf[0] - case 2: - root = blake2b.SumPair(sa.nodeBuf[0], sa.nodeBuf[1]) - case 3: - root = blake2b.SumPair(blake2b.SumPair(sa.nodeBuf[0], sa.nodeBuf[1]), sa.nodeBuf[2]) - } - for ; i >= 0; i-- { - if sa.hasNodeAtHeight(i) { - root = blake2b.SumPair(root4(sa.trees[i]), root) - } - } - return root -} - -// SectorRoot computes the Merkle root of a sector. -func SectorRoot(sector *[SectorSize]byte) types.Hash256 { - var sa sectorAccumulator - sa.appendLeaves(sector[:]) - return sa.root() -} - -// ReaderRoot returns the Merkle root of the supplied stream, which must contain -// an integer multiple of leaves. -func ReaderRoot(r io.Reader) (types.Hash256, error) { - var s sectorAccumulator - leafBatch := make([]byte, LeafSize*16) - for { - n, err := io.ReadFull(r, leafBatch) - if err == io.EOF { - break - } else if err == io.ErrUnexpectedEOF { - if n%LeafSize != 0 { - return types.Hash256{}, errors.New("stream does not contain integer multiple of leaves") - } - } else if err != nil { - return types.Hash256{}, err - } - s.appendLeaves(leafBatch[:n]) - } - return s.root(), nil -} - -// ReadSector reads a single sector from r and calculates its root. -func ReadSector(r io.Reader) (types.Hash256, *[SectorSize]byte, error) { - var sector [SectorSize]byte - buf := bytes.NewBuffer(sector[:0]) - root, err := ReaderRoot(io.TeeReader(io.LimitReader(r, SectorSize), buf)) - if buf.Len() != SectorSize { - return types.Hash256{}, nil, io.ErrUnexpectedEOF - } - return root, §or, err -} - -// MetaRoot calculates the root of a set of existing Merkle roots. -func MetaRoot(roots []types.Hash256) types.Hash256 { - // sectorAccumulator is only designed to store one sector's worth of leaves, - // so we'll panic if we insert more than leavesPerSector leaves. To - // compensate, call MetaRoot recursively. 
- if len(roots) <= LeavesPerSector { - var sa sectorAccumulator - for _, r := range roots { - sa.appendNode(r) - } - return sa.root() - } - // split at largest power of two - split := 1 << (bits.Len(uint(len(roots)-1)) - 1) - return blake2b.SumPair(MetaRoot(roots[:split]), MetaRoot(roots[split:])) -} - -// ProofSize returns the size of a Merkle proof for the leaf i within a tree -// containing n leaves. -func ProofSize(n, i uint64) uint64 { - return RangeProofSize(n, i, i+1) -} - -// RangeProofSize returns the size of a Merkle proof for the leaf range [start, -// end) within a tree containing n leaves. -func RangeProofSize(n, start, end uint64) uint64 { - leftHashes := bits.OnesCount64(start) - pathMask := uint64(1)< max { - return 1 << max - } - return 1 << ideal -} - -// BuildProof constructs a proof for the segment range [start, end). If a non- -// nil precalc function is provided, it will be used to supply precalculated -// subtree Merkle roots. For example, if the root of the left half of the -// Merkle tree is precomputed, precalc should return it for i == 0 and j == -// SegmentsPerSector/2. If a precalculated root is not available, precalc -// should return the zero hash. -func BuildProof(sector *[SectorSize]byte, start, end uint64, precalc func(i, j uint64) types.Hash256) []types.Hash256 { - if end > LeavesPerSector || start > end || start == end { - panic("BuildProof: illegal proof range") - } - if precalc == nil { - precalc = func(i, j uint64) (h types.Hash256) { return } - } - - // define a helper function for later - var s sectorAccumulator - subtreeRoot := func(i, j uint64) types.Hash256 { - s.reset() - s.appendLeaves(sector[i*LeafSize : j*LeafSize]) - return s.root() - } - - // we build the proof by recursively enumerating subtrees, left to right. - // If a subtree is inside the segment range, we can skip it (because the - // verifier has the segments); otherwise, we add its Merkle root to the - // proof. 
- // - // NOTE: this operation might be a little tricky to understand because - // it's a recursive function with side effects (appending to proof), but - // this is the simplest way I was able to implement it. Namely, it has the - // important advantage of being symmetrical to the Verify operation. - proof := make([]types.Hash256, 0, ProofSize(LeavesPerSector, start)) - var rec func(uint64, uint64) - rec = func(i, j uint64) { - if i >= start && j <= end { - // this subtree contains only data segments; skip it - } else if j <= start || i >= end { - // this subtree does not contain any data segments; add its Merkle - // root to the proof. If we have a precalculated root, use that; - // otherwise, calculate it from scratch. - if h := precalc(i, j); h != (types.Hash256{}) { - proof = append(proof, h) - } else { - proof = append(proof, subtreeRoot(i, j)) - } - } else { - // this subtree partially overlaps the data segments; split it - // into two subtrees and recurse on each - mid := (i + j) / 2 - rec(i, mid) - rec(mid, j) - } - } - rec(0, LeavesPerSector) - return proof -} - -// BuildSectorRangeProof constructs a proof for the sector range [start, end). -func BuildSectorRangeProof(sectorRoots []types.Hash256, start, end uint64) []types.Hash256 { - numLeaves := uint64(len(sectorRoots)) - if numLeaves == 0 { - return nil - } else if end > numLeaves || start > end || start == end { - panic("BuildSectorRangeProof: illegal proof range") - } - - proof := make([]types.Hash256, 0, ProofSize(numLeaves, start)) - buildRange := func(i, j uint64) { - for i < j && i < numLeaves { - subtreeSize := nextSubtreeSize(i, j) - if i+subtreeSize > numLeaves { - subtreeSize = numLeaves - i - } - proof = append(proof, MetaRoot(sectorRoots[i:][:subtreeSize])) - i += subtreeSize - } - } - buildRange(0, start) - buildRange(end, math.MaxInt32) - return proof -} - -// A RangeProofVerifier allows range proofs to be verified in streaming fashion. 
-type RangeProofVerifier struct { - start, end uint64 - roots []types.Hash256 -} - -// ReadFrom implements io.ReaderFrom. -func (rpv *RangeProofVerifier) ReadFrom(r io.Reader) (int64, error) { - var total int64 - i, j := rpv.start, rpv.end - for i < j { - subtreeSize := nextSubtreeSize(i, j) - n := int64(subtreeSize * LeafSize) - root, err := ReaderRoot(io.LimitReader(r, n)) - if err != nil { - return total, err - } - total += n - rpv.roots = append(rpv.roots, root) - i += subtreeSize - } - return total, nil -} - -// Verify verifies the supplied proof, using the data ingested from ReadFrom. -func (rpv *RangeProofVerifier) Verify(proof []types.Hash256, root types.Hash256) bool { - if uint64(len(proof)) != RangeProofSize(LeavesPerSector, rpv.start, rpv.end) { - return false - } - var acc proofAccumulator - consume := func(roots *[]types.Hash256, i, j uint64) { - for i < j && len(*roots) > 0 { - subtreeSize := nextSubtreeSize(i, j) - height := bits.TrailingZeros(uint(subtreeSize)) // log2 - acc.insertNode((*roots)[0], height) - *roots = (*roots)[1:] - i += subtreeSize - } - } - consume(&proof, 0, rpv.start) - consume(&rpv.roots, rpv.start, rpv.end) - consume(&proof, rpv.end, LeavesPerSector) - return acc.root() == root -} - -// NewRangeProofVerifier returns a RangeProofVerifier for the sector range -// [start, end). -func NewRangeProofVerifier(start, end uint64) *RangeProofVerifier { - return &RangeProofVerifier{ - start: start, - end: end, - } -} - -// VerifyAppendProof verifies a proof produced by BuildAppendProof. 
-func VerifyAppendProof(numLeaves uint64, treeHashes []types.Hash256, sectorRoot, oldRoot, newRoot types.Hash256) bool { - acc := proofAccumulator{numLeaves: numLeaves} - for i := range acc.trees { - if acc.hasNodeAtHeight(i) && len(treeHashes) > 0 { - acc.trees[i] = treeHashes[0] - treeHashes = treeHashes[1:] - } - } - if acc.root() != oldRoot { - return false - } - acc.insertNode(sectorRoot, 0) - return acc.root() == newRoot -} diff --git a/v2/net/rhp/merkle_test.go b/v2/net/rhp/merkle_test.go deleted file mode 100644 index 6b67cb12..00000000 --- a/v2/net/rhp/merkle_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package rhp - -import ( - "bytes" - "math/bits" - "reflect" - "testing" - - "go.sia.tech/core/v2/types" - "golang.org/x/crypto/blake2b" - "lukechampine.com/frand" -) - -func leafHash(seg []byte) types.Hash256 { - return blake2b.Sum256(append([]byte{0}, seg...)) -} - -func nodeHash(left, right types.Hash256) types.Hash256 { - return blake2b.Sum256(append([]byte{1}, append(left[:], right[:]...)...)) -} - -func refSectorRoot(sector *[SectorSize]byte) types.Hash256 { - roots := make([]types.Hash256, LeavesPerSector) - for i := range roots { - roots[i] = leafHash(sector[i*LeafSize:][:LeafSize]) - } - return recNodeRoot(roots) -} - -func recNodeRoot(roots []types.Hash256) types.Hash256 { - switch len(roots) { - case 0: - return types.Hash256{} - case 1: - return roots[0] - default: - // split at largest power of two - split := 1 << (bits.Len(uint(len(roots)-1)) - 1) - return nodeHash( - recNodeRoot(roots[:split]), - recNodeRoot(roots[split:]), - ) - } -} - -func TestSectorRoot(t *testing.T) { - // test some known roots - var sector [SectorSize]byte - if SectorRoot(§or).String() != "h:50ed59cecd5ed3ca9e65cec0797202091dbba45272dafa3faa4e27064eedd52c" { - t.Error("wrong Merkle root for empty sector") - } - sector[0] = 1 - if SectorRoot(§or).String() != "h:8c20a2c90a733a5139cc57e45755322e304451c3434b0c0a0aad87f2f89a44ab" { - t.Error("wrong Merkle root for sector[0] = 1") 
- } - sector[0] = 0 - sector[SectorSize-1] = 1 - if SectorRoot(§or).String() != "h:d0ab6691d76750618452e920386e5f6f98fdd1219a70a06f06ef622ac6c6373c" { - t.Error("wrong Merkle root for sector[SectorSize-1] = 1") - } - - // test some random roots against a reference implementation - for i := 0; i < 5; i++ { - frand.Read(sector[:]) - if SectorRoot(§or) != refSectorRoot(§or) { - t.Error("SectorRoot does not match reference implementation") - } - } - - // SectorRoot should not allocate - allocs := testing.AllocsPerRun(5, func() { - _ = SectorRoot(§or) - }) - if allocs > 0 { - t.Error("expected SectorRoot to allocate 0 times, got", allocs) - } -} - -func BenchmarkSectorRoot(b *testing.B) { - b.ReportAllocs() - var sector [SectorSize]byte - b.SetBytes(SectorSize) - for i := 0; i < b.N; i++ { - _ = SectorRoot(§or) - } -} - -func TestMetaRoot(t *testing.T) { - // test some known roots - if MetaRoot(nil) != (types.Hash256{}) { - t.Error("wrong Merkle root for empty tree") - } - roots := make([]types.Hash256, 1) - roots[0] = frand.Entropy256() - if MetaRoot(roots) != roots[0] { - t.Error("wrong Merkle root for single root") - } - roots = make([]types.Hash256, 32) - if MetaRoot(roots).String() != "h:1c23727030051d1bba1c887273addac2054afbd6926daddef6740f4f8bf1fb7f" { - t.Error("wrong Merkle root for 32 empty roots") - } - roots[0][0] = 1 - if MetaRoot(roots).String() != "h:c5da05749139505704ea18a5d92d46427f652ac79c5f5712e4aefb68e20dffb8" { - t.Error("wrong Merkle root for roots[0][0] = 1") - } - - // test some random roots against a reference implementation - for i := 0; i < 5; i++ { - for j := range roots { - roots[j] = frand.Entropy256() - } - if MetaRoot(roots) != recNodeRoot(roots) { - t.Error("MetaRoot does not match reference implementation") - } - } - // test some random tree sizes - for i := 0; i < 10; i++ { - roots := make([]types.Hash256, frand.Intn(LeavesPerSector)) - if MetaRoot(roots) != recNodeRoot(roots) { - t.Error("MetaRoot does not match reference 
implementation") - } - } - - roots = roots[:5] - if MetaRoot(roots) != recNodeRoot(roots) { - t.Error("MetaRoot does not match reference implementation") - } - - allocs := testing.AllocsPerRun(10, func() { - _ = MetaRoot(roots) - }) - if allocs > 0 { - t.Error("expected MetaRoot to allocate 0 times, got", allocs) - } - - // test a massive number of roots, larger than a single stack can store - const sectorsPerTerabyte = 262145 - roots = make([]types.Hash256, sectorsPerTerabyte) - if MetaRoot(roots) != recNodeRoot(roots) { - t.Error("MetaRoot does not match reference implementation") - } -} - -func BenchmarkMetaRoot1TB(b *testing.B) { - const sectorsPerTerabyte = 262144 - roots := make([]types.Hash256, sectorsPerTerabyte) - b.SetBytes(sectorsPerTerabyte * 32) - for i := 0; i < b.N; i++ { - _ = MetaRoot(roots) - } -} - -func TestProofAccumulator(t *testing.T) { - var pa proofAccumulator - - // test some known roots - if pa.root() != (types.Hash256{}) { - t.Error("wrong root for empty accumulator") - } - - roots := make([]types.Hash256, 32) - for _, root := range roots { - pa.insertNode(root, 0) - } - if pa.root().String() != "h:1c23727030051d1bba1c887273addac2054afbd6926daddef6740f4f8bf1fb7f" { - t.Error("wrong root for 32 empty roots") - } - - pa = proofAccumulator{} - roots[0][0] = 1 - for _, root := range roots { - pa.insertNode(root, 0) - } - if pa.root().String() != "h:c5da05749139505704ea18a5d92d46427f652ac79c5f5712e4aefb68e20dffb8" { - t.Error("wrong root for roots[0][0] = 1") - } - - // test some random roots against a reference implementation - for i := 0; i < 5; i++ { - var pa proofAccumulator - for j := range roots { - roots[j] = frand.Entropy256() - pa.insertNode(roots[j], 0) - } - if pa.root() != recNodeRoot(roots) { - t.Error("root does not match reference implementation") - } - } - - // test an odd number of roots - pa = proofAccumulator{} - roots = roots[:5] - for _, root := range roots { - pa.insertNode(root, 0) - } - refRoot := 
recNodeRoot([]types.Hash256{recNodeRoot(roots[:4]), roots[4]}) - if pa.root() != refRoot { - t.Error("root does not match reference implementation") - } -} - -func TestBuildProof(t *testing.T) { - // test some known proofs - var sector [SectorSize]byte - frand.Read(sector[:]) - sectorRoot := SectorRoot(§or) - segmentRoots := make([]types.Hash256, LeavesPerSector) - for i := range segmentRoots { - segmentRoots[i] = leafHash(sector[i*LeafSize:][:LeafSize]) - } - - proof := BuildProof(§or, 0, LeavesPerSector, nil) - if len(proof) != 0 { - t.Error("BuildProof constructed an incorrect proof for the entire sector") - } else if RangeProofSize(LeavesPerSector, 0, LeavesPerSector) != uint64(len(proof)) { - t.Error("wrong RangeProofSize for entire sector") - } - - proof = BuildProof(§or, 0, 1, nil) - if ProofSize(LeavesPerSector, 0) != uint64(len(proof)) { - t.Error("wrong RangeProofSize for first leaf") - } - hash := leafHash(sector[:64]) - for i := range proof { - hash = nodeHash(hash, proof[i]) - } - if hash != sectorRoot { - t.Error("BuildProof constructed an incorrect proof for the first segment") - } - - proof = BuildProof(§or, LeavesPerSector-1, LeavesPerSector, nil) - if ProofSize(LeavesPerSector, LeavesPerSector-1) != uint64(len(proof)) { - t.Error("wrong RangeProofSize for last leaf") - } - hash = leafHash(sector[len(sector)-64:]) - for i := range proof { - hash = nodeHash(proof[len(proof)-i-1], hash) - } - if hash != sectorRoot { - t.Error("BuildProof constructed an incorrect proof for the last segment") - } - - proof = BuildProof(§or, 10, 11, nil) - if ProofSize(LeavesPerSector, 10) != uint64(len(proof)) { - t.Error("wrong RangeProofSize for leaf 10") - } - hash = leafHash(sector[10*64:][:64]) - hash = nodeHash(hash, proof[2]) - hash = nodeHash(proof[1], hash) - hash = nodeHash(hash, proof[3]) - hash = nodeHash(proof[0], hash) - for i := 4; i < len(proof); i++ { - hash = nodeHash(hash, proof[i]) - } - if hash != sectorRoot { - t.Error("BuildProof constructed an 
incorrect proof for a middle segment") - } - - // this is the largest possible proof - var midl, midr uint64 = LeavesPerSector/2 - 1, LeavesPerSector/2 + 1 - proof = BuildProof(§or, midl, midr, nil) - if RangeProofSize(LeavesPerSector, midl, midr) != uint64(len(proof)) { - t.Error("wrong RangeProofSize for middle leaves") - } - left := leafHash(sector[midl*64:][:64]) - for i := 0; i < len(proof)/2; i++ { - left = nodeHash(proof[len(proof)/2-i-1], left) - } - right := leafHash(sector[(midr-1)*64:][:64]) - for i := len(proof) / 2; i < len(proof); i++ { - right = nodeHash(right, proof[i]) - } - if nodeHash(left, right) != sectorRoot { - t.Error("BuildProof constructed an incorrect proof for worst-case inputs") - } - - // test a proof with precomputed inputs - leftRoots := make([]types.Hash256, LeavesPerSector/2) - for i := range leftRoots { - leftRoots[i] = leafHash(sector[i*LeafSize:][:LeafSize]) - } - left = MetaRoot(leftRoots) - precalc := func(i, j uint64) (h types.Hash256) { - if i == 0 && j == LeavesPerSector/2 { - h = left - } - return - } - proof = BuildProof(§or, LeavesPerSector-1, LeavesPerSector, precalc) - recalcProof := BuildProof(§or, LeavesPerSector-1, LeavesPerSector, nil) - if !reflect.DeepEqual(proof, recalcProof) { - t.Fatal("precalc failed") - } -} - -func TestBuildSectorRangeProof(t *testing.T) { - // test some known proofs - sectorRoots := make([]types.Hash256, 16) - for i := range sectorRoots { - sectorRoots[i] = frand.Entropy256() - } - - proof := BuildSectorRangeProof(sectorRoots, 0, uint64(len(sectorRoots))) - if len(proof) != 0 { - t.Error("BuildSectorRangeProof constructed an incorrect proof for the entire tree") - } - - proof = BuildSectorRangeProof(sectorRoots[:2], 0, 1) - hash := nodeHash(sectorRoots[0], proof[0]) - if hash != MetaRoot(sectorRoots[:2]) { - t.Error("BuildSectorRangeProof constructed an incorrect proof for the first sector") - } - - proof = BuildSectorRangeProof(sectorRoots[:4], 0, 2) - hash = nodeHash(sectorRoots[0], 
sectorRoots[1]) - hash = nodeHash(hash, proof[0]) - if hash != MetaRoot(sectorRoots[:4]) { - t.Error("BuildSectorRangeProof constructed an incorrect proof for the first two sectors") - } - - proof = BuildSectorRangeProof(sectorRoots[:5], 0, 2) - hash = nodeHash(sectorRoots[0], sectorRoots[1]) - hash = nodeHash(hash, proof[0]) - hash = nodeHash(hash, proof[1]) - if hash != MetaRoot(sectorRoots[:5]) { - t.Error("BuildSectorRangeProof constructed an incorrect proof for the first two sectors") - } - - // this is the largest possible proof - proof = BuildSectorRangeProof(sectorRoots, 7, 9) - left := sectorRoots[7] - left = nodeHash(proof[2], left) - left = nodeHash(proof[1], left) - left = nodeHash(proof[0], left) - right := sectorRoots[8] - right = nodeHash(right, proof[3]) - right = nodeHash(right, proof[4]) - right = nodeHash(right, proof[5]) - hash = nodeHash(left, right) - if hash != MetaRoot(sectorRoots) { - t.Error("BuildProof constructed an incorrect proof for worst-case inputs") - } -} - -func TestReadSector(t *testing.T) { - var expected [SectorSize]byte - frand.Read(expected[:256]) - buf := bytes.NewBuffer(nil) - buf.Write(expected[:]) - - expectedRoot := refSectorRoot(&expected) - root, sector, err := ReadSector(buf) - if err != nil { - t.Fatal(err) - } else if expectedRoot != root { - t.Fatalf("incorrect root: expected %s, got %s", expected, root) - } else if !bytes.Equal(sector[:], expected[:]) { - t.Fatalf("incorrect data: expected %v, got %v", expected, sector) - } - - buf.Reset() - buf.Write(expected[:len(expected)-100]) - _, _, err = ReadSector(buf) - if err == nil { - t.Fatal("expected read error") - } -} - -func BenchmarkReadSector(b *testing.B) { - buf := bytes.NewBuffer(nil) - buf.Grow(SectorSize) - - sector := make([]byte, SectorSize) - frand.Read(sector[:256]) - - b.SetBytes(SectorSize) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - buf.Reset() - buf.Write(sector) - _, _, err := ReadSector(buf) - if err != nil { - b.Fatal(err) - } - } -} diff 
--git a/v2/net/rhp/registry.go b/v2/net/rhp/registry.go deleted file mode 100644 index 213d467f..00000000 --- a/v2/net/rhp/registry.go +++ /dev/null @@ -1,187 +0,0 @@ -package rhp - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - - "go.sia.tech/core/v2/types" -) - -const ( - // EntryTypeArbitrary is a registry value where all data is arbitrary. - EntryTypeArbitrary = iota + 1 - // EntryTypePubKey is a registry value where the first 20 bytes of data - // corresponds to the hash of a host's public key. - EntryTypePubKey -) - -const ( - // MaxValueDataSize is the maximum size of a Value's Data - // field. - MaxValueDataSize = 113 -) - -// A RegistryValue is stored in the host registry. -type RegistryValue struct { - Tweak types.Hash256 - Data []byte - Revision uint64 - Type uint8 - - PublicKey types.PublicKey - Signature types.Signature -} - -// Key returns the key for the registry value. -func (r *RegistryValue) Key() types.Hash256 { - return RegistryKey(r.PublicKey, r.Tweak) -} - -// Hash returns the hash of the Value used for signing -// the entry. -func (r *RegistryValue) Hash() types.Hash256 { - h := types.NewHasher() - - h.E.Write(r.Tweak[:]) - h.E.WriteBytes(r.Data) - h.E.WriteUint64(r.Revision) - h.E.WriteUint64(uint64(r.Type)) - - return h.Sum() -} - -// Work returns the work of a Value. -func (r *RegistryValue) Work() types.Work { - var data []byte - switch r.Type { - case EntryTypePubKey: - // for public key entries the first 20 bytes represent the - // public key of the host, ignore it for work calculations. - data = r.Data[20:] - default: - data = r.Data - } - - h := types.NewHasher() - - h.E.Write(r.Tweak[:]) - h.E.WriteBytes(data) - h.E.WriteUint64(r.Revision) - - return types.WorkRequiredForHash(types.BlockID(h.Sum())) -} - -// MaxLen returns the maximum length of an encoded Value. Implements -// rpc.Object. 
-func (r *RegistryValue) MaxLen() int { - return 32 + 8 + MaxValueDataSize + 8 + 1 + 32 + 64 -} - -// EncodeTo encodes a Value to an Encoder. Implements types.EncoderTo. -func (r *RegistryValue) EncodeTo(e *types.Encoder) { - r.Tweak.EncodeTo(e) - e.WriteBytes(r.Data) - e.WriteUint64(r.Revision) - e.WriteUint8(r.Type) - r.PublicKey.EncodeTo(e) - r.Signature.EncodeTo(e) -} - -// DecodeFrom decodes a Value from a Decoder. Implements types.DecoderFrom. -func (r *RegistryValue) DecodeFrom(d *types.Decoder) { - r.Tweak.DecodeFrom(d) - r.Data = make([]byte, d.ReadPrefix()) - d.Read(r.Data) - r.Revision = d.ReadUint64() - r.Type = d.ReadUint8() - r.PublicKey.DecodeFrom(d) - r.Signature.DecodeFrom(d) -} - -// RegistryKey is the unique key for a RegistryValue. -func RegistryKey(pub types.PublicKey, tweak types.Hash256) types.Hash256 { - // v1 compat registry key - // ed25519 specifier + LE uint64 pub key length + public key + tweak - buf := make([]byte, 16+8+32+32) - copy(buf, "ed25519") - binary.LittleEndian.PutUint64(buf[16:], 32) - copy(buf[24:], pub[:]) - copy(buf[56:], tweak[:]) - return types.HashBytes(buf) -} - -// RegistryHostID returns the ID hash of the host for primary registry entries. -func RegistryHostID(pub types.PublicKey) types.Hash256 { - // v1 compat host public key hash - // ed25519 specifier + LE uint64 pub key length + public key - buf := make([]byte, 16+8+32) - copy(buf, "ed25519") - binary.LittleEndian.PutUint64(buf[16:], 32) - copy(buf[24:], pub[:]) - return types.HashBytes(buf) -} - -// ValidateRegistryEntry validates the fields of a registry entry. -func ValidateRegistryEntry(value RegistryValue) (err error) { - switch value.Type { - case EntryTypeArbitrary: - break // no extra validation required - case EntryTypePubKey: - // pub key entries have the first 20 bytes of the host's pub key hash - // prefixed to the data. 
- if len(value.Data) < 20 { - return errors.New("expected host public key hash") - } - default: - return fmt.Errorf("invalid registry value type: %d", value.Type) - } - - switch { - case !value.PublicKey.VerifyHash(value.Hash(), value.Signature): - return errors.New("registry value signature invalid") - case len(value.Data) > MaxValueDataSize: - return fmt.Errorf("registry value too large: %d", len(value.Data)) - } - - return nil -} - -// ValidateRegistryUpdate validates a registry update against the current entry. -// An updated registry entry must have a greater revision number, more work, or -// be replacing a non-primary registry entry. -func ValidateRegistryUpdate(old, update RegistryValue, hostID types.Hash256) error { - // if the new revision is greater than the current revision, the update is - // valid. - if update.Revision > old.Revision { - return nil - } else if update.Revision < old.Revision { - return errors.New("update revision must be greater than current revision") - } - - // if the revision number is the same, but the work is greater, the update - // is valid. - if w := update.Work().Cmp(old.Work()); w > 0 { - return nil - } else if w < 0 { - return errors.New("update must have greater work or greater revision number than current entry") - } - - // if the update entry is an arbitrary value entry, the update is invalid. - if update.Type == EntryTypeArbitrary { - return errors.New("update must be a primary entry or have a greater revision number") - } - - // if the updated entry is not a primary entry, it is invalid. 
- if !bytes.Equal(update.Data[:20], hostID[:20]) { - return errors.New("update must be a primary entry or have a greater revision number") - } - - // if the update and current entry are both primary, the update is invalid - if old.Type == EntryTypePubKey && bytes.Equal(old.Data[:20], hostID[:20]) { - return errors.New("update revision must be greater than current revision") - } - - return nil -} diff --git a/v2/net/rhp/registry_test.go b/v2/net/rhp/registry_test.go deleted file mode 100644 index e8d82e4e..00000000 --- a/v2/net/rhp/registry_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package rhp - -import ( - "encoding/hex" - "testing" - - "go.sia.tech/core/v2/types" -) - -func mustParseKey(key string) (p [32]byte) { - n, err := hex.Decode(p[:], []byte(key)) - if err != nil { - panic(err) - } else if n != 32 { - panic("invalid key") - } - return -} - -// TestRegistryKeyCompat tests that registry keys remain compatible with Sia v1. -// Keys were generated using go.sia.tech/siad/modules.DeriveRegistryEntryID -func TestRegistryKeyCompat(t *testing.T) { - tests := []struct { - pub types.PublicKey - tweak types.Hash256 - want types.Hash256 - }{ - { - mustParseKey("8cde791eae011cdd06066f6c3518fdc4064461a3e2c8733e5206cca8aba373e2"), - mustParseKey("f245223322f20f4809825d473450067087f5884c13022256cb76a700458a509a"), - mustParseKey("34dda442a65dd3a7a3c0ac09f88b620c42d850cd7890efc13b4c68889f5e7173"), - }, - { - mustParseKey("9d29a49770bb2dd560150977d49d23c434ff2851457d5496787e514b1cca83b5"), - mustParseKey("cffb1827fb71ac03dfc3fbab91718149d0381d98c3782ccd7612c11b1b21d805"), - mustParseKey("d2cfff9578531baabb583d9c81afbeb1484a2a35257d91824636475e84eff434"), - }, - { - mustParseKey("82966a49411b1fc4c1458b8cbc44b53a98a4bdeb362eb8c6978c91b9c5797bc9"), - mustParseKey("80d5524d1a387a48d424e00fcd847b92d2b42d6d3a4d9e86857831cd3bece3c5"), - mustParseKey("1df61f3f42a0308ecc693fb80a834a2af86b82d7d486444ffcdc61becbb2803d"), - }, - { - 
mustParseKey("a802b890c70b68c1c2b4825f9164c4ec7baf82f645b315fe3083ca638a8715b3"), - mustParseKey("7bc3b72a1c6e1c3d206fad2eb4f0cfa47988fd60563dfb237a4c2c4d1490d625"), - mustParseKey("2328b2deed4f8094f560fa8091c7d4dae07806da4748324d4d67cae89f0838fc"), - }, - { - mustParseKey("f76f05154d7ac34edcd0bb6b352d4b71f4cbd28a83c1c052bed7ad99b2437461"), - mustParseKey("427ab98605015233d4520edfc4d93437eb55814fd225ba7c9d8fab2aa73ff956"), - mustParseKey("2c9732226007b9784e8b248d0c8fc285f205d8504fd66a039d283671b11959ba"), - }, - { - mustParseKey("8fe685ea6759124abc5dde9c7fecfcd887885a9dea3587b918fd1f5cab6d636b"), - mustParseKey("6314826c074e2dd622e2d54c4353b1f867b4d43c4d07e4abd74ebf17a9165980"), - mustParseKey("3850c8838e5dc74e560313034c60117637003c4bd675f782f230083bcd6ffb6b"), - }, - { - mustParseKey("7814ba3c6148222fd3fea4eb70fa65bec9ce816691e6c9edab025ffed4ea832c"), - mustParseKey("ccd0a7774a67280ec9fdf2b6811f576e92a68b474c8ff4a24c2afd188bb010ab"), - mustParseKey("340e7d292ad5f730a20a7179163238373b9765cfb8dd3bc3981882e61e7c89a4"), - }, - { - mustParseKey("aedb3b96ad13a149644baa819d92b134125bf8f5d6d4c22c8601e9e6488b3c82"), - mustParseKey("eb5df78f2eb11dbb385506f2dfae35bc0514054abce3c150aecb53a0af1df096"), - mustParseKey("79feab234c2872c9d5cf1faebb88006854e2642053f7d0eaf71aec139838716e"), - }, - { - mustParseKey("219ca56c819b394857b37015acde1c4f30c620b6bcf4fbc0d32ae63793e8a979"), - mustParseKey("fd1e350540fefe957bc093c00b05b10da2f22fdb8b4b4b8898877478785715f2"), - mustParseKey("15b55c040eb5876b72b8cb59c37e470fdba2f7b60f7d94fb00b8cdcfc88d7784"), - }, - { - mustParseKey("0bcce4ac4e18f7e406a2e60556735048c88cbde6597c223b46f50bd780106830"), - mustParseKey("eabe733e323b4b35b9e820ae17150c0215c7566747258075c0a15845c371dd77"), - mustParseKey("36e5da5e2000fd884a509e1fcb98b0b9690371af7f0bc8124ef07cc36ddccd60"), - }, - } - for _, tt := range tests { - if key := RegistryKey(tt.pub, tt.tweak); key != tt.want { - t.Errorf("RegistryKey() = %v, want %v", key, tt.want) - } - } -} diff --git 
a/v2/net/rhp/rpc.go b/v2/net/rhp/rpc.go deleted file mode 100644 index 38fe376c..00000000 --- a/v2/net/rhp/rpc.go +++ /dev/null @@ -1,1269 +0,0 @@ -package rhp - -import ( - "errors" - "fmt" - "time" - - "go.sia.tech/core/v2/net/rpc" - "go.sia.tech/core/v2/types" -) - -const defaultMaxLen = 10e3 // for revisions, proofs, etc. -const largeMaxLen = 1e6 // for transactions - -// ContractOutputs contains the output values for a FileContract. Because the -// revisions negotiated by the renter and host typically do not modify the -// output recipients, we can save some space by only sending the new values. -type ContractOutputs struct { - RenterValue types.Currency - HostValue types.Currency - MissedHostValue types.Currency -} - -// Apply sets the output values of fc according to co. -func (co ContractOutputs) Apply(fc *types.FileContract) { - fc.RenterOutput.Value = co.RenterValue - fc.HostOutput.Value = co.HostValue - fc.MissedHostValue = co.MissedHostValue -} - -// RPC IDs -var ( - RPCLockID = rpc.NewSpecifier("Lock") - RPCReadID = rpc.NewSpecifier("Read") - RPCSectorRootsID = rpc.NewSpecifier("SectorRoots") - RPCUnlockID = rpc.NewSpecifier("Unlock") - RPCWriteID = rpc.NewSpecifier("Write") - - RPCAccountBalanceID = rpc.NewSpecifier("AccountBalance") - RPCExecuteProgramID = rpc.NewSpecifier("ExecuteProgram") - RPCFundAccountID = rpc.NewSpecifier("FundAccount") - RPCFormContractID = rpc.NewSpecifier("FormContract") - RPCLatestRevisionID = rpc.NewSpecifier("LatestRevision") - RPCRenewContractID = rpc.NewSpecifier("RenewContract") - RPCSettingsID = rpc.NewSpecifier("Settings") -) - -// Read/Write actions -var ( - RPCWriteActionAppend = rpc.NewSpecifier("Append") - RPCWriteActionTrim = rpc.NewSpecifier("Trim") - RPCWriteActionSwap = rpc.NewSpecifier("Swap") - RPCWriteActionUpdate = rpc.NewSpecifier("Update") - - RPCReadStop = rpc.NewSpecifier("ReadStop") -) - -// RPC request/response objects -type ( - // RPCFormContractRequest contains the request parameters for the 
FormContract - // RPC. - RPCFormContractRequest struct { - Inputs []types.SiacoinInput - Outputs []types.SiacoinOutput - MinerFee types.Currency - Contract types.FileContract - } - - // RPCRenewContractRequest contains the request parameters for the Renew - // RPC. Resolution must contain a valid contract renewal. - RPCRenewContractRequest struct { - Inputs []types.SiacoinInput - Outputs []types.SiacoinOutput - MinerFee types.Currency - Resolution types.FileContractResolution - } - - // RPCFormContractHostAdditions contains the parent transaction, inputs, - // outputs and contract signature added by the host when negotiating a file - // contract. It is expected that the inputs are not signed yet. - RPCFormContractHostAdditions struct { - Inputs []types.SiacoinInput - Outputs []types.SiacoinOutput - ContractSignature types.Signature - } - - // RPCRenewContractHostAdditions contains the parent transaction, inputs, - // outputs, finalization and renewal signatures added by the host when - // negotiating a contract renewal. It is expected that the inputs are not - // signed yet. - RPCRenewContractHostAdditions struct { - Inputs []types.SiacoinInput - Outputs []types.SiacoinOutput - HostRollover types.Currency - FinalizationSignature types.Signature - InitialSignature types.Signature - RenewalSignature types.Signature - } - - // RPCContractSignatures contains the siacoin input signatures for a - // transaction. These signatures are sent by the renter and host during - // contract formation. - RPCContractSignatures struct { - SiacoinInputSignatures [][]types.Signature - } - - // RPCRenewContractRenterSignatures contains the siacoin input and renewal - // signature for a transaction. These signatures are sent by the renter - // during contract renewal. - RPCRenewContractRenterSignatures struct { - SiacoinInputSignatures [][]types.Signature - RenewalSignature types.Signature - } - - // RPCLockRequest contains the request parameters for the Lock RPC. 
- RPCLockRequest struct { - ContractID types.ElementID - Signature types.Signature - Timeout uint64 - } - - // RPCLockResponse contains the response data for the Lock RPC. - RPCLockResponse struct { - Acquired bool - NewChallenge [16]byte - Revision types.FileContract - } - - // RPCReadRequestSection is a section requested in RPCReadRequest. - RPCReadRequestSection struct { - MerkleRoot types.Hash256 - Offset uint64 - Length uint64 - } - - // RPCReadRequest contains the request parameters for the Read RPC. - RPCReadRequest struct { - Sections []RPCReadRequestSection - MerkleProof bool - - NewRevisionNumber uint64 - NewOutputs ContractOutputs - Signature types.Signature - } - - // RPCReadResponse contains the response data for the Read RPC. - RPCReadResponse struct { - Signature types.Signature - Data []byte - MerkleProof []types.Hash256 - } - - // RPCSectorRootsRequest contains the request parameters for the SectorRoots RPC. - RPCSectorRootsRequest struct { - RootOffset uint64 - NumRoots uint64 - - NewRevisionNumber uint64 - NewOutputs ContractOutputs - Signature types.Signature - } - - // RPCSectorRootsResponse contains the response data for the SectorRoots RPC. - RPCSectorRootsResponse struct { - Signature types.Signature - SectorRoots []types.Hash256 - MerkleProof []types.Hash256 - } - - // RPCWriteRequest contains the request parameters for the Write RPC. - RPCWriteRequest struct { - Actions []RPCWriteAction - MerkleProof bool - - NewRevisionNumber uint64 - NewOutputs ContractOutputs - } - - // RPCWriteAction is a generic Write action. The meaning of each field - // depends on the Type of the action. - RPCWriteAction struct { - Type rpc.Specifier - A, B uint64 - Data []byte - } - - // RPCWriteMerkleProof contains the optional Merkle proof for response data - // for the Write RPC. 
- RPCWriteMerkleProof struct { - OldSubtreeHashes []types.Hash256 - OldLeafHashes []types.Hash256 - NewMerkleRoot types.Hash256 - } - - // RPCWriteResponse contains the response data for the Write RPC. - RPCWriteResponse struct { - Signature types.Signature - } -) - -// price calculation functions - -// RPCReadRenterCost computes the cost of a Read RPC. -func RPCReadRenterCost(settings HostSettings, sections []RPCReadRequestSection) types.Currency { - var bandwidth uint64 - for _, sec := range sections { - proofHashes := RangeProofSize(LeavesPerSector, sec.Offset, sec.Offset+sec.Length) - bandwidth += sec.Length + proofHashes*32 - } - bandwidthPrice := settings.DownloadBandwidthPrice.Mul64(bandwidth) - return settings.InstrReadBaseCost.Add(bandwidthPrice) -} - -// RPCWriteRenterCost computes the cost of a Write RPC. -func RPCWriteRenterCost(settings HostSettings, fc types.FileContract, actions []RPCWriteAction) types.Currency { - var sectorsAdded, sectorsRemoved uint64 - for _, action := range actions { - switch action.Type { - case RPCWriteActionAppend: - sectorsAdded++ - case RPCWriteActionTrim: - sectorsRemoved -= action.A - case RPCWriteActionSwap: - default: - panic("unhanbled action type") - } - } - var storageCost types.Currency - if sectorsAdded > sectorsRemoved { - storageDuration := fc.WindowEnd - settings.BlockHeight - sectorStoragePrice := settings.StoragePrice.Mul64(SectorSize).Mul64(storageDuration) - storageCost = sectorStoragePrice.Mul64(sectorsAdded - sectorsRemoved) - } - proofSize := DiffProofSize(int(fc.Filesize/SectorSize), actions) - downloadBandwidth := uint64(proofSize) * 32 - return settings.InstrWriteBaseCost. - Add(settings.UploadBandwidthPrice.Mul64(sectorsAdded * SectorSize)). - Add(settings.DownloadBandwidthPrice.Mul64(downloadBandwidth)). - Add(storageCost) -} - -// RPCWriteHostCollateral computes the collateral for a Write RPC. 
-func RPCWriteHostCollateral(settings HostSettings, fc types.FileContract, actions []RPCWriteAction) types.Currency { - var sectorsAdded, sectorsRemoved uint64 - for _, action := range actions { - switch action.Type { - case RPCWriteActionAppend: - sectorsAdded++ - case RPCWriteActionTrim: - sectorsRemoved -= action.A - case RPCWriteActionSwap: - default: - panic("unhandled action type") - } - } - if sectorsAdded < sectorsRemoved { - return types.ZeroCurrency - } - collateralDuration := fc.WindowEnd - settings.BlockHeight - sectorCollateral := settings.Collateral.Mul64(SectorSize).Mul64(collateralDuration) - return sectorCollateral.Mul64(sectorsAdded - sectorsRemoved) -} - -// ProtocolObject implementations - -func writeMerkleProof(e *types.Encoder, proof []types.Hash256) { - e.WritePrefix(len(proof)) - for i := range proof { - proof[i].EncodeTo(e) - } -} - -func readMerkleProof(d *types.Decoder) (proof []types.Hash256) { - proof = make([]types.Hash256, d.ReadPrefix()) - for i := range proof { - proof[i].DecodeFrom(d) - } - return -} - -func (co *ContractOutputs) encodeTo(e *types.Encoder) { - co.RenterValue.EncodeTo(e) - co.HostValue.EncodeTo(e) - co.MissedHostValue.EncodeTo(e) -} - -func (co *ContractOutputs) decodeFrom(d *types.Decoder) { - co.RenterValue.DecodeFrom(d) - co.HostValue.DecodeFrom(d) - co.MissedHostValue.DecodeFrom(d) -} - -func (ContractOutputs) maxLen() int { - return 4 * 16 -} - -// EncodeTo implements rpc.Object. -func (r *RPCFormContractRequest) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.Inputs)) - for i := range r.Inputs { - r.Inputs[i].EncodeTo(e) - } - e.WritePrefix(len(r.Outputs)) - for i := range r.Outputs { - r.Outputs[i].EncodeTo(e) - } - r.MinerFee.EncodeTo(e) - r.Contract.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. 
-func (r *RPCFormContractRequest) DecodeFrom(d *types.Decoder) { - r.Inputs = make([]types.SiacoinInput, d.ReadPrefix()) - for i := range r.Inputs { - r.Inputs[i].DecodeFrom(d) - } - r.Outputs = make([]types.SiacoinOutput, d.ReadPrefix()) - for i := range r.Outputs { - r.Outputs[i].DecodeFrom(d) - } - r.MinerFee.DecodeFrom(d) - r.Contract.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCFormContractRequest) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCRenewContractRequest) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.Inputs)) - for i := range r.Inputs { - r.Inputs[i].EncodeTo(e) - } - e.WritePrefix(len(r.Outputs)) - for i := range r.Outputs { - r.Outputs[i].EncodeTo(e) - } - r.MinerFee.EncodeTo(e) - r.Resolution.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCRenewContractRequest) DecodeFrom(d *types.Decoder) { - r.Inputs = make([]types.SiacoinInput, d.ReadPrefix()) - for i := range r.Inputs { - r.Inputs[i].DecodeFrom(d) - } - r.Outputs = make([]types.SiacoinOutput, d.ReadPrefix()) - for i := range r.Outputs { - r.Outputs[i].DecodeFrom(d) - } - r.MinerFee.DecodeFrom(d) - r.Resolution.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCRenewContractRequest) MaxLen() int { - return largeMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCFormContractHostAdditions) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.Inputs)) - for i := range r.Inputs { - r.Inputs[i].EncodeTo(e) - } - e.WritePrefix(len(r.Outputs)) - for i := range r.Outputs { - r.Outputs[i].EncodeTo(e) - } - r.ContractSignature.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. 
-func (r *RPCFormContractHostAdditions) DecodeFrom(d *types.Decoder) { - r.Inputs = make([]types.SiacoinInput, d.ReadPrefix()) - for i := range r.Inputs { - r.Inputs[i].DecodeFrom(d) - } - r.Outputs = make([]types.SiacoinOutput, d.ReadPrefix()) - for i := range r.Outputs { - r.Outputs[i].DecodeFrom(d) - } - r.ContractSignature.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCFormContractHostAdditions) MaxLen() int { - return largeMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCRenewContractHostAdditions) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.Inputs)) - for i := range r.Inputs { - r.Inputs[i].EncodeTo(e) - } - e.WritePrefix(len(r.Outputs)) - for i := range r.Outputs { - r.Outputs[i].EncodeTo(e) - } - r.HostRollover.EncodeTo(e) - r.FinalizationSignature.EncodeTo(e) - r.InitialSignature.EncodeTo(e) - r.RenewalSignature.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCRenewContractHostAdditions) DecodeFrom(d *types.Decoder) { - r.Inputs = make([]types.SiacoinInput, d.ReadPrefix()) - for i := range r.Inputs { - r.Inputs[i].DecodeFrom(d) - } - r.Outputs = make([]types.SiacoinOutput, d.ReadPrefix()) - for i := range r.Outputs { - r.Outputs[i].DecodeFrom(d) - } - r.HostRollover.DecodeFrom(d) - r.FinalizationSignature.DecodeFrom(d) - r.InitialSignature.DecodeFrom(d) - r.RenewalSignature.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCRenewContractHostAdditions) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCContractSignatures) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.SiacoinInputSignatures)) - for i := range r.SiacoinInputSignatures { - e.WritePrefix(len(r.SiacoinInputSignatures[i])) - for j := range r.SiacoinInputSignatures[i] { - r.SiacoinInputSignatures[i][j].EncodeTo(e) - } - } -} - -// DecodeFrom implements rpc.Object. 
-func (r *RPCContractSignatures) DecodeFrom(d *types.Decoder) { - r.SiacoinInputSignatures = make([][]types.Signature, d.ReadPrefix()) - for i := range r.SiacoinInputSignatures { - r.SiacoinInputSignatures[i] = make([]types.Signature, d.ReadPrefix()) - for j := range r.SiacoinInputSignatures[i] { - r.SiacoinInputSignatures[i][j].DecodeFrom(d) - } - } -} - -// MaxLen implements rpc.Object. -func (r *RPCContractSignatures) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCRenewContractRenterSignatures) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.SiacoinInputSignatures)) - for i := range r.SiacoinInputSignatures { - e.WritePrefix(len(r.SiacoinInputSignatures[i])) - for j := range r.SiacoinInputSignatures[i] { - r.SiacoinInputSignatures[i][j].EncodeTo(e) - } - } - r.RenewalSignature.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCRenewContractRenterSignatures) DecodeFrom(d *types.Decoder) { - r.SiacoinInputSignatures = make([][]types.Signature, d.ReadPrefix()) - for i := range r.SiacoinInputSignatures { - r.SiacoinInputSignatures[i] = make([]types.Signature, d.ReadPrefix()) - for j := range r.SiacoinInputSignatures[i] { - r.SiacoinInputSignatures[i][j].DecodeFrom(d) - } - } - r.RenewalSignature.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCRenewContractRenterSignatures) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCLockRequest) EncodeTo(e *types.Encoder) { - r.ContractID.EncodeTo(e) - r.Signature.EncodeTo(e) - e.WriteUint64(r.Timeout) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCLockRequest) DecodeFrom(d *types.Decoder) { - r.ContractID.DecodeFrom(d) - r.Signature.DecodeFrom(d) - r.Timeout = d.ReadUint64() -} - -// MaxLen implements rpc.Object. -func (r *RPCLockRequest) MaxLen() int { - return len(r.ContractID.Source) + 8 + len(r.Signature) + 8 -} - -// EncodeTo implements rpc.Object. 
-func (r *RPCLockResponse) EncodeTo(e *types.Encoder) { - e.WriteBool(r.Acquired) - e.Write(r.NewChallenge[:]) - r.Revision.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCLockResponse) DecodeFrom(d *types.Decoder) { - r.Acquired = d.ReadBool() - d.Read(r.NewChallenge[:]) - r.Revision.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCLockResponse) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCReadRequest) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.Sections)) - for i := range r.Sections { - r.Sections[i].MerkleRoot.EncodeTo(e) - e.WriteUint64(r.Sections[i].Offset) - e.WriteUint64(r.Sections[i].Length) - } - e.WriteBool(r.MerkleProof) - e.WriteUint64(r.NewRevisionNumber) - r.NewOutputs.encodeTo(e) - r.Signature.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCReadRequest) DecodeFrom(d *types.Decoder) { - r.Sections = make([]RPCReadRequestSection, d.ReadPrefix()) - for i := range r.Sections { - r.Sections[i].MerkleRoot.DecodeFrom(d) - r.Sections[i].Offset = d.ReadUint64() - r.Sections[i].Length = d.ReadUint64() - } - r.MerkleProof = d.ReadBool() - r.NewRevisionNumber = d.ReadUint64() - r.NewOutputs.decodeFrom(d) - r.Signature.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCReadRequest) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCReadResponse) EncodeTo(e *types.Encoder) { - r.Signature.EncodeTo(e) - e.WriteBytes(r.Data) - writeMerkleProof(e, r.MerkleProof) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCReadResponse) DecodeFrom(d *types.Decoder) { - r.Signature.DecodeFrom(d) - - // r.Data will typically be large (4 MiB), so reuse the existing capacity if - // possible. - // - // NOTE: for maximum efficiency, we should be doing this for every slice, - // but in most cases the extra performance isn't worth the aliasing risk. 
- dataLen := d.ReadPrefix() - if cap(r.Data) < dataLen { - r.Data = make([]byte, dataLen) - } - r.Data = r.Data[:dataLen] - d.Read(r.Data) - - r.MerkleProof = readMerkleProof(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCReadResponse) MaxLen() int { - return 16 * (1 << 20) // 16 MiB -} - -// EncodeTo implements rpc.Object. -func (r *RPCSectorRootsRequest) EncodeTo(e *types.Encoder) { - e.WriteUint64(r.RootOffset) - e.WriteUint64(r.NumRoots) - e.WriteUint64(r.NewRevisionNumber) - r.NewOutputs.encodeTo(e) - r.Signature.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCSectorRootsRequest) DecodeFrom(d *types.Decoder) { - r.RootOffset = d.ReadUint64() - r.NumRoots = d.ReadUint64() - r.NewRevisionNumber = d.ReadUint64() - r.NewOutputs.decodeFrom(d) - r.Signature.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCSectorRootsRequest) MaxLen() int { - return 8 + 8 + 8 + r.NewOutputs.maxLen() + len(r.Signature) -} - -// EncodeTo implements rpc.Object. -func (r *RPCSectorRootsResponse) EncodeTo(e *types.Encoder) { - r.Signature.EncodeTo(e) - writeMerkleProof(e, r.SectorRoots) - writeMerkleProof(e, r.MerkleProof) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCSectorRootsResponse) DecodeFrom(d *types.Decoder) { - r.Signature.DecodeFrom(d) - r.SectorRoots = readMerkleProof(d) - r.MerkleProof = readMerkleProof(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCSectorRootsResponse) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCWriteAction) EncodeTo(e *types.Encoder) { - r.Type.EncodeTo(e) - e.WriteUint64(r.A) - e.WriteUint64(r.B) - e.WriteBytes(r.Data) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCWriteAction) DecodeFrom(d *types.Decoder) { - r.Type.DecodeFrom(d) - r.A = d.ReadUint64() - r.B = d.ReadUint64() - r.Data = d.ReadBytes() -} - -// EncodeTo implements rpc.Object. 
-func (r *RPCWriteRequest) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(r.Actions)) - for i := range r.Actions { - r.Actions[i].EncodeTo(e) - } - e.WriteBool(r.MerkleProof) - e.WriteUint64(r.NewRevisionNumber) - r.NewOutputs.encodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCWriteRequest) DecodeFrom(d *types.Decoder) { - r.Actions = make([]RPCWriteAction, d.ReadPrefix()) - for i := range r.Actions { - r.Actions[i].DecodeFrom(d) - } - r.MerkleProof = d.ReadBool() - r.NewRevisionNumber = d.ReadUint64() - r.NewOutputs.decodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCWriteRequest) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCWriteMerkleProof) EncodeTo(e *types.Encoder) { - writeMerkleProof(e, r.OldSubtreeHashes) - writeMerkleProof(e, r.OldLeafHashes) - r.NewMerkleRoot.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCWriteMerkleProof) DecodeFrom(d *types.Decoder) { - r.OldSubtreeHashes = readMerkleProof(d) - r.OldLeafHashes = readMerkleProof(d) - r.NewMerkleRoot.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCWriteMerkleProof) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo implements rpc.Object. -func (r *RPCWriteResponse) EncodeTo(e *types.Encoder) { - r.Signature.EncodeTo(e) -} - -// DecodeFrom implements rpc.Object. -func (r *RPCWriteResponse) DecodeFrom(d *types.Decoder) { - r.Signature.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (r *RPCWriteResponse) MaxLen() int { - return 64 -} - -// RPCSettingsResponse contains the JSON-encoded settings for a host. -type RPCSettingsResponse struct { - Settings []byte -} - -// MaxLen returns the maximum encoded length of an object. Implements -// rpc.Object. -func (r *RPCSettingsResponse) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo encodes a RPCSettingsResponse to an encoder. Implements -// types.EncoderTo. 
-func (r *RPCSettingsResponse) EncodeTo(e *types.Encoder) { - e.WriteBytes(r.Settings) -} - -// DecodeFrom decodes a RPCSettingsResponse from a decoder. Implements -// types.DecoderFrom. -func (r *RPCSettingsResponse) DecodeFrom(d *types.Decoder) { - r.Settings = d.ReadBytes() -} - -// RPCLatestRevisionRequest requests the host send the latest revision of the -// contract. -type RPCLatestRevisionRequest struct { - ContractID types.ElementID -} - -// MaxLen returns the maximum encoded length of an object. Implements -// rpc.Object. -func (r *RPCLatestRevisionRequest) MaxLen() int { - return 40 -} - -// EncodeTo encodes a RPCLatestRevisionRequest to an encoder. Implements -// types.EncoderTo. -func (r *RPCLatestRevisionRequest) EncodeTo(e *types.Encoder) { - r.ContractID.EncodeTo(e) -} - -// DecodeFrom decodes a RPCLatestRevisionRequest from a decoder. Implements -// types.DecoderFrom. -func (r *RPCLatestRevisionRequest) DecodeFrom(d *types.Decoder) { - r.ContractID.DecodeFrom(d) -} - -// RPCLatestRevisionResponse contains the latest revision of a contract from the -// host. -type RPCLatestRevisionResponse struct { - Revision Contract -} - -// MaxLen returns the maximum encoded length of an object. Implements -// rpc.Object. -func (r *RPCLatestRevisionResponse) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo encodes a RPCLatestRevisionResponse to an encoder. Implements -// types.EncoderTo. -func (r *RPCLatestRevisionResponse) EncodeTo(e *types.Encoder) { - r.Revision.EncodeTo(e) -} - -// DecodeFrom decodes a RPCLatestRevisionResponse from a decoder. Implements -// types.DecoderFrom. -func (r *RPCLatestRevisionResponse) DecodeFrom(d *types.Decoder) { - r.Revision.DecodeFrom(d) -} - -// RPCSettingsRegisteredResponse returns the settings ID to the renter to signal -// success. -type RPCSettingsRegisteredResponse struct { - ID SettingsID -} - -// MaxLen returns the maximum encoded length of an object. Implements -// rpc.Object. 
-func (r *RPCSettingsRegisteredResponse) MaxLen() int { - return 16 -} - -// EncodeTo encodes a RPCSettingsRegisteredResponse to an encoder. Implements -// types.EncoderTo. -func (r *RPCSettingsRegisteredResponse) EncodeTo(e *types.Encoder) { - e.Write(r.ID[:]) -} - -// DecodeFrom decodes a RPCSettingsRegisteredResponse from a decoder. Implements -// types.DecoderFrom. -func (r *RPCSettingsRegisteredResponse) DecodeFrom(d *types.Decoder) { - d.Read(r.ID[:]) -} - -func writeInstruction(e *types.Encoder, i Instruction) { - var spec rpc.Specifier - switch i.(type) { - case *InstrAppendSector: - spec = SpecInstrAppendSector - case *InstrUpdateSector: - spec = SpecInstrUpdateSector - case *InstrContractRevision: - spec = SpecInstrContractRevision - case *InstrSectorRoots: - spec = SpecInstrSectorRoots - case *InstrDropSectors: - spec = SpecInstrDropSectors - case *InstrHasSector: - spec = SpecInstrHasSector - case *InstrReadOffset: - spec = SpecInstrReadOffset - case *InstrReadRegistry: - spec = SpecInstrReadRegistry - case *InstrReadSector: - spec = SpecInstrReadSector - case *InstrSwapSector: - spec = SpecInstrSwapSector - case *InstrUpdateRegistry: - spec = SpecInstrUpdateRegistry - default: - panic("unhandled instruction") - } - spec.EncodeTo(e) - i.EncodeTo(e) -} - -func readInstruction(d *types.Decoder) (i Instruction) { - var spec rpc.Specifier - d.Read(spec[:]) - - switch spec { - case SpecInstrAppendSector: - i = new(InstrAppendSector) - case SpecInstrUpdateSector: - i = new(InstrUpdateSector) - case SpecInstrDropSectors: - i = new(InstrDropSectors) - case SpecInstrHasSector: - i = new(InstrHasSector) - case SpecInstrReadOffset: - i = new(InstrReadOffset) - case SpecInstrReadSector: - i = new(InstrReadSector) - case SpecInstrContractRevision: - i = new(InstrContractRevision) - case SpecInstrSwapSector: - i = new(InstrSwapSector) - case SpecInstrUpdateRegistry: - i = new(InstrUpdateRegistry) - case SpecInstrReadRegistry: - i = new(InstrReadRegistry) - default: - 
d.SetErr(fmt.Errorf("uknown instruction specifier, %v", spec)) - return - } - i.DecodeFrom(d) - return -} - -// RPCExecuteProgramRequest is the request for the RPC method "execute". -type RPCExecuteProgramRequest struct { - // FileContractID is the id of the filecontract we would like to modify. - FileContractID types.ElementID - // RenterSignature is the signature of the last revision of the file - // contract. - RenterSignature types.Signature - // Instructions are the instructions to be executed. - Instructions []Instruction - // ProgramDataLength is the length of the programData following this - // request. - ProgramDataLength uint64 -} - -// MaxLen returns the maximum encoded length of an object. Implements -// rpc.Object. -func (req *RPCExecuteProgramRequest) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo encodes a RPCExecuteProgramRequest to an encoder. Implements -// types.EncoderTo. -func (req *RPCExecuteProgramRequest) EncodeTo(e *types.Encoder) { - req.FileContractID.EncodeTo(e) - req.RenterSignature.EncodeTo(e) - e.WritePrefix(len(req.Instructions)) - for _, instruction := range req.Instructions { - writeInstruction(e, instruction) - } - e.WriteUint64(req.ProgramDataLength) -} - -// DecodeFrom decodes a RPCExecuteProgramRequest from a decoder. Implements -// types.DecoderFrom. -func (req *RPCExecuteProgramRequest) DecodeFrom(d *types.Decoder) { - req.FileContractID.DecodeFrom(d) - req.RenterSignature.DecodeFrom(d) - req.Instructions = make([]Instruction, d.ReadPrefix()) - for i := range req.Instructions { - req.Instructions[i] = readInstruction(d) - } - req.ProgramDataLength = d.ReadUint64() -} - -// Payment specifiers are used to specify the payment type -var ( - PayByContract = rpc.NewSpecifier("PayByContract") - PayByEphemeralAccount = rpc.NewSpecifier("PayByEphemAcc") -) - -// WithdrawalMessage is the amount of money to deduct from the account to create -// the RPC budget. 
-type WithdrawalMessage struct { - AccountID types.PublicKey - Expiry uint64 - Amount types.Currency - - // Nonce prevents duplicate withdrawals from being processed - Nonce [8]byte -} - -// SigHash computes the hash of the withdrawal message used for signing the -// pay by ephemeral account request. -func (wm *WithdrawalMessage) SigHash() types.Hash256 { - h := types.NewHasher() - wm.EncodeTo(h.E) - return h.Sum() -} - -// MaxLen implements rpc.Object. -func (wm *WithdrawalMessage) MaxLen() int { - return 32 + 8 + 16 + 8 -} - -// EncodeTo implements types.EncoderTo. -func (wm *WithdrawalMessage) EncodeTo(e *types.Encoder) { - wm.AccountID.EncodeTo(e) - e.WriteUint64(wm.Expiry) - wm.Amount.EncodeTo(e) - e.Write(wm.Nonce[:]) -} - -// DecodeFrom implements types.DecoderFrom. -func (wm *WithdrawalMessage) DecodeFrom(d *types.Decoder) { - wm.AccountID.DecodeFrom(d) - wm.Expiry = d.ReadUint64() - wm.Amount.DecodeFrom(d) - d.Read(wm.Nonce[:]) -} - -// PayByEphemeralAccountRequest is a request to create an RPC budget using funds -// from an ephemeral account. -type PayByEphemeralAccountRequest struct { - Message WithdrawalMessage - Signature types.Signature - Priority uint64 -} - -// MaxLen implements rpc.Object. -func (req *PayByEphemeralAccountRequest) MaxLen() int { - return req.Message.MaxLen() + 64 + 8 -} - -// EncodeTo implements types.EncoderTo. -func (req *PayByEphemeralAccountRequest) EncodeTo(e *types.Encoder) { - req.Message.EncodeTo(e) - req.Signature.EncodeTo(e) - e.WriteUint64(req.Priority) -} - -// DecodeFrom implements types.DecoderFrom. -func (req *PayByEphemeralAccountRequest) DecodeFrom(d *types.Decoder) { - req.Message.DecodeFrom(d) - req.Signature.DecodeFrom(d) - req.Priority = d.ReadUint64() -} - -// PayByContractRequest is a request to create an RPC budget using funds from a -// file contract. 
-type PayByContractRequest struct { - ContractID types.ElementID - RefundAccount types.PublicKey - Signature types.Signature - NewRevisionNumber uint64 - NewOutputs ContractOutputs -} - -// MaxLen implements rpc.Object. -func (req *PayByContractRequest) MaxLen() int { - // contract ID + revision number + payouts + refund + signature - return 40 + 8 + 64 + 32 + 64 -} - -// EncodeTo implements types.EncoderTo. -func (req *PayByContractRequest) EncodeTo(e *types.Encoder) { - req.ContractID.EncodeTo(e) - req.RefundAccount.EncodeTo(e) - req.Signature.EncodeTo(e) - e.WriteUint64(req.NewRevisionNumber) - req.NewOutputs.encodeTo(e) -} - -// DecodeFrom implements types.DecoderFrom. -func (req *PayByContractRequest) DecodeFrom(d *types.Decoder) { - req.ContractID.DecodeFrom(d) - req.RefundAccount.DecodeFrom(d) - req.Signature.DecodeFrom(d) - req.NewRevisionNumber = d.ReadUint64() - req.NewOutputs.decodeFrom(d) -} - -// RPCRevisionSigningResponse is returned by the host when finalizing a contract -// revision. -type RPCRevisionSigningResponse struct { - Signature types.Signature -} - -// MaxLen implements rpc.Object. -func (resp *RPCRevisionSigningResponse) MaxLen() int { - return 64 -} - -// EncodeTo implements types.EncoderTo. -func (resp *RPCRevisionSigningResponse) EncodeTo(e *types.Encoder) { - resp.Signature.EncodeTo(e) -} - -// DecodeFrom implements types.DecoderFrom. -func (resp *RPCRevisionSigningResponse) DecodeFrom(d *types.Decoder) { - resp.Signature.DecodeFrom(d) -} - -// RPCAccountBalanceResponse is the returned response for RPCAccountBalance. -type RPCAccountBalanceResponse struct { - Balance types.Currency -} - -// MaxLen returns the maximum length of the encoded object. Implements -// rpc.Object. -func (resp *RPCAccountBalanceResponse) MaxLen() int { - return 16 -} - -// EncodeTo encodes a RPCAccountBalanceResponse to an encoder. Implements -// types.EncoderTo. 
-func (resp *RPCAccountBalanceResponse) EncodeTo(e *types.Encoder) { - resp.Balance.EncodeTo(e) -} - -// DecodeFrom decodes a RPCAccountBalanceResponse from a decoder. Implements -// types.DecoderFrom. -func (resp *RPCAccountBalanceResponse) DecodeFrom(d *types.Decoder) { - resp.Balance.DecodeFrom(d) -} - -// RPCAccountBalanceRequest is a request for the balance of an account. -type RPCAccountBalanceRequest struct { - AccountID types.PublicKey -} - -// MaxLen returns the maximum length of the encoded object. Implements -// rpc.Object. -func (resp *RPCAccountBalanceRequest) MaxLen() int { - return 32 -} - -// EncodeTo encodes a RPCAccountBalanceRequest to an encoder. Implements -// types.EncoderTo. -func (resp *RPCAccountBalanceRequest) EncodeTo(e *types.Encoder) { - resp.AccountID.EncodeTo(e) -} - -// DecodeFrom decodes a RPCAccountBalanceRequest from a decoder. Implements -// types.DecoderFrom. -func (resp *RPCAccountBalanceRequest) DecodeFrom(d *types.Decoder) { - resp.AccountID.DecodeFrom(d) -} - -// RPCFundAccountRequest is a request to fund an account. -type RPCFundAccountRequest struct { - AccountID types.PublicKey -} - -// MaxLen returns the maximum length of the encoded object. Implements -// rpc.Object. -func (resp *RPCFundAccountRequest) MaxLen() int { - return 32 -} - -// EncodeTo encodes a RPCFundAccountRequest to an encoder. Implements -// types.EncoderTo. -func (resp *RPCFundAccountRequest) EncodeTo(e *types.Encoder) { - resp.AccountID.EncodeTo(e) -} - -// DecodeFrom decodes a RPCFundAccountRequest from a decoder. Implements -// types.DecoderFrom. -func (resp *RPCFundAccountRequest) DecodeFrom(d *types.Decoder) { - resp.AccountID.DecodeFrom(d) -} - -// A Receipt is returned as part of funding an ephemeral account. It shows the -// amount deposited and the account. -type Receipt struct { - Account types.PublicKey - Host types.PublicKey - Amount types.Currency - Timestamp time.Time -} - -// SigHash computes the hash of the receipt. 
Used for signing the -// pay by ephemeral account response. -func (r *Receipt) SigHash() types.Hash256 { - h := types.NewHasher() - r.EncodeTo(h.E) - return h.Sum() -} - -// MaxLen returns the maximum length of the encoded object. Implements -// rpc.Object. -func (r *Receipt) MaxLen() int { - return 32 + 32 + 16 + 8 -} - -// EncodeTo encodes a Receipt to an encoder. Implements -// types.EncoderTo. -func (r *Receipt) EncodeTo(e *types.Encoder) { - r.Account.EncodeTo(e) - r.Host.EncodeTo(e) - r.Amount.EncodeTo(e) - e.WriteTime(r.Timestamp) -} - -// DecodeFrom decodes a Receipt from a decoder. Implements -// types.DecoderFrom. -func (r *Receipt) DecodeFrom(d *types.Decoder) { - r.Account.DecodeFrom(d) - r.Host.DecodeFrom(d) - r.Amount.DecodeFrom(d) - r.Timestamp = d.ReadTime() -} - -// RPCFundAccountResponse is the response to a RPCFundAccountRequest. It returns -// the current balance of the account and a signed receipt from the host. -type RPCFundAccountResponse struct { - Balance types.Currency - Receipt Receipt - Signature types.Signature -} - -// MaxLen returns the maximum length of the encoded object. Implements -// rpc.Object. -func (resp *RPCFundAccountResponse) MaxLen() int { - return 16 + resp.Receipt.MaxLen() + 8 + 64 -} - -// EncodeTo encodes a RPCFundAccountResponse to an encoder. Implements -// types.EncoderTo. -func (resp *RPCFundAccountResponse) EncodeTo(e *types.Encoder) { - resp.Balance.EncodeTo(e) - resp.Receipt.EncodeTo(e) - resp.Signature.EncodeTo(e) -} - -// DecodeFrom decodes a RPCFundAccountResponse from a decoder. Implements -// types.DecoderFrom. -func (resp *RPCFundAccountResponse) DecodeFrom(d *types.Decoder) { - resp.Balance.DecodeFrom(d) - resp.Receipt.DecodeFrom(d) - resp.Signature.DecodeFrom(d) -} - -// RPCExecuteInstrResponse is sent to the renter by the host for each -// successfully executed instruction during program execution. The -// final response is used to determine the final contract state. 
-type RPCExecuteInstrResponse struct { - AdditionalCollateral types.Currency - AdditionalStorage types.Currency - FailureRefund types.Currency - TotalCost types.Currency - OutputLength uint64 - NewDataSize uint64 - NewMerkleRoot types.Hash256 - Proof []types.Hash256 - Error error -} - -// MaxLen returns the maximum length of the encoded object. Implements -// rpc.Object. -func (resp *RPCExecuteInstrResponse) MaxLen() int { - return defaultMaxLen -} - -// EncodeTo encodes a RPCExecuteInstrResponse to an encoder. Implements -// types.EncoderTo. -func (resp *RPCExecuteInstrResponse) EncodeTo(e *types.Encoder) { - resp.AdditionalCollateral.EncodeTo(e) - resp.AdditionalStorage.EncodeTo(e) - resp.FailureRefund.EncodeTo(e) - resp.TotalCost.EncodeTo(e) - e.WriteUint64(resp.OutputLength) - e.WriteUint64(resp.NewDataSize) - resp.NewMerkleRoot.EncodeTo(e) - e.WritePrefix(len(resp.Proof)) - for _, h := range resp.Proof { - h.EncodeTo(e) - } - var errStr string - if resp.Error != nil { - errStr = resp.Error.Error() - } - e.WriteString(errStr) -} - -// DecodeFrom decodes a RPCExecuteInstrResponse from a decoder. Implements -// types.DecoderFrom. -func (resp *RPCExecuteInstrResponse) DecodeFrom(d *types.Decoder) { - resp.AdditionalCollateral.DecodeFrom(d) - resp.AdditionalStorage.DecodeFrom(d) - resp.FailureRefund.DecodeFrom(d) - resp.TotalCost.DecodeFrom(d) - resp.OutputLength = d.ReadUint64() - resp.NewDataSize = d.ReadUint64() - resp.NewMerkleRoot.DecodeFrom(d) - resp.Proof = make([]types.Hash256, d.ReadPrefix()) - for i := range resp.Proof { - resp.Proof[i].DecodeFrom(d) - } - if str := d.ReadString(); len(str) != 0 { - resp.Error = errors.New(str) - } -} - -// RPCFinalizeProgramRequest is a request sent by the renter after execution -// of a read-write program to update the contract with the new collateral -// and storage burn. 
-type RPCFinalizeProgramRequest struct { - Signature types.Signature - NewRevisionNumber uint64 - NewOutputs ContractOutputs -} - -// MaxLen returns the maximum encoded size of the object; implements rpc.Object. -func (req *RPCFinalizeProgramRequest) MaxLen() int { - return 64 + 8 + 16*4 -} - -// EncodeTo encodes the RPCFinalizeProgramRequest to the encoder. Implements -// types.EncoderTo. -func (req *RPCFinalizeProgramRequest) EncodeTo(e *types.Encoder) { - req.Signature.EncodeTo(e) - e.WriteUint64(req.NewRevisionNumber) - req.NewOutputs.encodeTo(e) -} - -// DecodeFrom decodes the RPCFinalizeProgramRequest from the decoder. Implements -// types.DecoderFrom. -func (req *RPCFinalizeProgramRequest) DecodeFrom(d *types.Decoder) { - req.Signature.DecodeFrom(d) - req.NewRevisionNumber = d.ReadUint64() - req.NewOutputs.decodeFrom(d) -} diff --git a/v2/net/rhp/rpc_test.go b/v2/net/rhp/rpc_test.go deleted file mode 100644 index 7fe147fc..00000000 --- a/v2/net/rhp/rpc_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package rhp - -import ( - "bytes" - "math/rand" - "reflect" - "testing" - "testing/quick" - - "go.sia.tech/core/v2/types" -) - -func randStruct(t reflect.Type, rand *rand.Rand) reflect.Value { - v := reflect.New(t) - for i := 0; i < v.Elem().NumField(); i++ { - // time.Time contains unexported fields which makes testing/quick not - // work so we have to have custom Generate function that skips over - // fields containing it - tStr := t.Field(i).Type.String() - if tStr == "time.Time" || tStr == "error" { - continue - } else if tStr == "[]rhp.Instruction" { - v.Elem().Field(i).Set(reflect.ValueOf([]Instruction{})) - continue - } - elem, ok := quick.Value(t.Field(i).Type, rand) - if !ok { - return reflect.Value{} - } - v.Elem().Field(i).Set(elem) - } - return v -} - -// Generate implements quick.Generator. 
-func (*HostSettings) Generate(rand *rand.Rand, size int) reflect.Value { - return randStruct(reflect.TypeOf(HostSettings{}), rand) -} - -// Generate implements quick.Generator. -func (*RPCExecuteProgramRequest) Generate(rand *rand.Rand, size int) reflect.Value { - return randStruct(reflect.TypeOf(RPCExecuteProgramRequest{}), rand) -} - -// Generate implements quick.Generator. -func (*RPCExecuteInstrResponse) Generate(rand *rand.Rand, size int) reflect.Value { - return randStruct(reflect.TypeOf(RPCExecuteInstrResponse{}), rand) -} - -func TestEncoderRoundtrip(t *testing.T) { - tests := []types.EncoderTo{ - &RPCContractSignatures{}, - &RPCRenewContractRenterSignatures{}, - &RPCLockRequest{}, - &RPCLockResponse{}, - &RPCReadRequest{}, - &RPCReadResponse{}, - &RPCSectorRootsRequest{}, - &RPCSectorRootsResponse{}, - &RPCWriteAction{}, - &RPCWriteRequest{}, - &RPCWriteMerkleProof{}, - &RPCWriteResponse{}, - &RPCSettingsResponse{}, - &RPCLatestRevisionRequest{}, - &RPCLatestRevisionResponse{}, - &RPCSettingsRegisteredResponse{}, - &RPCExecuteProgramRequest{}, - &WithdrawalMessage{}, - &PayByEphemeralAccountRequest{}, - &PayByContractRequest{}, - &RPCRevisionSigningResponse{}, - &RPCAccountBalanceResponse{}, - &RPCAccountBalanceRequest{}, - &RPCFundAccountRequest{}, - &RPCExecuteInstrResponse{}, - &RPCFinalizeProgramRequest{}, - &SettingsID{}, - &HostSettings{}, - } - - for _, val := range tests { - var buf bytes.Buffer - e := types.NewEncoder(&buf) - typ := reflect.TypeOf(val) - randVal, ok := quick.Value(typ, rand.New(rand.NewSource(0))) - if !ok { - t.Errorf("could not generate random value for type %s", typ) - } - newVal := randVal.Interface() - newVal.(types.EncoderTo).EncodeTo(e) - e.Flush() - - decval := reflect.New(typ.Elem()) - decval.Interface().(types.DecoderFrom).DecodeFrom(types.NewBufDecoder(buf.Bytes())) - dec := decval.Interface() - - if !reflect.DeepEqual(dec, newVal) { - t.Fatalf("value did not survive roundtrip: expected %v, got %v", newVal, dec) - } - 
} -} diff --git a/v2/net/rhp/session.go b/v2/net/rhp/session.go deleted file mode 100644 index 722682b6..00000000 --- a/v2/net/rhp/session.go +++ /dev/null @@ -1,123 +0,0 @@ -// Package rhp implements the Sia renter-host protocol. -package rhp - -import ( - "crypto/ed25519" - "errors" - "fmt" - "io" - "net" - - "go.sia.tech/core/v2/types" - - "go.sia.tech/mux" - "golang.org/x/crypto/blake2b" - "lukechampine.com/frand" -) - -const protocolVersion = 1 - -// ErrRenterClosed is returned by (*Session).ReadID when the renter sends the -// session termination signal. -var ErrRenterClosed = errors.New("renter has terminated session") - -func hashChallenge(challenge [16]byte) [32]byte { - c := make([]byte, 32) - copy(c[:16], "challenge") - copy(c[16:], challenge[:]) - return blake2b.Sum256(c) -} - -// A Session is an ongoing exchange of RPCs via the renter-host protocol. -type Session struct { - *mux.Mux - challenge [16]byte -} - -// SetChallenge sets the current session challenge. Challenges allow the host to -// verify that a renter controls the contract signing key before allowing them -// to lock the contract. -func (s *Session) SetChallenge(challenge [16]byte) { - s.challenge = challenge -} - -// SignChallenge signs the current session challenge. -func (s *Session) SignChallenge(priv types.PrivateKey) (sig types.Signature) { - return priv.SignHash(hashChallenge(s.challenge)) -} - -// VerifyChallenge verifies a signature of the current session challenge. -func (s *Session) VerifyChallenge(sig types.Signature, pub types.PublicKey) bool { - return pub.VerifyHash(hashChallenge(s.challenge), sig) -} - -// AcceptSession conducts the host's half of the renter-host protocol handshake, -// returning a Session that can be used to handle RPC requests. 
-func AcceptSession(conn net.Conn, priv types.PrivateKey) (_ *Session, err error) { - m, err := mux.Accept(conn, ed25519.PrivateKey(priv)) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - m.Close() - } - }() - // exchange versions and write initial challenge - s, err := m.AcceptStream() - if err != nil { - return nil, err - } - defer s.Close() - var buf [1]byte - if _, err := s.Read(buf[:]); err != nil { - return nil, fmt.Errorf("could not read peer version: %w", err) - } else if _, err := s.Write([]byte{protocolVersion}); err != nil { - return nil, fmt.Errorf("could not write our version: %w", err) - } else if version := buf[0]; version != protocolVersion { - return nil, fmt.Errorf("incompatible versions (ours = %v, theirs = %v)", protocolVersion, version) - } - challenge := frand.Entropy128() - if _, err := s.Write(challenge[:]); err != nil { - return nil, fmt.Errorf("couldn't write challenge: %w", err) - } - return &Session{ - Mux: m, - challenge: challenge, - }, nil -} - -// DialSession conducts the renter's half of the renter-host protocol handshake, -// returning a Session that can be used to make RPC requests. 
-func DialSession(conn net.Conn, pub types.PublicKey) (_ *Session, err error) { - m, err := mux.Dial(conn, pub[:]) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - m.Close() - } - }() - // exchange versions and read host's initial challenge - s := m.DialStream() - defer s.Close() - var buf [1]byte - if _, err := s.Write([]byte{protocolVersion}); err != nil { - return nil, fmt.Errorf("could not write our version: %w", err) - } else if _, err := s.Read(buf[:]); err != nil { - return nil, fmt.Errorf("could not read peer version: %w", err) - } else if version := buf[0]; version != protocolVersion { - return nil, fmt.Errorf("incompatible versions (ours = %v, theirs = %v)", protocolVersion, version) - } - var challenge [16]byte - if _, err := io.ReadFull(s, challenge[:]); err != nil { - return nil, fmt.Errorf("couldn't read host challenge: %w", err) - } else if challenge == ([16]byte{}) { - return nil, errors.New("host rejected our version") - } - return &Session{ - Mux: m, - challenge: challenge, - }, nil -} diff --git a/v2/net/rhp/session_test.go b/v2/net/rhp/session_test.go deleted file mode 100644 index b8015e16..00000000 --- a/v2/net/rhp/session_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package rhp - -import ( - "bytes" - "errors" - "io" - "math" - "math/rand" - "net" - "reflect" - "testing" - "testing/quick" - "time" - - "go.sia.tech/core/v2/net/rpc" - "go.sia.tech/core/v2/types" - - "lukechampine.com/frand" -) - -var randomTxn = func() types.Transaction { - var valueFn func(t reflect.Type, r *rand.Rand) reflect.Value - valueFn = func(t reflect.Type, r *rand.Rand) reflect.Value { - if t.String() == "types.SpendPolicy" { - return reflect.ValueOf(types.AnyoneCanSpend()) - } - v := reflect.New(t).Elem() - switch t.Kind() { - default: - v, _ = quick.Value(t, r) - case reflect.Slice: - // 3 elements per slice to prevent generating giant objects - v.Set(reflect.MakeSlice(t, 3, 3)) - for i := 0; i < v.Len(); i++ { - 
v.Index(i).Set(valueFn(t.Elem(), r)) - } - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - v.Field(i).Set(valueFn(t.Field(i).Type, r)) - } - } - return v - } - r := rand.New(frand.NewSource()) - txn := valueFn(reflect.TypeOf(types.Transaction{}), r) - return txn.Interface().(types.Transaction) -}() - -func deepEqual(a, b types.EncoderTo) bool { - var abuf bytes.Buffer - e := types.NewEncoder(&abuf) - a.EncodeTo(e) - e.Flush() - var bbuf bytes.Buffer - e = types.NewEncoder(&bbuf) - b.EncodeTo(e) - e.Flush() - return bytes.Equal(abuf.Bytes(), bbuf.Bytes()) -} - -func TestSession(t *testing.T) { - // initialize host - hostPrivKey := types.GeneratePrivateKey() - hostPubKey := hostPrivKey.PublicKey() - contractPrivKey := types.GeneratePrivateKey() - contractPubKey := contractPrivKey.PublicKey() - l, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatal(err) - } - defer l.Close() - peerErr := make(chan error, 1) - go func() { - peerErr <- func() error { - conn, err := l.Accept() - if err != nil { - return err - } - defer conn.Close() - sess, err := AcceptSession(conn, hostPrivKey) - if err != nil { - return err - } - defer sess.Close() - - // receive+verify signed challenge - stream, err := sess.AcceptStream() - if err != nil { - return err - } - defer stream.Close() - var sig types.Signature - if _, err := io.ReadFull(stream, sig[:]); err != nil { - return err - } - if !sess.VerifyChallenge(sig, contractPubKey) { - return errors.New("invalid challenge signature") - } - return nil - }() - }() - - // connect to host - conn, err := net.Dial("tcp", l.Addr().String()) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - sess, err := DialSession(conn, hostPubKey) - if err != nil { - t.Fatal(err) - } - defer sess.Close() - stream := sess.DialStream() - defer stream.Close() - - // sign and send challenge - sig := sess.SignChallenge(contractPrivKey) - if _, err := stream.Write(sig[:]); err != nil { - t.Fatal(err) - } - if err := <-peerErr; err != nil { - 
t.Fatal(err) - } -} - -func TestChallenge(t *testing.T) { - s := Session{} - frand.Read(s.challenge[:]) - privkey := types.GeneratePrivateKey() - pubkey := privkey.PublicKey() - sig := s.SignChallenge(privkey) - if !s.VerifyChallenge(sig, pubkey) { - t.Fatal("challenge was not signed/verified correctly") - } -} - -func TestEncoding(t *testing.T) { - randSignature := func() (s types.Signature) { - frand.Read(s[:]) - return - } - randPubKey := func() (p types.PublicKey) { - frand.Read(p[:]) - return - } - objs := []rpc.Object{ - &rpc.Specifier{'f', 'o', 'o'}, - &RPCFormContractRequest{ - Inputs: randomTxn.SiacoinInputs, - Outputs: randomTxn.SiacoinOutputs, - MinerFee: randomTxn.MinerFee, - Contract: randomTxn.FileContracts[0], - }, - &RPCRenewContractRequest{ - Inputs: randomTxn.SiacoinInputs, - Outputs: randomTxn.SiacoinOutputs, - MinerFee: randomTxn.MinerFee, - Resolution: randomTxn.FileContractResolutions[0], - }, - &RPCFormContractHostAdditions{ - Inputs: randomTxn.SiacoinInputs, - Outputs: randomTxn.SiacoinOutputs, - ContractSignature: randomTxn.SiacoinInputs[0].Signatures[0], - }, - &RPCRenewContractHostAdditions{ - Inputs: randomTxn.SiacoinInputs, - Outputs: randomTxn.SiacoinOutputs, - RenewalSignature: randomTxn.SiacoinInputs[0].Signatures[0], - FinalizationSignature: randomTxn.SiacoinInputs[0].Signatures[0], - }, - &RPCContractSignatures{ - SiacoinInputSignatures: [][]types.Signature{ - randomTxn.SiacoinInputs[0].Signatures, - }, - }, - &RPCRenewContractRenterSignatures{ - SiacoinInputSignatures: [][]types.Signature{ - randomTxn.SiacoinInputs[0].Signatures, - }, - RenewalSignature: randomTxn.SiacoinInputs[0].Signatures[0], - }, - &RPCLockRequest{ - ContractID: randomTxn.FileContractRevisions[0].Parent.ID, - Signature: randSignature(), - Timeout: frand.Uint64n(100), - }, - &RPCLockResponse{ - Revision: randomTxn.FileContractRevisions[0].Revision, - }, - &RPCReadRequest{ - Sections: []RPCReadRequestSection{{}}, - NewRevisionNumber: frand.Uint64n(100), - 
Signature: randSignature(), - }, - &RPCReadResponse{ - Signature: randSignature(), - Data: frand.Bytes(8), - MerkleProof: randomTxn.SiacoinInputs[0].Parent.MerkleProof, - }, - &RPCSectorRootsRequest{ - RootOffset: frand.Uint64n(100), - NumRoots: frand.Uint64n(100), - NewRevisionNumber: frand.Uint64n(100), - Signature: randSignature(), - }, - &RPCSectorRootsResponse{ - SectorRoots: randomTxn.SiacoinInputs[0].Parent.MerkleProof, - MerkleProof: randomTxn.SiacoinInputs[0].Parent.MerkleProof, - Signature: randSignature(), - }, - &RPCSettingsResponse{ - Settings: frand.Bytes(128), - }, - &RPCWriteRequest{ - Actions: []RPCWriteAction{{Data: frand.Bytes(8)}}, - NewRevisionNumber: frand.Uint64n(100), - }, - &RPCWriteMerkleProof{ - OldSubtreeHashes: randomTxn.SiacoinInputs[0].Parent.MerkleProof, - OldLeafHashes: randomTxn.SiacoinInputs[0].Parent.MerkleProof, - NewMerkleRoot: types.Hash256{4, 5, 6}, - }, - &RPCWriteResponse{ - Signature: randSignature(), - }, - &RPCRevisionSigningResponse{ - Signature: randSignature(), - }, - &RPCAccountBalanceResponse{ - Balance: types.NewCurrency64(frand.Uint64n(math.MaxUint64)), - }, - &RPCAccountBalanceRequest{ - AccountID: randPubKey(), - }, - &RPCFundAccountRequest{ - AccountID: randPubKey(), - }, - &RPCFundAccountResponse{ - Balance: types.NewCurrency64(frand.Uint64n(math.MaxUint64)), - Receipt: Receipt{ - Host: randPubKey(), - Account: randPubKey(), - Amount: types.NewCurrency64(frand.Uint64n(math.MaxUint64)), - Timestamp: time.Now(), - }, - Signature: randSignature(), - }, - &RPCExecuteInstrResponse{ - AdditionalCollateral: types.NewCurrency64(frand.Uint64n(math.MaxUint64)), - OutputLength: frand.Uint64n(100), - NewMerkleRoot: types.Hash256(randPubKey()), - NewDataSize: frand.Uint64n(100), - Proof: randomTxn.SiacoinInputs[0].Parent.MerkleProof, - Error: errors.New(string(frand.Bytes(128))), - TotalCost: types.NewCurrency64(frand.Uint64n(math.MaxUint64)), - FailureRefund: types.NewCurrency64(frand.Uint64n(math.MaxUint64)), - }, - 
&RPCFinalizeProgramRequest{ - Signature: randSignature(), - NewRevisionNumber: frand.Uint64n(100), - NewOutputs: ContractOutputs{ - HostValue: types.NewCurrency64(frand.Uint64n(math.MaxUint64)), - RenterValue: types.NewCurrency64(frand.Uint64n(math.MaxUint64)), - MissedHostValue: types.NewCurrency64(frand.Uint64n(math.MaxUint64)), - }, - }, - } - for _, o := range objs { - var b bytes.Buffer - e := types.NewEncoder(&b) - o.EncodeTo(e) - e.Flush() - dup := reflect.New(reflect.TypeOf(o).Elem()).Interface().(rpc.Object) - d := types.NewBufDecoder(b.Bytes()) - dup.DecodeFrom(d) - if d.Err() != nil { - t.Errorf("error decoding %T: %v", o, d.Err()) - } else if !deepEqual(o, dup) { - t.Errorf("%T objects differ after unmarshalling", o) - } - } -} diff --git a/v2/net/rhp/settings.go b/v2/net/rhp/settings.go deleted file mode 100644 index d652e634..00000000 --- a/v2/net/rhp/settings.go +++ /dev/null @@ -1,188 +0,0 @@ -package rhp - -import ( - "time" - - "go.sia.tech/core/v2/types" -) - -// A SettingsID is a unique identifier for registered host settings used by renters -// when interacting with the host. -type SettingsID [16]byte - -// MaxLen returns the maximum length of an encoded object. Implements rpc.Object. -func (id *SettingsID) MaxLen() int { - return 16 -} - -// EncodeTo encodes a SettingsID to an encoder. Implements types.EncoderTo. -func (id *SettingsID) EncodeTo(e *types.Encoder) { - e.Write(id[:]) -} - -// DecodeFrom decodes a SettingsID from a decoder. Implements types.DecoderFrom. -func (id *SettingsID) DecodeFrom(d *types.Decoder) { - d.Read(id[:]) -} - -// HostSettings are the settings and prices used when interacting with a host. 
-type HostSettings struct { - AcceptingContracts bool `json:"acceptingContracts"` - Address types.Address `json:"address"` - BlockHeight uint64 `json:"blockHeight"` - EphemeralAccountExpiry time.Duration `json:"ephemeralAccountExpiry"` - MaxCollateral types.Currency `json:"maxCollateral"` - MaxDuration uint64 `json:"maxDuration"` - MaxEphemeralAccountBalance types.Currency `json:"maxEphemeralAccountBalance"` - NetAddress string `json:"netAddress"` - RemainingRegistryEntries uint64 `json:"remainingRegistryEntries"` - RemainingStorage uint64 `json:"remainingStorage"` - SectorSize uint64 `json:"sectorSize"` - TotalRegistryEntries uint64 `json:"totalRegistryEntries"` - TotalStorage uint64 `json:"totalStorage"` - ValidUntil time.Time `json:"validUntil"` - Version string `json:"version"` - WindowSize uint64 `json:"windowSize"` - - ContractFee types.Currency `json:"contractFee"` - // Collateral is the amount of Hastings per byte per block that the host is willing to risk. - Collateral types.Currency `json:"collateral"` - // DownloadBandwidthPrice is the amount of Hastings per byte of download data charged to the renter. - DownloadBandwidthPrice types.Currency `json:"downloadBandwidthPrice"` - // UploadBandwidthPrice is the amount of Hastings per byte of upload data charged to the renter. - UploadBandwidthPrice types.Currency `json:"uploadBandwidthPrice"` - // StoragePrice is the amount of Hastings per byte per block to store data on the host. - StoragePrice types.Currency `json:"storagePrice"` - - RPCAccountBalanceCost types.Currency `json:"rpcAccountBalanceCost"` - RPCFundAccountCost types.Currency `json:"rpcFundAccountCost"` - RPCHostSettingsCost types.Currency `json:"rpcHostSettingsCost"` - RPCLatestRevisionCost types.Currency `json:"rpcLatestRevisionCost"` - RPCRenewContractCost types.Currency `json:"rpcRenewContractCost"` - - // ProgInitBaseCost is the cost in Hastings that is incurred when an MDM - // program starts to run. 
This doesn't include the memory used by the - // program data. The total cost to initialize a program is calculated as - // InitCost = InitBaseCost + MemoryTimeCost * Time - ProgInitBaseCost types.Currency `json:"progInitBaseCost"` - // ProgMemoryTimeCost is the cost in Hastings per byte per time that is - // incurred by the memory consumption of the program. - ProgMemoryTimeCost types.Currency `json:"progMemorytimecost"` - // ProgReadCost is the cost in Hastings per byte of data read from disk during program executions. - ProgReadCost types.Currency `json:"progReadCost"` - // ProgWriteCost is the cost in Hastings per byte, rounded up to the nearest multiple of 4KiB, of data written to - // disk during program execution. - ProgWriteCost types.Currency `json:"progWriteCost"` - - InstrAppendSectorBaseCost types.Currency `json:"instrAppendSectorsBaseCost"` - InstrDropSectorsBaseCost types.Currency `json:"instrDropSectorsBaseCost"` - InstrDropSectorsUnitCost types.Currency `json:"instrDropSectorsUnitCost"` - InstrHasSectorBaseCost types.Currency `json:"instrHasSectorBaseCost"` - InstrReadBaseCost types.Currency `json:"instrReadBaseCost"` - InstrReadRegistryBaseCost types.Currency `json:"instrReadRegistryBaseCost"` - InstrRevisionBaseCost types.Currency `json:"instrRevisionBaseCost"` - InstrSectorRootsBaseCost types.Currency `json:"instrSectorRootsBaseCost"` - InstrSwapSectorBaseCost types.Currency `json:"instrSwapSectorCost"` - InstrUpdateRegistryBaseCost types.Currency `json:"instrUpdateRegistryBaseCost"` - InstrUpdateSectorBaseCost types.Currency `json:"instrUpdateSectorBaseCost"` - InstrWriteBaseCost types.Currency `json:"instrWriteBaseCost"` -} - -// EncodeTo encodes host settings to the encoder; implements types.EncoderTo. 
-func (p *HostSettings) EncodeTo(e *types.Encoder) { - e.WriteTime(p.ValidUntil) - e.WriteBool(p.AcceptingContracts) - e.WriteUint64(p.BlockHeight) - e.WriteUint64(uint64(p.EphemeralAccountExpiry)) - p.MaxCollateral.EncodeTo(e) - e.WriteUint64(p.MaxDuration) - p.MaxEphemeralAccountBalance.EncodeTo(e) - e.WriteString(p.NetAddress) - e.WriteUint64(p.RemainingStorage) - e.WriteUint64(p.TotalStorage) - e.WriteUint64(p.RemainingRegistryEntries) - e.WriteUint64(p.TotalRegistryEntries) - e.WriteUint64(p.SectorSize) - p.Address.EncodeTo(e) - e.WriteString(p.Version) - e.WriteUint64(p.WindowSize) - p.ContractFee.EncodeTo(e) - p.Collateral.EncodeTo(e) - p.DownloadBandwidthPrice.EncodeTo(e) - p.UploadBandwidthPrice.EncodeTo(e) - p.StoragePrice.EncodeTo(e) - p.RPCAccountBalanceCost.EncodeTo(e) - p.RPCFundAccountCost.EncodeTo(e) - p.RPCLatestRevisionCost.EncodeTo(e) - p.RPCRenewContractCost.EncodeTo(e) - p.RPCHostSettingsCost.EncodeTo(e) - p.ProgInitBaseCost.EncodeTo(e) - p.ProgMemoryTimeCost.EncodeTo(e) - p.ProgReadCost.EncodeTo(e) - p.ProgWriteCost.EncodeTo(e) - p.InstrAppendSectorBaseCost.EncodeTo(e) - p.InstrDropSectorsBaseCost.EncodeTo(e) - p.InstrDropSectorsUnitCost.EncodeTo(e) - p.InstrHasSectorBaseCost.EncodeTo(e) - p.InstrReadBaseCost.EncodeTo(e) - p.InstrReadRegistryBaseCost.EncodeTo(e) - p.InstrSwapSectorBaseCost.EncodeTo(e) - p.InstrUpdateRegistryBaseCost.EncodeTo(e) - p.InstrRevisionBaseCost.EncodeTo(e) - p.InstrSectorRootsBaseCost.EncodeTo(e) - p.InstrUpdateSectorBaseCost.EncodeTo(e) - p.InstrWriteBaseCost.EncodeTo(e) -} - -// DecodeFrom decodes host settings from the decoder; implements types.DecoderFrom. 
-func (p *HostSettings) DecodeFrom(d *types.Decoder) { - p.ValidUntil = d.ReadTime() - p.AcceptingContracts = d.ReadBool() - p.BlockHeight = d.ReadUint64() - p.EphemeralAccountExpiry = time.Duration(d.ReadUint64()) - p.MaxCollateral.DecodeFrom(d) - p.MaxDuration = d.ReadUint64() - p.MaxEphemeralAccountBalance.DecodeFrom(d) - p.NetAddress = d.ReadString() - p.RemainingStorage = d.ReadUint64() - p.TotalStorage = d.ReadUint64() - p.RemainingRegistryEntries = d.ReadUint64() - p.TotalRegistryEntries = d.ReadUint64() - p.SectorSize = d.ReadUint64() - p.Address.DecodeFrom(d) - p.Version = d.ReadString() - p.WindowSize = d.ReadUint64() - p.ContractFee.DecodeFrom(d) - p.Collateral.DecodeFrom(d) - p.DownloadBandwidthPrice.DecodeFrom(d) - p.UploadBandwidthPrice.DecodeFrom(d) - p.StoragePrice.DecodeFrom(d) - p.RPCAccountBalanceCost.DecodeFrom(d) - p.RPCFundAccountCost.DecodeFrom(d) - p.RPCLatestRevisionCost.DecodeFrom(d) - p.RPCRenewContractCost.DecodeFrom(d) - p.RPCHostSettingsCost.DecodeFrom(d) - p.ProgInitBaseCost.DecodeFrom(d) - p.ProgMemoryTimeCost.DecodeFrom(d) - p.ProgReadCost.DecodeFrom(d) - p.ProgWriteCost.DecodeFrom(d) - p.InstrAppendSectorBaseCost.DecodeFrom(d) - p.InstrDropSectorsBaseCost.DecodeFrom(d) - p.InstrDropSectorsUnitCost.DecodeFrom(d) - p.InstrHasSectorBaseCost.DecodeFrom(d) - p.InstrReadBaseCost.DecodeFrom(d) - p.InstrReadRegistryBaseCost.DecodeFrom(d) - p.InstrSwapSectorBaseCost.DecodeFrom(d) - p.InstrUpdateRegistryBaseCost.DecodeFrom(d) - p.InstrRevisionBaseCost.DecodeFrom(d) - p.InstrSectorRootsBaseCost.DecodeFrom(d) - p.InstrUpdateSectorBaseCost.DecodeFrom(d) - p.InstrWriteBaseCost.DecodeFrom(d) -} - -// MaxLen implements rpc.Object. -func (p *HostSettings) MaxLen() int { - // UUID + bool + 25 types.Currency fields + 9 uint64 fields + version string + netaddress string - // netaddress maximum is based on RFC 1035 https://www.freesoft.org/CIE/RFC/1035/9.htm. 
- return 16 + 1 + (25 * 16) + (9 * 8) + 10 + 256 -} diff --git a/v2/net/rpc/rpc.go b/v2/net/rpc/rpc.go deleted file mode 100644 index 6683a584..00000000 --- a/v2/net/rpc/rpc.go +++ /dev/null @@ -1,173 +0,0 @@ -package rpc - -import ( - "bytes" - "fmt" - "io" - "strings" - - "go.sia.tech/core/v2/types" -) - -// An Object can be sent and received via RPC. -type Object interface { - types.EncoderTo - types.DecoderFrom - MaxLen() int -} - -// A Specifier is a generic identification tag. -type Specifier [16]byte - -// EncodeTo implements Object. -func (s *Specifier) EncodeTo(e *types.Encoder) { e.Write(s[:]) } - -// DecodeFrom implements Object. -func (s *Specifier) DecodeFrom(d *types.Decoder) { d.Read(s[:]) } - -// MaxLen implements Object. -func (s *Specifier) MaxLen() int { return 16 } - -// String implements fmt.Stringer. -func (s Specifier) String() string { return string(bytes.Trim(s[:], "\x00")) } - -// NewSpecifier constructs a Specifier from the provided string, which must not -// be longer than 16 bytes. -func NewSpecifier(str string) Specifier { - if len(str) > 16 { - panic("specifier is too long") - } - var s Specifier - copy(s[:], str) - return s -} - -// An Error may be sent instead of a response object to any RPC. -type Error struct { - Type Specifier - Data []byte // structure depends on Type - Description string // human-readable error string -} - -// EncodeTo implements types.EncoderTo. -func (err *Error) EncodeTo(e *types.Encoder) { - err.Type.EncodeTo(e) - e.WriteBytes(err.Data) - e.WriteString(err.Description) -} - -// DecodeFrom implements types.DecoderFrom. -func (err *Error) DecodeFrom(d *types.Decoder) { - err.Type.DecodeFrom(d) - err.Data = d.ReadBytes() - err.Description = d.ReadString() -} - -// MaxLen implements Object. -func (err *Error) MaxLen() int { - return 1024 // arbitrary -} - -// Error implements the error interface. -func (err *Error) Error() string { - return err.Description -} - -// Is reports whether this error matches target. 
-func (err *Error) Is(target error) bool { - return strings.Contains(err.Description, target.Error()) -} - -// rpcResponse is a helper type for encoding and decoding RPC responses. -type rpcResponse struct { - err *Error - obj Object -} - -func (resp *rpcResponse) EncodeTo(e *types.Encoder) { - e.WriteBool(resp.err != nil) - if resp.err != nil { - resp.err.EncodeTo(e) - } else { - resp.obj.EncodeTo(e) - } -} - -func (resp *rpcResponse) DecodeFrom(d *types.Decoder) { - if isErr := d.ReadBool(); isErr { - resp.err = new(Error) - resp.err.DecodeFrom(d) - } else { - resp.obj.DecodeFrom(d) - } -} - -func (resp *rpcResponse) MaxLen() int { - return 1 + resp.err.MaxLen() + resp.obj.MaxLen() -} - -// WriteObject writes obj to w. -func WriteObject(w io.Writer, obj Object) error { - e := types.NewEncoder(w) - obj.EncodeTo(e) - return e.Flush() -} - -// ReadObject reads obj from r. -func ReadObject(r io.Reader, obj Object) error { - d := types.NewDecoder(io.LimitedReader{R: r, N: int64(obj.MaxLen())}) - obj.DecodeFrom(d) - return d.Err() -} - -// WriteRequest sends an RPC request, comprising an RPC ID and an optional -// request object. -func WriteRequest(w io.Writer, id Specifier, req Object) error { - if err := WriteObject(w, &id); err != nil { - return fmt.Errorf("couldn't write request ID: %w", err) - } - if req != nil { - if err := WriteObject(w, req); err != nil { - return fmt.Errorf("couldn't write request object: %w", err) - } - } - return nil -} - -// ReadID reads an RPC request ID. -func ReadID(r io.Reader) (id Specifier, err error) { - err = ReadObject(r, &id) - return -} - -// ReadRequest reads an RPC request. -func ReadRequest(r io.Reader, req Object) error { - return ReadObject(r, req) -} - -// WriteResponse writes an RPC response object to w. -func WriteResponse(w io.Writer, resp Object) error { - return WriteObject(w, &rpcResponse{obj: resp}) -} - -// WriteResponseErr writes an RPC error to w. 
If err is an *rpc.Error, it is sent directly; otherwise, a generic -// rpc.Error is created from err's Error string. -func WriteResponseErr(w io.Writer, err error) error { - re, ok := err.(*Error) - if err != nil && !ok { - re = &Error{Description: err.Error()} - } - return WriteObject(w, &rpcResponse{err: re}) -} - -// ReadResponse reads an RPC response. If the response is an error, it is -// returned directly. -func ReadResponse(r io.Reader, resp Object) error { - rr := rpcResponse{obj: resp} - if err := ReadObject(r, &rr); err != nil { - return fmt.Errorf("failed to read message: %w", err) - } else if rr.err != nil { - return fmt.Errorf("response error: %w", rr.err) - } - return nil -} diff --git a/v2/types/currency.go b/v2/types/currency.go deleted file mode 100644 index f7f4b9ff..00000000 --- a/v2/types/currency.go +++ /dev/null @@ -1,318 +0,0 @@ -package types - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "math/bits" - "strings" -) - -var ( - // ZeroCurrency represents zero base units. - ZeroCurrency Currency - - // HastingsPerSiacoin is the number of hastings (base units) in a siacoin. - HastingsPerSiacoin = NewCurrency(2003764205206896640, 54210) // 10^24 -) - -// Currency represents a quantity of hastings as an unsigned 128-bit number. -type Currency struct { - Lo, Hi uint64 -} - -// NewCurrency returns the Currency value (lo,hi). -func NewCurrency(lo, hi uint64) Currency { - return Currency{lo, hi} -} - -// NewCurrency64 converts c to a Currency value. -func NewCurrency64(c uint64) Currency { - return Currency{c, 0} -} - -// Siacoins returns a Currency value representing n siacoins. -func Siacoins(n uint32) Currency { return HastingsPerSiacoin.Mul64(uint64(n)) } - -// IsZero returns true if c == 0. -func (c Currency) IsZero() bool { - return c == ZeroCurrency -} - -// Equals returns true if c == v. -// -// Currency values can be compared directly with ==, but use of the Equals method -// is preferred for consistency. 
-func (c Currency) Equals(v Currency) bool { - return c == v -} - -// Cmp compares c and v and returns: -// -// -1 if c < v -// 0 if c == v -// +1 if c > v -func (c Currency) Cmp(v Currency) int { - if c == v { - return 0 - } else if c.Hi < v.Hi || (c.Hi == v.Hi && c.Lo < v.Lo) { - return -1 - } else { - return 1 - } -} - -// Add returns c+v. If the result would overflow, Add panics. -// -// It is safe to use Add in any context where the sum cannot exceed the total -// supply of Currency (such as when calculating the balance of a wallet). In -// less-trusted contexts (such as when validating a transaction), -// AddWithOverflow should be used instead. -func (c Currency) Add(v Currency) Currency { - s, overflow := c.AddWithOverflow(v) - if overflow { - panic("overflow") - } - return s -} - -// AddWithOverflow returns c+v, along with a boolean indicating whether the -// result overflowed. -func (c Currency) AddWithOverflow(v Currency) (Currency, bool) { - lo, carry := bits.Add64(c.Lo, v.Lo, 0) - hi, carry := bits.Add64(c.Hi, v.Hi, carry) - return Currency{lo, hi}, carry != 0 -} - -// Sub returns c-v. If the result would underflow, Sub panics. -func (c Currency) Sub(v Currency) Currency { - s, underflow := c.SubWithUnderflow(v) - if underflow { - panic("underflow") - } - return s -} - -// SubWithUnderflow returns c-v, along with a boolean indicating whether the result -// underflowed. -func (c Currency) SubWithUnderflow(v Currency) (Currency, bool) { - lo, borrow := bits.Sub64(c.Lo, v.Lo, 0) - hi, borrow := bits.Sub64(c.Hi, v.Hi, borrow) - return Currency{lo, hi}, borrow != 0 -} - -// Mul64 returns c*v. If the result would overflow, Mul64 panics. -// -// Note that it is safe to multiply any two Currency values that are below 2^64. 
-func (c Currency) Mul64(v uint64) Currency { - // NOTE: this is the overflow-checked equivalent of: - // - // hi, lo := bits.Mul64(c.Lo, v) - // hi += c.Hi * v - // - hi0, lo0 := bits.Mul64(c.Lo, v) - hi1, lo1 := bits.Mul64(c.Hi, v) - hi2, c0 := bits.Add64(hi0, lo1, 0) - if hi1 != 0 || c0 != 0 { - panic("overflow") - } - return Currency{lo0, hi2} -} - -// Div returns c/v. If v == 0, Div panics. -func (c Currency) Div(v Currency) Currency { - q, _ := c.quoRem(v) - return q -} - -// Div64 returns c/v. If v == 0, Div panics. -func (c Currency) Div64(v uint64) Currency { - q, _ := c.quoRem64(v) - return q -} - -// quoRem returns q = c/v and r = c%v. If v == ZeroCurrency, Div panics. -func (c Currency) quoRem(v Currency) (q, r Currency) { - if v.Hi == 0 { - var r64 uint64 - q, r64 = c.quoRem64(v.Lo) - r = NewCurrency64(r64) - } else { - // generate a "trial quotient," guaranteed to be within 1 of the actual - // quotient, then adjust. - n := bits.LeadingZeros64(v.Hi) - v1 := NewCurrency(v.Lo<>(64-n)) // v << n - u1 := NewCurrency(c.Lo>>1|c.Hi<<63, c.Hi>>1) // c >> 1 - tq, _ := bits.Div64(u1.Hi, u1.Lo, v1.Hi) - tq >>= 63 - n - if tq != 0 { - tq-- - } - q = NewCurrency64(tq) - // calculate remainder using trial quotient, then adjust if remainder is - // greater than divisor - r = c.Sub(v.Mul64(tq)) - if r.Cmp(v) >= 0 { - // increment q - if q.Lo++; q.Lo == 0 { - q.Hi++ - } - r = r.Sub(v) - } - } - return -} - -// quoRem64 returns q = c/v and r = c%v. -func (c Currency) quoRem64(v uint64) (q Currency, r uint64) { - if c.Hi < v { - q.Lo, r = bits.Div64(c.Hi, c.Lo, v) - } else { - q.Hi, r = bits.Div64(0, c.Hi, v) - q.Lo, r = bits.Div64(r, c.Lo, v) - } - return -} - -// Big returns c as a *big.Int. -func (c Currency) Big() *big.Int { - b := make([]byte, 16) - binary.BigEndian.PutUint64(b[:8], c.Hi) - binary.BigEndian.PutUint64(b[8:], c.Lo) - return new(big.Int).SetBytes(b) -} - -// ExactString returns the base-10 representation of c as a string. 
-func (c Currency) ExactString() string { - if c.IsZero() { - return "0" - } - buf := []byte("0000000000000000000000000000000000000000") // log10(2^128) < 40 - for i := len(buf); ; i -= 19 { - q, r := c.quoRem64(1e19) // largest power of 10 that fits in a uint64 - var n int - for ; r != 0; r /= 10 { - n++ - buf[i-n] += byte(r % 10) - } - if q.IsZero() { - return string(buf[i-n:]) - } - c = q - } -} - -// String returns base-10 representation of c with a unit suffix. The value may -// be rounded. To avoid loss of precision, use ExactString. -func (c Currency) String() string { - i := c.Big() - pico := HastingsPerSiacoin.Div64(1e12).Big() - if i.Cmp(pico) < 0 { - return i.String() + " H" - } - - // iterate until we find a unit greater than c - mag := pico - unit := "" - for _, unit = range []string{"pS", "nS", "uS", "mS", "SC", "KS", "MS", "GS", "TS"} { - j := new(big.Int).Mul(mag, big.NewInt(1e3)) - if i.Cmp(j) < 0 { - break - } else if unit != "TS" { - // don't want to perform this multiply on the last iter; that - // would give us 1.235 TS instead of 1235 TS - mag = j - } - } - - num := new(big.Rat).SetInt(i) - denom := new(big.Rat).SetInt(mag) - f, _ := new(big.Rat).Mul(num, denom.Inv(denom)).Float64() - return fmt.Sprintf("%.4g %s", f, unit) -} - -// Format implements fmt.Formatter. It accepts the following formats: -// -// d: raw integer (equivalent to ExactString()) -// s: rounded integer with unit suffix (equivalent to String()) -// v: same as s -func (c Currency) Format(f fmt.State, v rune) { - switch v { - case 'd': - io.WriteString(f, c.ExactString()) - case 's', 'v': - io.WriteString(f, c.String()) - default: - fmt.Fprintf(f, "%%!%c(unsupported,Currency=%d)", v, c) - } -} - -// MarshalJSON implements json.Marshaler. -func (c Currency) MarshalJSON() ([]byte, error) { - return []byte(`"` + c.ExactString() + `"`), nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (c *Currency) UnmarshalJSON(b []byte) (err error) { - *c, err = parseExactCurrency(strings.Trim(string(b), `"`)) - return -} - -func parseExactCurrency(s string) (Currency, error) { - i, ok := new(big.Int).SetString(s, 10) - if !ok { - return ZeroCurrency, errors.New("not an integer") - } else if i.Sign() < 0 { - return ZeroCurrency, errors.New("value cannot be negative") - } else if i.BitLen() > 128 { - return ZeroCurrency, errors.New("value overflows Currency representation") - } - return NewCurrency(i.Uint64(), new(big.Int).Rsh(i, 64).Uint64()), nil -} - -func expToUnit(exp int64) *big.Rat { - return new(big.Rat).SetInt(new(big.Int).Exp(big.NewInt(10), big.NewInt(exp), nil)) -} - -var currencyUnits = map[string]*big.Rat{ - "pS": expToUnit(12), - "nS": expToUnit(15), - "uS": expToUnit(18), - "mS": expToUnit(21), - "SC": expToUnit(24), - "KS": expToUnit(27), - "MS": expToUnit(30), - "GS": expToUnit(33), - "TS": expToUnit(36), -} - -// ParseCurrency parses s as a Currency value. The format of s should match one -// of the representations provided by (Currency).Format. 
-func ParseCurrency(s string) (Currency, error) { - i := strings.LastIndexAny(s, "0123456789.") + 1 - if i == 0 { - return ZeroCurrency, errors.New("not a number") - } - n, unit := s[:i], strings.TrimSpace(s[i:]) - if unit == "" || unit == "H" { - return parseExactCurrency(n) - } - // parse numeric part as a big.Rat - r, ok := new(big.Rat).SetString(n) - if !ok { - return ZeroCurrency, errors.New("not a number") - } - // multiply by unit - u, ok := currencyUnits[unit] - if !ok { - return ZeroCurrency, fmt.Errorf("invalid unit %q", unit) - } - r.Mul(r, u) - // r must be an integer at this point - if !r.IsInt() { - return ZeroCurrency, errors.New("not an integer") - } - return parseExactCurrency(r.RatString()) -} diff --git a/v2/types/currency_test.go b/v2/types/currency_test.go deleted file mode 100644 index 1b3f3b96..00000000 --- a/v2/types/currency_test.go +++ /dev/null @@ -1,599 +0,0 @@ -package types - -import ( - "math" - "testing" -) - -var maxCurrency = NewCurrency(math.MaxUint64, math.MaxUint64) - -func mustParseCurrency(s string) Currency { - c, err := ParseCurrency(s) - if err != nil { - panic(err) - } - return c -} - -func TestCurrencyCmp(t *testing.T) { - tests := []struct { - a, b Currency - want int - }{ - { - ZeroCurrency, - ZeroCurrency, - 0, - }, - { - ZeroCurrency, - NewCurrency64(5), - -1, - }, - { - NewCurrency64(5), - ZeroCurrency, - 1, - }, - { - NewCurrency(0, 1), - NewCurrency(0, 1), - 0, - }, - { - NewCurrency(math.MaxUint64, 0), - NewCurrency(0, 1), - -1, - }, - { - NewCurrency(0, 1), - NewCurrency(math.MaxUint64, 0), - 1, - }, - } - for _, tt := range tests { - if got := tt.a.Cmp(tt.b); got != tt.want { - t.Errorf("Currency.Cmp(%d, %d) expected = %d, want %d", tt.a, tt.b, got, tt.want) - } - } -} - -func TestCurrencyAdd(t *testing.T) { - tests := []struct { - a, b, want Currency - }{ - { - ZeroCurrency, - ZeroCurrency, - ZeroCurrency, - }, - { - NewCurrency(1, 0), - NewCurrency(1, 0), - NewCurrency(2, 0), - }, - { - NewCurrency(200, 0), - 
NewCurrency(50, 0), - NewCurrency(250, 0), - }, - { - NewCurrency(0, 1), - NewCurrency(0, 1), - NewCurrency(0, 2), - }, - { - NewCurrency(0, 71), - NewCurrency(math.MaxUint64, 0), - NewCurrency(math.MaxUint64, 71), - }, - } - for _, tt := range tests { - if got := tt.a.Add(tt.b); !got.Equals(tt.want) { - t.Errorf("Currency.Add(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want) - } - } -} - -func TestCurrencyAddWithOverflow(t *testing.T) { - tests := []struct { - a, b, want Currency - overflows bool - }{ - { - ZeroCurrency, - ZeroCurrency, - ZeroCurrency, - false, - }, - { - NewCurrency(1, 0), - NewCurrency(1, 0), - NewCurrency(2, 0), - false, - }, - { - NewCurrency(200, 0), - NewCurrency(50, 0), - NewCurrency(250, 0), - false, - }, - { - NewCurrency(0, 1), - NewCurrency(0, 1), - NewCurrency(0, 2), - false, - }, - { - NewCurrency(0, 71), - NewCurrency(math.MaxUint64, 0), - NewCurrency(math.MaxUint64, 71), - false, - }, - { - maxCurrency, - NewCurrency64(1), - ZeroCurrency, - true, - }, - } - for _, tt := range tests { - got, overflows := tt.a.AddWithOverflow(tt.b) - if tt.overflows != overflows { - t.Errorf("Currency.AddWithOverflow(%d, %d) overflow %t, want %t", tt.a, tt.b, overflows, tt.overflows) - } else if !got.Equals(tt.want) { - t.Errorf("Currency.AddWithOverflow(%d, %d) expected = %v, got %v", tt.a, tt.b, tt.want, got) - } - } -} - -func TestCurrencySub(t *testing.T) { - tests := []struct { - a, b, want Currency - }{ - { - ZeroCurrency, - ZeroCurrency, - ZeroCurrency, - }, - { - NewCurrency(1, 0), - NewCurrency(1, 0), - ZeroCurrency, - }, - { - NewCurrency(1, 0), - ZeroCurrency, - NewCurrency(1, 0), - }, - { - NewCurrency(0, 1), - NewCurrency(math.MaxUint64, 0), - NewCurrency(1, 0), - }, - { - NewCurrency(0, 1), - NewCurrency(1, 0), - NewCurrency(math.MaxUint64, 0), - }, - } - for _, tt := range tests { - if got := tt.a.Sub(tt.b); !got.Equals(tt.want) { - t.Errorf("Currency.Sub(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want) - } - } -} - -func 
TestCurrencySubWithUnderflow(t *testing.T) { - tests := []struct { - a, b, want Currency - underflows bool - }{ - { - ZeroCurrency, - ZeroCurrency, - ZeroCurrency, - false, - }, - { - NewCurrency(1, 0), - NewCurrency(1, 0), - ZeroCurrency, - false, - }, - { - NewCurrency(1, 0), - ZeroCurrency, - NewCurrency(1, 0), - false, - }, - { - NewCurrency(0, 1), - NewCurrency(math.MaxUint64, 0), - NewCurrency(1, 0), - false, - }, - { - NewCurrency(0, 1), - NewCurrency(1, 0), - NewCurrency(math.MaxUint64, 0), - false, - }, - { - ZeroCurrency, - NewCurrency64(1), - maxCurrency, - true, - }, - { - NewCurrency(0, 1), - NewCurrency(1, 1), - maxCurrency, - true, - }, - { - NewCurrency(1, 0), - NewCurrency(20, 0), - NewCurrency(math.MaxUint64-18, math.MaxUint64), - true, - }, - { - NewCurrency(1, 1), - NewCurrency(20, 1), - NewCurrency(math.MaxUint64-18, math.MaxUint64), - true, - }, - { - NewCurrency(math.MaxUint64, 0), - NewCurrency(0, 1), - maxCurrency, - true, - }, - } - for _, tt := range tests { - diff, underflows := tt.a.SubWithUnderflow(tt.b) - if tt.underflows != underflows { - t.Fatalf("Currency.SubWithUnderflow(%d, %d) underflow %t, want %t", tt.a, tt.b, underflows, tt.underflows) - } else if !diff.Equals(tt.want) { - t.Fatalf("Currency.SubWithUnderflow(%d, %d) expected = %d, got %d", tt.a, tt.b, tt.want, diff) - } - } -} - -func TestCurrencyMul64(t *testing.T) { - tests := []struct { - a Currency - b uint64 - want Currency - }{ - { - ZeroCurrency, - 0, - ZeroCurrency, - }, - { - NewCurrency(1, 0), - 1, - NewCurrency(1, 0), - }, - { - NewCurrency(0, 1), - 1, - NewCurrency(0, 1), - }, - { - NewCurrency(0, 1), - math.MaxUint64, - NewCurrency(0, math.MaxUint64), - }, - { - Siacoins(30), - 50, - Siacoins(1500), - }, - { - NewCurrency(math.MaxUint64, 0), - 2, - NewCurrency(math.MaxUint64-1, 1), - }, - } - for _, tt := range tests { - if got := tt.a.Mul64(tt.b); !got.Equals(tt.want) { - t.Errorf("Currency.Mul64(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want) - } - } -} - 
-func TestCurrencyDiv(t *testing.T) { - tests := []struct { - a, b, want Currency - }{ - { - ZeroCurrency, - NewCurrency64(1), - ZeroCurrency, - }, - { - NewCurrency(1, 0), - NewCurrency(1, 0), - NewCurrency(1, 0), - }, - { - Siacoins(156), - NewCurrency(2, 0), - Siacoins(78), - }, - { - Siacoins(300), - Siacoins(2), - NewCurrency(150, 0), - }, - { - NewCurrency(0, 1), - NewCurrency(1, 0), - NewCurrency(0, 1), - }, - { - NewCurrency(0, 1), - NewCurrency(0, 1), - NewCurrency(1, 0), - }, - { - NewCurrency(0, 1), - NewCurrency(2, 0), - NewCurrency(math.MaxUint64/2+1, 0), - }, - { - NewCurrency(8262254095159001088, 2742357), - NewCurrency64(2), - NewCurrency(13354499084434276352, 1371178), - }, - { - maxCurrency, - NewCurrency64(2), - NewCurrency(math.MaxUint64, math.MaxUint64/2), - }, - } - for _, tt := range tests { - if got := tt.a.Div(tt.b); !got.Equals(tt.want) { - t.Errorf("Currency.Div(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want) - } - } -} - -func TestCurrencyDiv64(t *testing.T) { - tests := []struct { - a Currency - b uint64 - want Currency - }{ - { - ZeroCurrency, - 1, - ZeroCurrency, - }, - { - NewCurrency64(1), - 1, - NewCurrency64(1), - }, - { - Siacoins(156), - 2, - Siacoins(78), - }, - { - maxCurrency, - 2, - NewCurrency(math.MaxUint64, math.MaxUint64/2), - }, - } - for _, tt := range tests { - if got := tt.a.Div64(tt.b); !got.Equals(tt.want) { - t.Errorf("Currency.Div64(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want) - } - } -} - -func TestCurrencyExactString(t *testing.T) { - tests := []struct { - val Currency - want string - }{ - { - ZeroCurrency, - "0", - }, - { - Siacoins(128), - "128000000000000000000000000", - }, - { - NewCurrency64(math.MaxUint64), - "18446744073709551615", - }, - { - NewCurrency(8262254095159001088, 2742357), - "50587566000000000000000000", - }, - { - maxCurrency, - "340282366920938463463374607431768211455", - }, - } - for _, tt := range tests { - if got := tt.val.ExactString(); got != tt.want { - 
t.Errorf("Currency.ExactString() = %v, want %v", got, tt.want) - } - } -} - -func TestCurrencyString(t *testing.T) { - tests := []struct { - val Currency - want string - }{ - { - ZeroCurrency, - "0 H", - }, - { - NewCurrency64(10000), - "10000 H", - }, - { - NewCurrency(8262254095159001088, 2742357), - "50.59 SC", - }, - { - NewCurrency(2174395257947586975, 137), - "2.529 mS", - }, - } - for _, tt := range tests { - if got := tt.val.String(); got != tt.want { - t.Errorf("Currency.String() = %v (%d), want %v", got, tt.val, tt.want) - } - } -} - -func TestCurrencyJSON(t *testing.T) { - tests := []struct { - val Currency - want string - }{ - { - ZeroCurrency, - `"0"`, - }, - { - NewCurrency64(10000), - `"10000"`, - }, - { - mustParseCurrency("50587566000000000000000000"), - `"50587566000000000000000000"`, - }, - { - mustParseCurrency("2529378333356156158367"), - `"2529378333356156158367"`, - }, - } - for _, tt := range tests { - // MarshalJSON cannot error - buf, _ := tt.val.MarshalJSON() - if string(buf) != tt.want { - t.Errorf("Currency.MarshalJSON(%d) = %s, want %s", tt.val, buf, tt.want) - continue - } - - var c Currency - if err := c.UnmarshalJSON(buf); err != nil { - t.Errorf("Currency.UnmarshalJSON(%s) err = %v", buf, err) - } else if !c.Equals(tt.val) { - t.Errorf("Currency.UnmarshalJSON(%s) = %d, want %d", buf, c, tt.val) - } - } -} - -func TestParseCurrency(t *testing.T) { - tests := []struct { - s string - want Currency - wantErr bool - }{ - { - "", - ZeroCurrency, - true, - }, - { - "-1", - ZeroCurrency, - true, - }, - { - "340282366920938463463374607431768211456", - ZeroCurrency, - true, - }, - { - "0", - ZeroCurrency, - false, - }, - { - "10000", - NewCurrency64(10000), - false, - }, - { - "50587566000000000000000000", - NewCurrency(8262254095159001088, 2742357), - false, - }, - { - "2529378333356156158367", - NewCurrency(2174395257947586975, 137), - false, - }, - { - "2529378333356156158367", - NewCurrency(2174395257947586975, 137), - false, - }, - { - 
"1 SC", - Siacoins(1), - false, - }, - { - "1000 mS", - Siacoins(1), - false, - }, - { - "123 mS", - Siacoins(123).Div64(1000), - false, - }, - { - "2.000000000000000000000001 SC", - Siacoins(2).Add(NewCurrency64(1)), - false, - }, - { - "12.345 GS", - Siacoins(12345).Mul64(1e6), - false, - }, - { - "1 foo", - ZeroCurrency, - true, - }, - { - "foo MS", - ZeroCurrency, - true, - }, - { - ".... SC", - ZeroCurrency, - true, - }, - { - "0.0000000000000000000000001 SC", - ZeroCurrency, - true, - }, - } - for _, tt := range tests { - got, err := ParseCurrency(tt.s) - if (err != nil) != tt.wantErr { - t.Errorf("ParseCurrency(%v) error = %v, wantErr %v", tt.s, err, tt.wantErr) - } else if !got.Equals(tt.want) { - t.Errorf("ParseCurrency(%v) = %d, want %d", tt.s, got, tt.want) - } - } -} diff --git a/v2/types/encoding.go b/v2/types/encoding.go deleted file mode 100644 index b14a13b4..00000000 --- a/v2/types/encoding.go +++ /dev/null @@ -1,873 +0,0 @@ -package types - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "time" - - "golang.org/x/crypto/blake2b" -) - -// An Encoder writes Sia objects to an underlying stream. -type Encoder struct { - w io.Writer - buf [1024]byte - n int - err error -} - -// Flush writes any pending data to the underlying stream. It returns the first -// error encountered by the Encoder. -func (e *Encoder) Flush() error { - if e.err == nil && e.n > 0 { - _, e.err = e.w.Write(e.buf[:e.n]) - e.n = 0 - } - return e.err -} - -// Write implements io.Writer. -func (e *Encoder) Write(p []byte) (int, error) { - lenp := len(p) - for e.err == nil && len(p) > 0 { - if e.n == len(e.buf) { - e.Flush() - } - c := copy(e.buf[e.n:], p) - e.n += c - p = p[c:] - } - return lenp, e.err -} - -// WriteBool writes a bool value to the underlying stream. -func (e *Encoder) WriteBool(b bool) { - var buf [1]byte - if b { - buf[0] = 1 - } - e.Write(buf[:]) -} - -// WriteUint8 writes a uint8 value to the underlying stream. 
-func (e *Encoder) WriteUint8(u uint8) { - e.Write([]byte{u}) -} - -// WriteUint64 writes a uint64 value to the underlying stream. -func (e *Encoder) WriteUint64(u uint64) { - var buf [8]byte - binary.LittleEndian.PutUint64(buf[:], u) - e.Write(buf[:]) -} - -// WritePrefix writes a length prefix to the underlying stream. -func (e *Encoder) WritePrefix(i int) { e.WriteUint64(uint64(i)) } - -// WriteTime writes a time.Time value to the underlying stream. -func (e *Encoder) WriteTime(t time.Time) { e.WriteUint64(uint64(t.Unix())) } - -// WriteBytes writes a length-prefixed []byte to the underlying stream. -func (e *Encoder) WriteBytes(b []byte) { - e.WritePrefix(len(b)) - e.Write(b) -} - -// WriteString writes a length-prefixed string to the underlying stream. -func (e *Encoder) WriteString(s string) { - e.WriteBytes([]byte(s)) -} - -// NewEncoder returns an Encoder that wraps the provided stream. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - } -} - -// An EncoderTo can encode itself to a stream via an Encoder. -type EncoderTo interface { - EncodeTo(e *Encoder) -} - -// EncodedLen returns the length of v when encoded. -func EncodedLen(v interface{}) int { - var buf bytes.Buffer - e := NewEncoder(&buf) - if et, ok := v.(EncoderTo); ok { - et.EncodeTo(e) - } else { - switch v := v.(type) { - case bool: - e.WriteBool(v) - case uint64: - e.WriteUint64(v) - case time.Time: - e.WriteTime(v) - case []byte: - e.WritePrefix(len(v)) - e.Write(v) - default: - panic(fmt.Sprintf("cannot encode type %T", v)) - } - } - _ = e.Flush() // no error possible - return buf.Len() -} - -// A Decoder reads values from an underlying stream. Callers MUST check -// (*Decoder).Err before using any decoded values. -type Decoder struct { - lr io.LimitedReader - buf [64]byte - err error -} - -// SetErr sets the Decoder's error if it has not already been set. SetErr should -// only be called from DecodeFrom methods. 
-func (d *Decoder) SetErr(err error) { - if err != nil && d.err == nil { - d.err = err - // clear d.buf so that future reads always return zero - d.buf = [len(d.buf)]byte{} - } -} - -// Err returns the first error encountered during decoding. -func (d *Decoder) Err() error { return d.err } - -// Read implements the io.Reader interface. It always returns an error if fewer -// than len(p) bytes were read. -func (d *Decoder) Read(p []byte) (int, error) { - n := 0 - for len(p[n:]) > 0 && d.err == nil { - want := len(p[n:]) - if want > len(d.buf) { - want = len(d.buf) - } - var read int - read, d.err = io.ReadFull(&d.lr, d.buf[:want]) - n += copy(p[n:], d.buf[:read]) - } - return n, d.err -} - -// ReadBool reads a bool value from the underlying stream. -func (d *Decoder) ReadBool() bool { - d.Read(d.buf[:1]) - switch d.buf[0] { - case 0: - return false - case 1: - return true - default: - d.SetErr(fmt.Errorf("invalid bool value (%v)", d.buf[0])) - return false - } -} - -// ReadUint8 reads a uint8 value from the underlying stream. -func (d *Decoder) ReadUint8() uint8 { - d.Read(d.buf[:1]) - return d.buf[0] -} - -// ReadUint64 reads a uint64 value from the underlying stream. -func (d *Decoder) ReadUint64() uint64 { - d.Read(d.buf[:8]) - return binary.LittleEndian.Uint64(d.buf[:8]) -} - -// ReadPrefix reads a length prefix from the underlying stream. If the length -// exceeds the number of bytes remaining in the stream, ReadPrefix sets d.Err -// and returns 0. -func (d *Decoder) ReadPrefix() int { - n := d.ReadUint64() - if n > uint64(d.lr.N) { - d.SetErr(fmt.Errorf("encoded object contains invalid length prefix (%v elems > %v bytes left in stream)", n, d.lr.N)) - return 0 - } - return int(n) -} - -// ReadTime reads a time.Time from the underlying stream. -func (d *Decoder) ReadTime() time.Time { return time.Unix(int64(d.ReadUint64()), 0).UTC() } - -// ReadBytes reads a length-prefixed []byte from the underlying stream. 
-func (d *Decoder) ReadBytes() []byte { - b := make([]byte, d.ReadPrefix()) - d.Read(b) - return b -} - -// ReadString reads a length-prefixed string from the underlying stream. -func (d *Decoder) ReadString() string { - return string(d.ReadBytes()) -} - -// NewDecoder returns a Decoder that wraps the provided stream. -func NewDecoder(lr io.LimitedReader) *Decoder { - return &Decoder{ - lr: lr, - } -} - -// A DecoderFrom can decode itself from a stream via a Decoder. -type DecoderFrom interface { - DecodeFrom(d *Decoder) -} - -// NewBufDecoder returns a Decoder for the provided byte slice. -func NewBufDecoder(buf []byte) *Decoder { - return NewDecoder(io.LimitedReader{ - R: bytes.NewReader(buf), - N: int64(len(buf)), - }) -} - -// A Hasher streams objects into an instance of Sia's hash function. -type Hasher struct { - h hash.Hash - E *Encoder -} - -// Reset resets the underlying hash digest state. -func (h *Hasher) Reset() { h.h.Reset() } - -// Sum returns the digest of the objects written to the Hasher. -func (h *Hasher) Sum() (sum Hash256) { - _ = h.E.Flush() // no error possible - h.h.Sum(sum[:0]) - return -} - -// NewHasher returns a new Hasher instance. -func NewHasher() *Hasher { - h, _ := blake2b.New256(nil) - e := NewEncoder(h) - return &Hasher{h, e} -} - -// implementations of EncoderTo and DecoderFrom for core types - -// EncodeTo implements types.EncoderTo. -func (h Hash256) EncodeTo(e *Encoder) { e.Write(h[:]) } - -// EncodeTo implements types.EncoderTo. -func (id BlockID) EncodeTo(e *Encoder) { e.Write(id[:]) } - -// EncodeTo implements types.EncoderTo. -func (id TransactionID) EncodeTo(e *Encoder) { e.Write(id[:]) } - -// EncodeTo implements types.EncoderTo. -func (a Address) EncodeTo(e *Encoder) { e.Write(a[:]) } - -// EncodeTo implements types.EncoderTo. -func (pk PublicKey) EncodeTo(e *Encoder) { e.Write(pk[:]) } - -// EncodeTo implements types.EncoderTo. 
-func (s Signature) EncodeTo(e *Encoder) { e.Write(s[:]) } - -// EncodeTo implements types.EncoderTo. -func (w Work) EncodeTo(e *Encoder) { e.Write(w.NumHashes[:]) } - -// EncodeTo implements types.EncoderTo. -func (c Currency) EncodeTo(e *Encoder) { - e.WriteUint64(c.Lo) - e.WriteUint64(c.Hi) -} - -// EncodeTo implements types.EncoderTo. -func (index ChainIndex) EncodeTo(e *Encoder) { - e.WriteUint64(index.Height) - index.ID.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (h BlockHeader) EncodeTo(e *Encoder) { - e.WriteUint64(h.Height) - h.ParentID.EncodeTo(e) - e.WriteUint64(h.Nonce) - e.WriteTime(h.Timestamp) - h.MinerAddress.EncodeTo(e) - h.Commitment.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (id ElementID) EncodeTo(e *Encoder) { - id.Source.EncodeTo(e) - e.WriteUint64(id.Index) -} - -// EncodeTo implements types.EncoderTo. -func (sco SiacoinOutput) EncodeTo(e *Encoder) { - sco.Value.EncodeTo(e) - sco.Address.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (sfo SiafundOutput) EncodeTo(e *Encoder) { - e.WriteUint64(sfo.Value) - sfo.Address.EncodeTo(e) -} - -func (e *Encoder) writeMerkleProof(proof []Hash256) { - e.WritePrefix(len(proof)) - for _, p := range proof { - p.EncodeTo(e) - } -} - -// EncodeTo implements types.EncoderTo. -func (se StateElement) EncodeTo(e *Encoder) { - se.ID.EncodeTo(e) - e.WriteUint64(se.LeafIndex) - e.writeMerkleProof(se.MerkleProof) -} - -// EncodeTo implements types.EncoderTo. -func (in SiacoinInput) EncodeTo(e *Encoder) { - in.Parent.EncodeTo(e) - in.SpendPolicy.EncodeTo(e) - e.WritePrefix(len(in.Signatures)) - for _, sig := range in.Signatures { - sig.EncodeTo(e) - } -} - -// EncodeTo implements types.EncoderTo. -func (sce SiacoinElement) EncodeTo(e *Encoder) { - sce.StateElement.EncodeTo(e) - sce.SiacoinOutput.EncodeTo(e) - e.WriteUint64(sce.MaturityHeight) -} - -// EncodeTo implements types.EncoderTo. 
-func (in SiafundInput) EncodeTo(e *Encoder) { - in.Parent.EncodeTo(e) - in.ClaimAddress.EncodeTo(e) - in.SpendPolicy.EncodeTo(e) - e.WritePrefix(len(in.Signatures)) - for _, sig := range in.Signatures { - sig.EncodeTo(e) - } -} - -// EncodeTo implements types.EncoderTo. -func (sfe SiafundElement) EncodeTo(e *Encoder) { - sfe.StateElement.EncodeTo(e) - sfe.SiafundOutput.EncodeTo(e) - sfe.ClaimStart.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (fc FileContract) EncodeTo(e *Encoder) { - e.WriteUint64(fc.Filesize) - fc.FileMerkleRoot.EncodeTo(e) - e.WriteUint64(fc.WindowStart) - e.WriteUint64(fc.WindowEnd) - fc.RenterOutput.EncodeTo(e) - fc.HostOutput.EncodeTo(e) - fc.MissedHostValue.EncodeTo(e) - fc.TotalCollateral.EncodeTo(e) - fc.RenterPublicKey.EncodeTo(e) - fc.HostPublicKey.EncodeTo(e) - e.WriteUint64(fc.RevisionNumber) - fc.RenterSignature.EncodeTo(e) - fc.HostSignature.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (fce FileContractElement) EncodeTo(e *Encoder) { - fce.StateElement.EncodeTo(e) - fce.FileContract.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (rev FileContractRevision) EncodeTo(e *Encoder) { - rev.Parent.EncodeTo(e) - rev.Revision.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (ren FileContractRenewal) EncodeTo(e *Encoder) { - ren.FinalRevision.EncodeTo(e) - ren.InitialRevision.EncodeTo(e) - ren.RenterRollover.EncodeTo(e) - ren.HostRollover.EncodeTo(e) - ren.RenterSignature.EncodeTo(e) - ren.HostSignature.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (sp StorageProof) EncodeTo(e *Encoder) { - sp.WindowStart.EncodeTo(e) - e.writeMerkleProof(sp.WindowProof) - e.Write(sp.Leaf[:]) - e.writeMerkleProof(sp.Proof) -} - -// EncodeTo implements types.EncoderTo. 
-func (res FileContractResolution) EncodeTo(e *Encoder) { - res.Parent.EncodeTo(e) - var fields uint8 - for i, b := range [...]bool{ - res.HasRenewal(), - res.HasStorageProof(), - res.HasFinalization(), - } { - if b { - fields |= 1 << i - } - } - e.WriteUint8(fields) - if fields&(1<<0) != 0 { - res.Renewal.EncodeTo(e) - } - if fields&(1<<1) != 0 { - res.StorageProof.EncodeTo(e) - } - if fields&(1<<2) != 0 { - res.Finalization.EncodeTo(e) - } -} - -// EncodeTo implements types.EncoderTo. -func (a Attestation) EncodeTo(e *Encoder) { - a.PublicKey.EncodeTo(e) - e.WriteString(a.Key) - e.WriteBytes(a.Value) - a.Signature.EncodeTo(e) -} - -const ( - opInvalid = iota - opAbove - opPublicKey - opThreshold - opUnlockConditions -) - -// EncodeTo implements types.EncoderTo. -func (p SpendPolicy) EncodeTo(e *Encoder) { - var writePolicy func(SpendPolicy) - writePolicy = func(p SpendPolicy) { - switch p := p.Type.(type) { - case PolicyTypeAbove: - e.WriteUint8(opAbove) - e.WriteUint64(uint64(p)) - case PolicyTypePublicKey: - e.WriteUint8(opPublicKey) - PublicKey(p).EncodeTo(e) - case PolicyTypeThreshold: - e.WriteUint8(opThreshold) - e.WriteUint8(p.N) - e.WriteUint8(uint8(len(p.Of))) - for i := range p.Of { - writePolicy(p.Of[i]) - } - case PolicyTypeUnlockConditions: - e.WriteUint8(opUnlockConditions) - e.WriteUint64(p.Timelock) - e.WriteUint8(uint8(len(p.PublicKeys))) - for i := range p.PublicKeys { - p.PublicKeys[i].EncodeTo(e) - } - e.WriteUint8(p.SignaturesRequired) - default: - panic(fmt.Sprintf("unhandled policy type %T", p)) - } - } - const version = 1 - e.WriteUint8(version) - writePolicy(p) -} - -// EncodeTo implements types.EncoderTo. 
-func (txn Transaction) EncodeTo(e *Encoder) { - const version = 1 - e.WriteUint8(version) - - var fields uint64 - for i, b := range [...]bool{ - len(txn.SiacoinInputs) != 0, - len(txn.SiacoinOutputs) != 0, - len(txn.SiafundInputs) != 0, - len(txn.SiafundOutputs) != 0, - len(txn.FileContracts) != 0, - len(txn.FileContractRevisions) != 0, - len(txn.FileContractResolutions) != 0, - len(txn.Attestations) != 0, - len(txn.ArbitraryData) != 0, - txn.NewFoundationAddress != VoidAddress, - !txn.MinerFee.IsZero(), - } { - if b { - fields |= 1 << i - } - } - e.WriteUint64(fields) - - if fields&(1<<0) != 0 { - e.WritePrefix(len(txn.SiacoinInputs)) - for _, in := range txn.SiacoinInputs { - in.EncodeTo(e) - } - } - if fields&(1<<1) != 0 { - e.WritePrefix(len(txn.SiacoinOutputs)) - for _, out := range txn.SiacoinOutputs { - out.EncodeTo(e) - } - } - if fields&(1<<2) != 0 { - e.WritePrefix(len(txn.SiafundInputs)) - for _, in := range txn.SiafundInputs { - in.EncodeTo(e) - } - } - if fields&(1<<3) != 0 { - e.WritePrefix(len(txn.SiafundOutputs)) - for _, out := range txn.SiafundOutputs { - out.EncodeTo(e) - } - } - if fields&(1<<4) != 0 { - e.WritePrefix(len(txn.FileContracts)) - for _, fc := range txn.FileContracts { - fc.EncodeTo(e) - } - } - if fields&(1<<5) != 0 { - e.WritePrefix(len(txn.FileContractRevisions)) - for _, rev := range txn.FileContractRevisions { - rev.EncodeTo(e) - } - } - if fields&(1<<6) != 0 { - e.WritePrefix(len(txn.FileContractResolutions)) - for _, res := range txn.FileContractResolutions { - res.EncodeTo(e) - } - } - if fields&(1<<7) != 0 { - e.WritePrefix(len(txn.Attestations)) - for _, a := range txn.Attestations { - a.EncodeTo(e) - } - } - if fields&(1<<8) != 0 { - e.WriteBytes(txn.ArbitraryData) - } - if fields&(1<<9) != 0 { - txn.NewFoundationAddress.EncodeTo(e) - } - if fields&(1<<10) != 0 { - txn.MinerFee.EncodeTo(e) - } -} - -// DecodeFrom implements types.DecoderFrom. 
-func (h *Hash256) DecodeFrom(d *Decoder) { d.Read(h[:]) } - -// DecodeFrom implements types.DecoderFrom. -func (id *BlockID) DecodeFrom(d *Decoder) { d.Read(id[:]) } - -// DecodeFrom implements types.DecoderFrom. -func (id *TransactionID) DecodeFrom(d *Decoder) { d.Read(id[:]) } - -// DecodeFrom implements types.DecoderFrom. -func (a *Address) DecodeFrom(d *Decoder) { d.Read(a[:]) } - -// DecodeFrom implements types.DecoderFrom. -func (pk *PublicKey) DecodeFrom(d *Decoder) { d.Read(pk[:]) } - -// DecodeFrom implements types.DecoderFrom. -func (s *Signature) DecodeFrom(d *Decoder) { d.Read(s[:]) } - -// DecodeFrom implements types.DecoderFrom. -func (w *Work) DecodeFrom(d *Decoder) { d.Read(w.NumHashes[:]) } - -// DecodeFrom implements types.DecoderFrom. -func (c *Currency) DecodeFrom(d *Decoder) { - c.Lo = d.ReadUint64() - c.Hi = d.ReadUint64() -} - -// DecodeFrom implements types.DecoderFrom. -func (index *ChainIndex) DecodeFrom(d *Decoder) { - index.Height = d.ReadUint64() - index.ID.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (h *BlockHeader) DecodeFrom(d *Decoder) { - h.Height = d.ReadUint64() - h.ParentID.DecodeFrom(d) - h.Nonce = d.ReadUint64() - h.Timestamp = d.ReadTime() - h.MinerAddress.DecodeFrom(d) - h.Commitment.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (id *ElementID) DecodeFrom(d *Decoder) { - id.Source.DecodeFrom(d) - id.Index = d.ReadUint64() -} - -// DecodeFrom implements types.DecoderFrom. -func (sco *SiacoinOutput) DecodeFrom(d *Decoder) { - sco.Value.DecodeFrom(d) - sco.Address.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (sfo *SiafundOutput) DecodeFrom(d *Decoder) { - sfo.Value = d.ReadUint64() - sfo.Address.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. 
-func (p *SpendPolicy) DecodeFrom(d *Decoder) { - const maxPolicies = 1024 - totalPolicies := 1 - var readPolicy func() (SpendPolicy, error) - readPolicy = func() (SpendPolicy, error) { - switch op := d.ReadUint8(); op { - case opAbove: - return PolicyAbove(d.ReadUint64()), nil - case opPublicKey: - var pk PublicKey - pk.DecodeFrom(d) - return PolicyPublicKey(pk), nil - case opThreshold: - n := d.ReadUint8() - of := make([]SpendPolicy, d.ReadUint8()) - totalPolicies += len(of) - if totalPolicies > maxPolicies { - return SpendPolicy{}, errors.New("policy is too complex") - } - var err error - for i := range of { - of[i], err = readPolicy() - if err != nil { - return SpendPolicy{}, err - } - } - return PolicyThreshold(n, of), nil - case opUnlockConditions: - uc := PolicyTypeUnlockConditions{ - Timelock: d.ReadUint64(), - PublicKeys: make([]PublicKey, d.ReadUint8()), - } - for i := range uc.PublicKeys { - uc.PublicKeys[i].DecodeFrom(d) - } - uc.SignaturesRequired = d.ReadUint8() - return SpendPolicy{uc}, nil - default: - return SpendPolicy{}, fmt.Errorf("unknown policy (opcode %v)", op) - } - } - - if version := d.ReadUint8(); version != 1 { - d.SetErr(fmt.Errorf("unsupported policy version (%v)", version)) - return - } - var err error - *p, err = readPolicy() - d.SetErr(err) -} - -func (d *Decoder) readMerkleProof() []Hash256 { - proof := make([]Hash256, d.ReadPrefix()) - for i := range proof { - proof[i].DecodeFrom(d) - } - return proof -} - -// DecodeFrom implements types.DecoderFrom. -func (se *StateElement) DecodeFrom(d *Decoder) { - se.ID.DecodeFrom(d) - se.LeafIndex = d.ReadUint64() - se.MerkleProof = d.readMerkleProof() -} - -// DecodeFrom implements types.DecoderFrom. -func (in *SiacoinInput) DecodeFrom(d *Decoder) { - in.Parent.DecodeFrom(d) - in.SpendPolicy.DecodeFrom(d) - in.Signatures = make([]Signature, d.ReadPrefix()) - for i := range in.Signatures { - in.Signatures[i].DecodeFrom(d) - } -} - -// DecodeFrom implements types.DecoderFrom. 
-func (sce *SiacoinElement) DecodeFrom(d *Decoder) { - sce.StateElement.DecodeFrom(d) - sce.SiacoinOutput.DecodeFrom(d) - sce.MaturityHeight = d.ReadUint64() -} - -// DecodeFrom implements types.DecoderFrom. -func (in *SiafundInput) DecodeFrom(d *Decoder) { - in.Parent.DecodeFrom(d) - in.ClaimAddress.DecodeFrom(d) - in.SpendPolicy.DecodeFrom(d) - in.Signatures = make([]Signature, d.ReadPrefix()) - for i := range in.Signatures { - in.Signatures[i].DecodeFrom(d) - } -} - -// DecodeFrom implements types.DecoderFrom. -func (sfe *SiafundElement) DecodeFrom(d *Decoder) { - sfe.StateElement.DecodeFrom(d) - sfe.SiafundOutput.DecodeFrom(d) - sfe.ClaimStart.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (fc *FileContract) DecodeFrom(d *Decoder) { - fc.Filesize = d.ReadUint64() - fc.FileMerkleRoot.DecodeFrom(d) - fc.WindowStart = d.ReadUint64() - fc.WindowEnd = d.ReadUint64() - fc.RenterOutput.DecodeFrom(d) - fc.HostOutput.DecodeFrom(d) - fc.MissedHostValue.DecodeFrom(d) - fc.TotalCollateral.DecodeFrom(d) - fc.RenterPublicKey.DecodeFrom(d) - fc.HostPublicKey.DecodeFrom(d) - fc.RevisionNumber = d.ReadUint64() - fc.RenterSignature.DecodeFrom(d) - fc.HostSignature.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (fce *FileContractElement) DecodeFrom(d *Decoder) { - fce.StateElement.DecodeFrom(d) - fce.FileContract.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (rev *FileContractRevision) DecodeFrom(d *Decoder) { - rev.Parent.DecodeFrom(d) - rev.Revision.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (ren *FileContractRenewal) DecodeFrom(d *Decoder) { - ren.FinalRevision.DecodeFrom(d) - ren.InitialRevision.DecodeFrom(d) - ren.RenterRollover.DecodeFrom(d) - ren.HostRollover.DecodeFrom(d) - ren.RenterSignature.DecodeFrom(d) - ren.HostSignature.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. 
-func (sp *StorageProof) DecodeFrom(d *Decoder) { - sp.WindowStart.DecodeFrom(d) - sp.WindowProof = d.readMerkleProof() - d.Read(sp.Leaf[:]) - sp.Proof = d.readMerkleProof() -} - -// DecodeFrom implements types.DecoderFrom. -func (res *FileContractResolution) DecodeFrom(d *Decoder) { - res.Parent.DecodeFrom(d) - fields := d.ReadUint8() - if fields&(1<<0) != 0 { - res.Renewal.DecodeFrom(d) - } - if fields&(1<<1) != 0 { - res.StorageProof.DecodeFrom(d) - } - if fields&(1<<2) != 0 { - res.Finalization.DecodeFrom(d) - } -} - -// DecodeFrom implements types.DecoderFrom. -func (a *Attestation) DecodeFrom(d *Decoder) { - a.PublicKey.DecodeFrom(d) - a.Key = d.ReadString() - a.Value = d.ReadBytes() - a.Signature.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (txn *Transaction) DecodeFrom(d *Decoder) { - if version := d.ReadUint8(); version != 1 { - d.SetErr(fmt.Errorf("unsupported transaction version (%v)", version)) - return - } - - fields := d.ReadUint64() - - if fields&(1<<0) != 0 { - txn.SiacoinInputs = make([]SiacoinInput, d.ReadPrefix()) - for i := range txn.SiacoinInputs { - txn.SiacoinInputs[i].DecodeFrom(d) - } - } - if fields&(1<<1) != 0 { - txn.SiacoinOutputs = make([]SiacoinOutput, d.ReadPrefix()) - for i := range txn.SiacoinOutputs { - txn.SiacoinOutputs[i].DecodeFrom(d) - } - } - if fields&(1<<2) != 0 { - txn.SiafundInputs = make([]SiafundInput, d.ReadPrefix()) - for i := range txn.SiafundInputs { - txn.SiafundInputs[i].DecodeFrom(d) - } - } - if fields&(1<<3) != 0 { - txn.SiafundOutputs = make([]SiafundOutput, d.ReadPrefix()) - for i := range txn.SiafundOutputs { - txn.SiafundOutputs[i].DecodeFrom(d) - } - } - if fields&(1<<4) != 0 { - txn.FileContracts = make([]FileContract, d.ReadPrefix()) - for i := range txn.FileContracts { - txn.FileContracts[i].DecodeFrom(d) - } - } - if fields&(1<<5) != 0 { - txn.FileContractRevisions = make([]FileContractRevision, d.ReadPrefix()) - for i := range txn.FileContractRevisions { - 
txn.FileContractRevisions[i].DecodeFrom(d) - } - } - if fields&(1<<6) != 0 { - txn.FileContractResolutions = make([]FileContractResolution, d.ReadPrefix()) - for i := range txn.FileContractResolutions { - txn.FileContractResolutions[i].DecodeFrom(d) - } - } - if fields&(1<<7) != 0 { - txn.Attestations = make([]Attestation, d.ReadPrefix()) - for i := range txn.Attestations { - txn.Attestations[i].DecodeFrom(d) - } - } - if fields&(1<<8) != 0 { - txn.ArbitraryData = d.ReadBytes() - } - if fields&(1<<9) != 0 { - txn.NewFoundationAddress.DecodeFrom(d) - } - if fields&(1<<10) != 0 { - txn.MinerFee.DecodeFrom(d) - } -} diff --git a/v2/types/encoding_test.go b/v2/types/encoding_test.go deleted file mode 100644 index f6b92498..00000000 --- a/v2/types/encoding_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package types - -import ( - "bytes" - "encoding" - "io" - "math/rand" - "reflect" - "testing" - "testing/quick" -) - -// Generate implements quick.Generator. -func (p SpendPolicy) Generate(rand *rand.Rand, size int) reflect.Value { - switch rand.Intn(4) + 1 { - case opAbove: - return reflect.ValueOf(PolicyAbove(rand.Uint64())) - case opPublicKey: - var p PublicKey - rand.Read(p[:]) - return reflect.ValueOf(PolicyPublicKey(p)) - case opThreshold: - n := uint8(0) - of := make([]SpendPolicy, rand.Intn(5)) - if len(of) > 0 { - n = uint8(rand.Intn(len(of))) - for i := range of { - of[i] = p.Generate(rand, size).Interface().(SpendPolicy) - } - } - return reflect.ValueOf(PolicyThreshold(n, of)) - case opUnlockConditions: - var p PolicyTypeUnlockConditions - p.Timelock = rand.Uint64() - p.PublicKeys = make([]PublicKey, rand.Intn(5)+1) - p.SignaturesRequired = uint8(rand.Intn(len(p.PublicKeys))) - for i := range p.PublicKeys { - rand.Read(p.PublicKeys[i][:]) - } - return reflect.ValueOf(SpendPolicy{p}) - } - panic("unreachable") -} - -func TestEncoderRoundtrip(t *testing.T) { - tests := []EncoderTo{ - Hash256{0: 0xAA, 31: 0xBB}, - Signature{0: 0xAA, 63: 0xBB}, - Work{NumHashes: 
[32]byte{0: 0xAA, 31: 0xBB}}, - NewCurrency(5, 5), - ChainIndex{ - Height: 555, - ID: BlockID{0: 0xAA, 31: 0xBB}, - }, - ElementID{ - Source: Hash256{0: 0xAA, 31: 0xBB}, - Index: 5000, - }, - SiacoinOutput{ - Value: NewCurrency(1000, 1000), - Address: Address{0: 0xAA, 31: 0xBB}, - }, - FileContract{ - Filesize: 1000, - FileMerkleRoot: Hash256{0: 0xAA, 31: 0xBB}, - WindowStart: 5000, - WindowEnd: 5000, - }, - } - for _, val := range tests { - var buf bytes.Buffer - e := NewEncoder(&buf) - val.EncodeTo(e) - e.Flush() - decptr := reflect.New(reflect.TypeOf(val)) - decptr.Interface().(DecoderFrom).DecodeFrom(NewBufDecoder(buf.Bytes())) - dec := decptr.Elem().Interface() - if !reflect.DeepEqual(dec, val) { - t.Fatalf("value did not survive roundtrip: expected %v, got %v", val, dec) - } - } -} - -func TestEncoderCompleteness(t *testing.T) { - checkFn := func(txn Transaction) bool { - // NOTE: the compressed Transaction encoding will cause 0-length slices - // to decode as nil, so normalize any 0-length slices to nil now to - // ensure that DeepEqual will work. - txn.SiacoinInputs = append([]SiacoinInput(nil), txn.SiacoinInputs...) - txn.SiacoinOutputs = append([]SiacoinOutput(nil), txn.SiacoinOutputs...) - txn.SiafundInputs = append([]SiafundInput(nil), txn.SiafundInputs...) - txn.SiafundOutputs = append([]SiafundOutput(nil), txn.SiafundOutputs...) - txn.FileContracts = append([]FileContract(nil), txn.FileContracts...) - txn.FileContractRevisions = append([]FileContractRevision(nil), txn.FileContractRevisions...) - txn.FileContractResolutions = append([]FileContractResolution(nil), txn.FileContractResolutions...) - txn.Attestations = append([]Attestation(nil), txn.Attestations...) - txn.ArbitraryData = append([]byte(nil), txn.ArbitraryData...) 
- - var buf bytes.Buffer - e := NewEncoder(&buf) - txn.EncodeTo(e) - e.Flush() - var decTxn Transaction - decTxn.DecodeFrom(NewBufDecoder(buf.Bytes())) - return reflect.DeepEqual(txn, decTxn) - } - if quick.Check(checkFn, nil) != nil { - t.Fatal("roundtrip test failed; did you forget to update transaction encoder?") - } -} - -func BenchmarkEncoding(b *testing.B) { - v, ok := quick.Value(reflect.TypeOf(Transaction{}), rand.New(rand.NewSource(0))) - if !ok { - b.Fatal("could not generate value") - } - txn := v.Interface().(Transaction) - e := NewEncoder(io.Discard) - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - txn.EncodeTo(e) - } -} - -func TestMarshalTextRoundtrip(t *testing.T) { - tests := []encoding.TextMarshaler{ - Hash256{0: 0xAA, 31: 0xBB}, - ChainIndex{ - Height: 555, - ID: BlockID{0: 0xAA, 31: 0xBB}, - }, - ElementID{ - Source: Hash256{0: 0xAA, 31: 0xBB}, - Index: 5000, - }, - Address{0: 0xAA, 31: 0xBB}, - BlockID{0: 0xAA, 31: 0xBB}, - PublicKey{0: 0xAA, 31: 0xBB}, - TransactionID{0: 0xAA, 31: 0xBB}, - Signature{0: 0xAA, 31: 0xBB}, - } - for _, val := range tests { - b, _ := val.MarshalText() - decptr := reflect.New(reflect.TypeOf(val)) - if err := decptr.Interface().(encoding.TextUnmarshaler).UnmarshalText(b); err != nil { - t.Errorf("could not decode %T value: %v", val, err) - continue - } - dec := decptr.Elem().Interface() - if !reflect.DeepEqual(dec, val) { - t.Errorf("%T value did not survive roundtrip: expected %v, got %v", val, val, dec) - } - } -} diff --git a/v2/types/policy.go b/v2/types/policy.go deleted file mode 100644 index bf2bdcd0..00000000 --- a/v2/types/policy.go +++ /dev/null @@ -1,301 +0,0 @@ -package types - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "fmt" - "io" - "math/bits" - "strconv" - "strings" -) - -// A SpendPolicy describes the conditions under which an input may be spent. 
-type SpendPolicy struct { - Type interface{ isPolicy() } -} - -// PolicyTypeAbove requires the input to be spent above a given block height. -type PolicyTypeAbove uint64 - -// PolicyAbove returns a policy that requires the input to be spent above a -// given block height. -func PolicyAbove(height uint64) SpendPolicy { return SpendPolicy{PolicyTypeAbove(height)} } - -// PolicyTypePublicKey requires the input to be signed by a given key. -type PolicyTypePublicKey PublicKey - -// PolicyPublicKey returns a policy that requires the input to be signed by a -// given key. -func PolicyPublicKey(pk PublicKey) SpendPolicy { return SpendPolicy{PolicyTypePublicKey(pk)} } - -// PolicyTypeThreshold requires at least N sub-policies to be satisfied. -type PolicyTypeThreshold struct { - N uint8 - Of []SpendPolicy -} - -// PolicyThreshold returns a policy that requires at least N sub-policies to be -// satisfied. -func PolicyThreshold(n uint8, of []SpendPolicy) SpendPolicy { - return SpendPolicy{PolicyTypeThreshold{n, of}} -} - -// AnyoneCanSpend returns a policy that has no requirements. -func AnyoneCanSpend() SpendPolicy { return PolicyThreshold(0, nil) } - -// PolicyTypeUnlockConditions reproduces the requirements imposed by Sia's -// original "UnlockConditions" type. It exists for compatibility purposes and -// should not be used to construct new policies. 
-type PolicyTypeUnlockConditions struct { - Timelock uint64 - PublicKeys []PublicKey - SignaturesRequired uint8 -} - -func (PolicyTypeAbove) isPolicy() {} -func (PolicyTypePublicKey) isPolicy() {} -func (PolicyTypeThreshold) isPolicy() {} -func (PolicyTypeUnlockConditions) isPolicy() {} - -func (uc PolicyTypeUnlockConditions) root() Hash256 { - buf := make([]byte, 65) - uint64Leaf := func(u uint64) Hash256 { - buf[0] = 0 - binary.LittleEndian.PutUint64(buf[1:], u) - return HashBytes(buf[:9]) - } - pubkeyLeaf := func(pk PublicKey) Hash256 { - buf[0] = 0 - copy(buf[1:], "ed25519\x00\x00\x00\x00\x00\x00\x00\x00\x00") - binary.LittleEndian.PutUint64(buf[17:], uint64(len(pk))) - copy(buf[25:], pk[:]) - return HashBytes(buf[:57]) - } - nodeHash := func(left, right Hash256) Hash256 { - buf[0] = 1 - copy(buf[1:], left[:]) - copy(buf[33:], right[:]) - return HashBytes(buf[:65]) - } - var trees [8]Hash256 - var numLeaves uint8 - addLeaf := func(h Hash256) { - i := 0 - for ; numLeaves&(1< 0 { - sb.WriteByte(',') - } - sb.WriteString(sp.String()) - } - sb.WriteString("])") - - case PolicyTypeUnlockConditions: - sb.WriteString("uc(") - sb.WriteString(strconv.FormatUint(p.Timelock, 10)) - sb.WriteString(",[") - for i, pk := range p.PublicKeys { - if i > 0 { - sb.WriteByte(',') - } - sb.WriteString(hex.EncodeToString(pk[:])) - } - sb.WriteString("],") - sb.WriteString(strconv.FormatUint(uint64(p.SignaturesRequired), 10)) - sb.WriteByte(')') - } - return sb.String() -} - -// ParseSpendPolicy parses a spend policy from a string. 
-func ParseSpendPolicy(s string) (SpendPolicy, error) { - var err error // sticky - nextToken := func() string { - s = strings.TrimSpace(s) - i := strings.IndexAny(s, "(),[]") - if err != nil || i == -1 { - return "" - } - t := s[:i] - s = s[i:] - return t - } - consume := func(b byte) { - if err != nil { - return - } - s = strings.TrimSpace(s) - if len(s) == 0 { - err = io.ErrUnexpectedEOF - } else if s[0] != b { - err = fmt.Errorf("expected %q, got %q", b, s[0]) - } else { - s = s[1:] - } - } - peek := func() byte { - if err != nil || len(s) == 0 { - return 0 - } - return s[0] - } - parseInt := func(bitSize int) (u uint64) { - t := nextToken() - if err != nil { - return 0 - } - u, err = strconv.ParseUint(t, 10, bitSize) - return - } - parsePubkey := func() (pk PublicKey) { - t := nextToken() - if err != nil { - return - } else if len(t) != 64 { - err = fmt.Errorf("invalid pubkey length (%d)", len(t)) - return - } - _, err = hex.Decode(pk[:], []byte(t)) - return - } - var parseSpendPolicy func() SpendPolicy - parseSpendPolicy = func() SpendPolicy { - typ := nextToken() - consume('(') - defer consume(')') - switch typ { - case "above": - return PolicyAbove(parseInt(64)) - case "pk": - return PolicyPublicKey(parsePubkey()) - case "thresh": - n := parseInt(8) - consume(',') - consume('[') - var of []SpendPolicy - for err == nil && peek() != ']' { - of = append(of, parseSpendPolicy()) - if peek() != ']' { - consume(',') - } - } - consume(']') - return PolicyThreshold(uint8(n), of) - case "uc": - timelock := parseInt(64) - consume(',') - consume('[') - var pks []PublicKey - for err == nil && peek() != ']' { - pks = append(pks, parsePubkey()) - if peek() != ']' { - consume(',') - } - } - consume(']') - consume(',') - sigsRequired := parseInt(8) - return SpendPolicy{ - PolicyTypeUnlockConditions{ - Timelock: timelock, - PublicKeys: pks, - SignaturesRequired: uint8(sigsRequired), - }, - } - default: - if err == nil { - err = fmt.Errorf("unrecognized policy type %q", typ) 
- } - return SpendPolicy{} - } - } - - p := parseSpendPolicy() - if err == nil && len(s) > 0 { - err = fmt.Errorf("trailing bytes: %q", s) - } - return p, err -} - -// MarshalText implements encoding.TextMarshaler. -func (p SpendPolicy) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (p *SpendPolicy) UnmarshalText(b []byte) (err error) { - *p, err = ParseSpendPolicy(string(b)) - return -} - -// MarshalJSON implements json.Marshaler. -func (p SpendPolicy) MarshalJSON() ([]byte, error) { - return []byte(`"` + p.String() + `"`), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (p *SpendPolicy) UnmarshalJSON(b []byte) (err error) { - return p.UnmarshalText(bytes.Trim(b, `"`)) -} diff --git a/v2/types/policy_test.go b/v2/types/policy_test.go deleted file mode 100644 index 014290e1..00000000 --- a/v2/types/policy_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package types - -import ( - "encoding/json" - "reflect" - "testing" -) - -func mustParsePublicKey(s string) (pk PublicKey) { - err := pk.UnmarshalJSON([]byte(`"` + s + `"`)) - if err != nil { - panic(err) - } - return -} - -func TestPolicyAddressString(t *testing.T) { - publicKeys := []PublicKey{ - mustParsePublicKey("ed25519:42d33219eb9e7d52d4a4edff215e36535d9d82c9439497a05ab7712193d43282"), - mustParsePublicKey("ed25519:b908477c624679a2dc934a662e43c22844595902f1c8dc29b7f8caf2e0369cc9"), - mustParsePublicKey("ed25519:11aa63482223329fb8b8313da78cc58820f2933cc621e0ef275c305092ea3704"), - } - - tests := []struct { - policy SpendPolicy - want string - }{ - { - PolicyAbove(50), - "addr:f0e864efc7226eb90f79f71caf1d839daf11d1c7f0fb7e25abc3cedd38637f32954986043e6a", - }, - { - PolicyPublicKey(publicKeys[0]), - "addr:4c9de1b2775091af2be8f427b1886f2120cdfe074fb3bc3b6011e281f36309e2468424667b70", - }, - { - AnyoneCanSpend(), - "addr:a1b418e9905dd086e2d0c25ec3675568f849c18f401512d704eceafe1574ee19c48049c5f2b3", - }, - { - 
PolicyThreshold(0, nil), - "addr:a1b418e9905dd086e2d0c25ec3675568f849c18f401512d704eceafe1574ee19c48049c5f2b3", - }, - { - PolicyThreshold( - 1, - []SpendPolicy{ - PolicyPublicKey(publicKeys[0]), - }, - ), - "addr:88a889bd46420209db5a41b164956e53ff3da9c4b3d1491d81f9c374f742dd3b0a7c72f58aff", - }, - { - PolicyThreshold( - 1, - []SpendPolicy{ - PolicyPublicKey(publicKeys[0]), - PolicyThreshold( - 2, - []SpendPolicy{ - PolicyAbove(50), - PolicyPublicKey(publicKeys[1]), - }, - ), - }, - ), - "addr:2ce609abbd8bc26d0f22c8f6447d3144bc2ae2391f9b09685aca03237329c339ba3ec4a35133", - }, - { - PolicyThreshold( - 2, - []SpendPolicy{ - PolicyPublicKey(publicKeys[0]), - PolicyPublicKey(publicKeys[1]), - PolicyPublicKey(publicKeys[2]), - }, - ), - "addr:0ca4d365f06ebf0de342ed617498521f0c0bcdc133c414428480e8826875c0a565ccaee80fb6", - }, - { - policy: SpendPolicy{PolicyTypeUnlockConditions{ - PublicKeys: []PublicKey{ - publicKeys[0], - }, - SignaturesRequired: 1, - }}, - want: "addr:2f4a4a64712545bde8d38776377da2794d54685284a3768f78884643dad33a9a3822a0f4dc39", - }, - } - for _, tt := range tests { - if got := tt.policy.Address().String(); got != tt.want { - t.Errorf("wrong address for %T(%v)", tt.policy, tt.policy) - } - } -} - -func TestPolicyJSON(t *testing.T) { - publicKeys := []PublicKey{ - mustParsePublicKey("ed25519:42d33219eb9e7d52d4a4edff215e36535d9d82c9439497a05ab7712193d43282"), - mustParsePublicKey("ed25519:b908477c624679a2dc934a662e43c22844595902f1c8dc29b7f8caf2e0369cc9"), - mustParsePublicKey("ed25519:11aa63482223329fb8b8313da78cc58820f2933cc621e0ef275c305092ea3704"), - } - - tests := []struct { - policy SpendPolicy - want string - }{ - { - PolicyAbove(50), - "above(50)", - }, - { - PolicyPublicKey(publicKeys[0]), - "pk(42d33219eb9e7d52d4a4edff215e36535d9d82c9439497a05ab7712193d43282)", - }, - { - AnyoneCanSpend(), - "thresh(0,[])", - }, - { - PolicyThreshold(0, nil), - "thresh(0,[])", - }, - { - PolicyThreshold( - 1, - []SpendPolicy{ - PolicyPublicKey(publicKeys[0]), - 
}, - ), - "thresh(1,[pk(42d33219eb9e7d52d4a4edff215e36535d9d82c9439497a05ab7712193d43282)])", - }, - { - PolicyThreshold( - 1, - []SpendPolicy{ - PolicyPublicKey(publicKeys[0]), - PolicyThreshold( - 2, - []SpendPolicy{ - PolicyAbove(50), - PolicyPublicKey(publicKeys[1]), - }, - ), - }, - ), - "thresh(1,[pk(42d33219eb9e7d52d4a4edff215e36535d9d82c9439497a05ab7712193d43282),thresh(2,[above(50),pk(b908477c624679a2dc934a662e43c22844595902f1c8dc29b7f8caf2e0369cc9)])])", - }, - { - PolicyThreshold( - 2, - []SpendPolicy{ - PolicyPublicKey(publicKeys[0]), - PolicyThreshold( - 2, - []SpendPolicy{ - PolicyAbove(50), - PolicyPublicKey(publicKeys[1]), - PolicyThreshold( - 2, - []SpendPolicy{ - PolicyAbove(50), - PolicyPublicKey(publicKeys[1]), - }, - ), - }, - ), - PolicyPublicKey(publicKeys[1]), - PolicyPublicKey(publicKeys[2]), - }, - ), "thresh(2,[pk(42d33219eb9e7d52d4a4edff215e36535d9d82c9439497a05ab7712193d43282),thresh(2,[above(50),pk(b908477c624679a2dc934a662e43c22844595902f1c8dc29b7f8caf2e0369cc9),thresh(2,[above(50),pk(b908477c624679a2dc934a662e43c22844595902f1c8dc29b7f8caf2e0369cc9)])]),pk(b908477c624679a2dc934a662e43c22844595902f1c8dc29b7f8caf2e0369cc9),pk(11aa63482223329fb8b8313da78cc58820f2933cc621e0ef275c305092ea3704)])"}, - { - SpendPolicy{PolicyTypeUnlockConditions{ - PublicKeys: []PublicKey{ - publicKeys[0], - }, - SignaturesRequired: 1, - }}, - "uc(0,[42d33219eb9e7d52d4a4edff215e36535d9d82c9439497a05ab7712193d43282],1)", - }, - { - SpendPolicy{PolicyTypeUnlockConditions{}}, - "uc(0,[],0)", - }, - } - - for _, test := range tests { - data, err := json.Marshal(test.policy) - if err != nil { - t.Fatal(err) - } - if string(data) != (`"` + test.want + `"`) { - t.Fatalf("expected %s got %s", test.want, string(data)) - } - - var p SpendPolicy - if err := json.Unmarshal(data, &p); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(p, test.policy) { - t.Fatalf("expected %v got %v", test.policy, p) - } - } - - invalidPolicies := []string{ - "", - ")(", - "aaa(5)", 
- "above()", - "above(zzz)", - "above(0)trailingbytes", - "pk()", - "pk(zzz)", - "thresh(zzz)", - "thresh(1)", - "thresh(a)", - "thresh(1, [)", - "thresh(1, ][)", - "thresh(1, a)", - `thresh(1, [aaa(50)])`, - `uc(1)`, - `uc(1,)`, - `uc(1, [)`, - `uc(1, ][)`, - `uc(1, [])`, - `uc(1, [],)`, - `uc(1, [],a)`, - `uc(1, [aa], 1)`, - } - for _, test := range invalidPolicies { - var p SpendPolicy - if err := json.Unmarshal([]byte(`"`+test+`"`), &p); err == nil { - t.Fatalf("unmarshal should have errored on input %s", test) - } - } -} diff --git a/v2/types/types.go b/v2/types/types.go deleted file mode 100644 index ba2b73b3..00000000 --- a/v2/types/types.go +++ /dev/null @@ -1,884 +0,0 @@ -// Package types defines the essential types of the Sia blockchain. -package types - -import ( - "bytes" - "crypto/ed25519" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "io" - "math" - "math/big" - "math/bits" - "strconv" - "sync" - "time" - - "github.com/hdevalence/ed25519consensus" - "golang.org/x/crypto/blake2b" - "lukechampine.com/frand" -) - -// EphemeralLeafIndex is used as the LeafIndex of StateElements that are created -// and spent within the same block. Such elements do not require a proof of -// existence. They are, however, assigned a proper index and are incorporated -// into the state accumulator when the block is processed. -const EphemeralLeafIndex = math.MaxUint64 - -// MaxRevisionNumber is used to finalize a FileContract. When a contract's -// RevisionNumber is set to this value, no further revisions are possible. This -// allows contracts to be resolved "early" in some cases; see -// FileContractResolution. -const MaxRevisionNumber = math.MaxUint64 - -// A Hash256 is a generic 256-bit cryptographic hash. -type Hash256 [32]byte - -// An Address is the hash of a public key. -type Address Hash256 - -// VoidAddress is an address whose signing key does not exist. Sending coins to -// this address ensures that they will never be recoverable by anyone. 
-var VoidAddress Address - -// A BlockID uniquely identifies a block. -type BlockID Hash256 - -// MeetsTarget returns true if bid is not greater than t. -func (bid BlockID) MeetsTarget(t BlockID) bool { - return bytes.Compare(bid[:], t[:]) <= 0 -} - -// A TransactionID uniquely identifies a transaction. -type TransactionID Hash256 - -// A ChainIndex pairs a block's height with its ID. -type ChainIndex struct { - Height uint64 - ID BlockID -} - -// A PublicKey is an Ed25519 public key. -type PublicKey [32]byte - -// A PrivateKey is an Ed25519 private key. -type PrivateKey []byte - -// PublicKey returns the PublicKey corresponding to priv. -func (priv PrivateKey) PublicKey() (pk PublicKey) { - copy(pk[:], priv[32:]) - return -} - -// NewPrivateKeyFromSeed calculates a private key from a seed. -func NewPrivateKeyFromSeed(seed []byte) PrivateKey { - return PrivateKey(ed25519.NewKeyFromSeed(seed)) -} - -// GeneratePrivateKey creates a new private key from a secure entropy source. -func GeneratePrivateKey() PrivateKey { - seed := make([]byte, ed25519.SeedSize) - frand.Read(seed) - pk := NewPrivateKeyFromSeed(seed) - for i := range seed { - seed[i] = 0 - } - return pk -} - -// A Signature is an Ed25519 signature. -type Signature [64]byte - -// SignHash signs h with priv, producing a Signature. -func (priv PrivateKey) SignHash(h Hash256) (s Signature) { - copy(s[:], ed25519.Sign(ed25519.PrivateKey(priv), h[:])) - return -} - -// VerifyHash verifies that s is a valid signature of h by pk. -func (pk PublicKey) VerifyHash(h Hash256, s Signature) bool { - return ed25519consensus.Verify(pk[:], h[:], s[:]) -} - -// A SiacoinOutput is the recipient of some of the siacoins spent in a -// transaction. -type SiacoinOutput struct { - Value Currency - Address Address -} - -// A SiafundOutput is the recipient of some of the siafunds spent in a -// transaction. 
-type SiafundOutput struct { - Value uint64 - Address Address -} - -// A FileContract is a storage agreement between a renter and a host. It -// consists of a bidirectional payment channel that resolves as either "valid" -// or "missed" depending on whether a valid StorageProof is submitted for the -// contract. -type FileContract struct { - Filesize uint64 - FileMerkleRoot Hash256 - WindowStart uint64 - WindowEnd uint64 - RenterOutput SiacoinOutput - HostOutput SiacoinOutput - MissedHostValue Currency - TotalCollateral Currency - RenterPublicKey PublicKey - HostPublicKey PublicKey - RevisionNumber uint64 - - // signatures cover above fields - RenterSignature Signature - HostSignature Signature -} - -// MissedHostOutput returns the host output that will be created if the contract -// resolves missed. -func (fc FileContract) MissedHostOutput() SiacoinOutput { - return SiacoinOutput{ - Value: fc.MissedHostValue, - Address: fc.HostOutput.Address, - } -} - -// A SiacoinInput spends an unspent SiacoinElement in the state accumulator by -// revealing its public key and signing the transaction. -type SiacoinInput struct { - Parent SiacoinElement - SpendPolicy SpendPolicy - Signatures []Signature -} - -// A SiafundInput spends an unspent SiafundElement in the state accumulator by -// revealing its public key and signing the transaction. Inputs also include a -// ClaimAddress, specifying the recipient of the siacoins that were earned by -// the SiafundElement. -type SiafundInput struct { - Parent SiafundElement - ClaimAddress Address - SpendPolicy SpendPolicy - Signatures []Signature -} - -// A FileContractRevision updates the state of an existing file contract. -type FileContractRevision struct { - Parent FileContractElement - Revision FileContract -} - -// A FileContractResolution closes a file contract's payment channel. There are -// four ways a contract can be resolved: -// -// 1) The renter and host can renew the contract. 
The old contract is finalized, -// and a portion of its funds are "rolled over" into a new contract. -// -// 2) The host can submit a valid storage proof within the contract's proof -// window. This is considered a "valid" resolution. -// -// 3) The renter and host can sign a final contract revision (a "finalization"), -// setting the contract's revision number to its maximum legal value. A -// finalization can be submitted at any time prior to the contract's WindowEnd. -// This is considered a "valid" resolution. -// -// 4) After the proof window has expired, anyone can submit an empty resolution -// with no storage proof or finalization. This is considered a "missed" -// resolution. -type FileContractResolution struct { - Parent FileContractElement - Renewal FileContractRenewal - StorageProof StorageProof - Finalization FileContract -} - -// HasRenewal returns true if the resolution contains a renewal. -func (fcr *FileContractResolution) HasRenewal() bool { - return fcr.Renewal != (FileContractRenewal{}) -} - -// HasStorageProof returns true if the resolution contains a storage proof. -func (fcr *FileContractResolution) HasStorageProof() bool { - sp := &fcr.StorageProof - return sp.WindowStart != (ChainIndex{}) || len(sp.WindowProof) > 0 || - sp.Leaf != ([64]byte{}) || len(sp.Proof) > 0 -} - -// HasFinalization returns true if the resolution contains a finalization. -func (fcr *FileContractResolution) HasFinalization() bool { - return fcr.Finalization != (FileContract{}) -} - -// A FileContractRenewal renews a file contract. -type FileContractRenewal struct { - FinalRevision FileContract - InitialRevision FileContract - RenterRollover Currency - HostRollover Currency - - // signatures cover above fields - RenterSignature Signature - HostSignature Signature -} - -// A StorageProof asserts the presence of a randomly-selected leaf within the -// Merkle tree of a FileContract's data. 
-type StorageProof struct { - // Selecting the leaf requires a source of unpredictable entropy; we use the - // ID of the block at the start of the proof window. The StorageProof - // includes this ID, and asserts its presence in the chain via a separate - // Merkle proof. - // - // For convenience, WindowStart is a ChainIndex rather than a BlockID. - // Consequently, WindowStart.Height MUST match the WindowStart field of the - // contract's final revision; otherwise, the prover could use any - // WindowStart, giving them control over the leaf index. - WindowStart ChainIndex - WindowProof []Hash256 - - // The leaf is always 64 bytes, extended with zeros if necessary. - Leaf [64]byte - Proof []Hash256 -} - -// An ElementID uniquely identifies a StateElement. -type ElementID struct { - Source Hash256 // BlockID or TransactionID - Index uint64 -} - -// A StateElement is a generic element within the state accumulator. -type StateElement struct { - ID ElementID - LeafIndex uint64 - MerkleProof []Hash256 -} - -// A SiacoinElement is a volume of siacoins that is created and spent as an -// atomic unit. -type SiacoinElement struct { - StateElement - SiacoinOutput - MaturityHeight uint64 -} - -// A SiafundElement is a volume of siafunds that is created and spent as an -// atomic unit. -type SiafundElement struct { - StateElement - SiafundOutput - ClaimStart Currency // value of SiafundPool when element was created -} - -// A FileContractElement is a storage agreement between a renter and a host. -type FileContractElement struct { - StateElement - FileContract -} - -// An Attestation associates a key-value pair with an identity. For example, -// hosts attest to their network address by setting Key to "HostAnnouncement" -// and Value to their address, thereby allowing renters to discover them. -// Generally, an attestation for a particular key is considered to overwrite any -// previous attestations with the same key. 
(This allows hosts to announce a new -// network address, for example.) -type Attestation struct { - PublicKey PublicKey - Key string - Value []byte - Signature Signature -} - -// A Transaction transfers value by consuming existing Outputs and creating new -// Outputs. -type Transaction struct { - SiacoinInputs []SiacoinInput - SiacoinOutputs []SiacoinOutput - SiafundInputs []SiafundInput - SiafundOutputs []SiafundOutput - FileContracts []FileContract - FileContractRevisions []FileContractRevision - FileContractResolutions []FileContractResolution - Attestations []Attestation - ArbitraryData []byte - NewFoundationAddress Address - MinerFee Currency -} - -// ID returns the "semantic hash" of the transaction, covering all of the -// transaction's effects, but not incidental data such as signatures or Merkle -// proofs. This ensures that the ID will remain stable (i.e. non-malleable). -// -// To hash all of the data in a transaction, use the EncodeTo method. -func (txn *Transaction) ID() TransactionID { - // NOTE: In general, it is not possible to change a transaction's ID without - // causing it to become invalid, but an exception exists for non-standard - // spend policies. Consider a policy that may be satisfied by either a - // signature or a timelock. If a transaction is broadcast that signs the - // input, and the timelock has expired, then anyone may remove the signature - // from the input without invalidating the transaction. Of course, the net - // result will be the same, so arguably there's little reason to care. You - // only need to worry about this if you're hashing the full transaction data - // for some reason. 
- h := hasherPool.Get().(*Hasher) - defer hasherPool.Put(h) - h.Reset() - h.E.WriteString("sia/id/transaction") - h.E.WritePrefix(len(txn.SiacoinInputs)) - for _, in := range txn.SiacoinInputs { - in.Parent.ID.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.SiacoinOutputs)) - for _, out := range txn.SiacoinOutputs { - out.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.SiafundInputs)) - for _, in := range txn.SiafundInputs { - in.Parent.ID.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.SiafundOutputs)) - for _, out := range txn.SiafundOutputs { - out.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.FileContracts)) - for _, fc := range txn.FileContracts { - fc.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.FileContractRevisions)) - for _, fcr := range txn.FileContractRevisions { - fcr.Parent.ID.EncodeTo(h.E) - fcr.Revision.EncodeTo(h.E) - } - h.E.WritePrefix(len(txn.FileContractResolutions)) - for _, fcr := range txn.FileContractResolutions { - fcr.Parent.ID.EncodeTo(h.E) - fcr.Renewal.EncodeTo(h.E) - fcr.StorageProof.WindowStart.EncodeTo(h.E) - fcr.Finalization.EncodeTo(h.E) - } - for _, a := range txn.Attestations { - a.EncodeTo(h.E) - } - h.E.WriteBytes(txn.ArbitraryData) - txn.NewFoundationAddress.EncodeTo(h.E) - txn.MinerFee.EncodeTo(h.E) - return TransactionID(h.Sum()) -} - -// DeepCopy returns a copy of txn that does not alias any of its memory. -func (txn *Transaction) DeepCopy() Transaction { - c := *txn - c.SiacoinInputs = append([]SiacoinInput(nil), c.SiacoinInputs...) - for i := range c.SiacoinInputs { - c.SiacoinInputs[i].Parent.MerkleProof = append([]Hash256(nil), c.SiacoinInputs[i].Parent.MerkleProof...) - c.SiacoinInputs[i].Signatures = append([]Signature(nil), c.SiacoinInputs[i].Signatures...) - } - c.SiacoinOutputs = append([]SiacoinOutput(nil), c.SiacoinOutputs...) - c.SiafundInputs = append([]SiafundInput(nil), c.SiafundInputs...) 
- for i := range c.SiafundInputs { - c.SiafundInputs[i].Parent.MerkleProof = append([]Hash256(nil), c.SiafundInputs[i].Parent.MerkleProof...) - c.SiafundInputs[i].Signatures = append([]Signature(nil), c.SiafundInputs[i].Signatures...) - } - c.SiafundOutputs = append([]SiafundOutput(nil), c.SiafundOutputs...) - c.FileContracts = append([]FileContract(nil), c.FileContracts...) - c.FileContractRevisions = append([]FileContractRevision(nil), c.FileContractRevisions...) - for i := range c.FileContractRevisions { - c.FileContractRevisions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractRevisions[i].Parent.MerkleProof...) - } - c.FileContractResolutions = append([]FileContractResolution(nil), c.FileContractResolutions...) - for i := range c.FileContractResolutions { - c.FileContractResolutions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractResolutions[i].Parent.MerkleProof...) - c.FileContractResolutions[i].StorageProof.WindowProof = append([]Hash256(nil), c.FileContractResolutions[i].StorageProof.WindowProof...) - c.FileContractResolutions[i].StorageProof.Proof = append([]Hash256(nil), c.FileContractResolutions[i].StorageProof.Proof...) - } - for i := range c.Attestations { - c.Attestations[i].Value = append([]byte(nil), c.Attestations[i].Value...) - } - c.ArbitraryData = append([]byte(nil), c.ArbitraryData...) - return c -} - -// SiacoinOutputID returns the ID of the siacoin output at index i. -func (txn *Transaction) SiacoinOutputID(i int) ElementID { - return ElementID{ - Source: Hash256(txn.ID()), - Index: uint64(i), - } -} - -// SiafundClaimOutputID returns the ID of the siacoin claim output for the -// siafund input at index i. -func (txn *Transaction) SiafundClaimOutputID(i int) ElementID { - return ElementID{ - Source: Hash256(txn.ID()), - Index: uint64(len(txn.SiacoinOutputs) + i), - } -} - -// SiafundOutputID returns the ID of the siafund output at index i. 
-func (txn *Transaction) SiafundOutputID(i int) ElementID { - return ElementID{ - Source: Hash256(txn.ID()), - Index: uint64(len(txn.SiacoinOutputs) + len(txn.SiafundInputs) + i), - } -} - -// FileContractID returns the ID of the file contract at index i. -func (txn *Transaction) FileContractID(i int) ElementID { - return ElementID{ - Source: Hash256(txn.ID()), - Index: uint64(len(txn.SiacoinOutputs) + len(txn.SiafundInputs) + len(txn.SiafundOutputs) + i), - } -} - -// EphemeralSiacoinElement returns txn.SiacoinOutputs[i] as an ephemeral -// SiacoinElement. -func (txn *Transaction) EphemeralSiacoinElement(i int) SiacoinElement { - return SiacoinElement{ - StateElement: StateElement{ - ID: txn.SiacoinOutputID(0), - LeafIndex: EphemeralLeafIndex, - }, - SiacoinOutput: txn.SiacoinOutputs[0], - } -} - -// A BlockHeader contains a Block's non-transaction data. -type BlockHeader struct { - Height uint64 - ParentID BlockID - Nonce uint64 - Timestamp time.Time - MinerAddress Address - Commitment Hash256 -} - -// Index returns the header's chain index. -func (h BlockHeader) Index() ChainIndex { - return ChainIndex{ - Height: h.Height, - ID: h.ID(), - } -} - -// ParentIndex returns the index of the header's parent. -func (h BlockHeader) ParentIndex() ChainIndex { - return ChainIndex{ - Height: h.Height - 1, - ID: h.ParentID, - } -} - -// ID returns a hash that uniquely identifies a block. -func (h BlockHeader) ID() BlockID { - // NOTE: although in principle we only need to hash 48 bytes of data, we - // must ensure compatibility with existing Sia mining hardware, which - // expects an 80-byte buffer with the nonce at [32:40]. - buf := make([]byte, 32+8+8+32) - copy(buf[0:], "sia/id/block") - binary.LittleEndian.PutUint64(buf[32:], h.Nonce) - binary.LittleEndian.PutUint64(buf[40:], uint64(h.Timestamp.Unix())) - copy(buf[48:], h.Commitment[:]) - return BlockID(HashBytes(buf)) -} - -// CurrentTimestamp returns the current time, rounded to the nearest second. 
The -// time zone is set to UTC. -func CurrentTimestamp() time.Time { return time.Now().Round(time.Second).UTC() } - -// A Block is a set of transactions grouped under a header. -type Block struct { - Header BlockHeader - Transactions []Transaction -} - -// ID returns a hash that uniquely identifies a block. It is equivalent to -// b.Header.ID(). -func (b *Block) ID() BlockID { return b.Header.ID() } - -// Index returns the block's chain index. It is equivalent to b.Header.Index(). -func (b *Block) Index() ChainIndex { return b.Header.Index() } - -// MinerOutputID returns the output ID of the miner payout. -func (b *Block) MinerOutputID() ElementID { - return ElementID{ - Source: Hash256(b.ID()), - Index: 0, - } -} - -// FoundationOutputID returns the output ID of the foundation payout. A -// Foundation subsidy output is only created every 4380 blocks after the -// hardfork at block 298000. -func (b *Block) FoundationOutputID() ElementID { - return ElementID{ - Source: Hash256(b.ID()), - Index: 1, - } -} - -// Work represents a quantity of work. -type Work struct { - // The representation is the expected number of hashes required to produce a - // given hash, in big-endian order. - NumHashes [32]byte -} - -// Add returns w+v, wrapping on overflow. -func (w Work) Add(v Work) Work { - var r Work - var sum, c uint64 - for i := 24; i >= 0; i -= 8 { - wi := binary.BigEndian.Uint64(w.NumHashes[i:]) - vi := binary.BigEndian.Uint64(v.NumHashes[i:]) - sum, c = bits.Add64(wi, vi, c) - binary.BigEndian.PutUint64(r.NumHashes[i:], sum) - } - return r -} - -// Sub returns w-v, wrapping on underflow. -func (w Work) Sub(v Work) Work { - var r Work - var sum, c uint64 - for i := 24; i >= 0; i -= 8 { - wi := binary.BigEndian.Uint64(w.NumHashes[i:]) - vi := binary.BigEndian.Uint64(v.NumHashes[i:]) - sum, c = bits.Sub64(wi, vi, c) - binary.BigEndian.PutUint64(r.NumHashes[i:], sum) - } - return r -} - -// Mul64 returns w*v, wrapping on overflow. 
-func (w Work) Mul64(v uint64) Work { - var r Work - var c uint64 - for i := 24; i >= 0; i -= 8 { - wi := binary.BigEndian.Uint64(w.NumHashes[i:]) - hi, prod := bits.Mul64(wi, v) - prod, cc := bits.Add64(prod, c, 0) - c = hi + cc - binary.BigEndian.PutUint64(r.NumHashes[i:], prod) - } - return r -} - -// Div64 returns w/v. -func (w Work) Div64(v uint64) Work { - var r Work - var quo, rem uint64 - for i := 0; i < len(w.NumHashes); i += 8 { - wi := binary.BigEndian.Uint64(w.NumHashes[i:]) - quo, rem = bits.Div64(rem, wi, v) - binary.BigEndian.PutUint64(r.NumHashes[i:], quo) - } - return r -} - -// Cmp compares two work values. -func (w Work) Cmp(v Work) int { - return bytes.Compare(w.NumHashes[:], v.NumHashes[:]) -} - -// WorkRequiredForHash estimates how much work was required to produce the given -// id. Note that the mapping is not injective; many different ids may require -// the same expected amount of Work. -func WorkRequiredForHash(id BlockID) Work { - if id == (BlockID{}) { - // This should never happen as long as inputs are properly validated and - // the laws of physics are intact. - panic("impossibly good BlockID") - } - // As a special case, this hash requires the maximum possible amount of - // Work. (Otherwise, the division would produce 2^256, which overflows our - // representation.) - if id == ([32]byte{31: 1}) { - return Work{ - NumHashes: [32]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - }, - } - } - - // To get the expected number of hashes required, simply divide 2^256 by id. 
- // - // TODO: write a zero-alloc uint256 division instead of using big.Int - maxTarget := new(big.Int).Lsh(big.NewInt(1), 256) - idInt := new(big.Int).SetBytes(id[:]) - quo := maxTarget.Div(maxTarget, idInt) - var w Work - quo.FillBytes(w.NumHashes[:]) - return w -} - -// HashRequiringWork returns the best BlockID that the given amount of Work -// would be expected to produce. Note that many different BlockIDs may require -// the same amount of Work; this function returns the lowest of them. -func HashRequiringWork(w Work) BlockID { - if w.NumHashes == ([32]byte{}) { - panic("no hash requires zero work") - } - // As a special case, 1 Work produces this hash. (Otherwise, the division - // would produce 2^256, which overflows our representation.) - if w.NumHashes == ([32]byte{31: 1}) { - return BlockID{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - } - } - maxTarget := new(big.Int).Lsh(big.NewInt(1), 256) - workInt := new(big.Int).SetBytes(w.NumHashes[:]) - quo := maxTarget.Div(maxTarget, workInt) - var id BlockID - quo.FillBytes(id[:]) - return id -} - -// HashBytes computes the hash of b using Sia's hash function. -func HashBytes(b []byte) Hash256 { return blake2b.Sum256(b) } - -// Pool for reducing heap allocations when hashing. This is only necessary -// because blake2b.New256 returns a hash.Hash interface, which prevents the -// compiler from doing escape analysis. Can be removed if we switch to an -// implementation whose constructor returns a concrete type. 
-var hasherPool = &sync.Pool{New: func() interface{} { return NewHasher() }} - -// Implementations of fmt.Stringer, encoding.Text(Un)marshaler, and json.(Un)marshaler - -func stringerHex(prefix string, data []byte) string { - return prefix + ":" + hex.EncodeToString(data[:]) -} - -func marshalHex(prefix string, data []byte) ([]byte, error) { - return []byte(stringerHex(prefix, data)), nil -} - -func unmarshalHex(dst []byte, prefix string, data []byte) error { - n, err := hex.Decode(dst, bytes.TrimPrefix(data, []byte(prefix+":"))) - if n < len(dst) { - err = io.EOF - } - if err != nil { - return fmt.Errorf("decoding %v: failed: %w", prefix, err) - } - return nil -} - -func marshalJSONHex(prefix string, data []byte) ([]byte, error) { - return []byte(`"` + stringerHex(prefix, data) + `"`), nil -} - -func unmarshalJSONHex(dst []byte, prefix string, data []byte) error { - return unmarshalHex(dst, prefix, bytes.Trim(data, `"`)) -} - -// String implements fmt.Stringer. -func (h Hash256) String() string { return stringerHex("h", h[:]) } - -// MarshalText implements encoding.TextMarshaler. -func (h Hash256) MarshalText() ([]byte, error) { return marshalHex("h", h[:]) } - -// UnmarshalText implements encoding.TextUnmarshaler. -func (h *Hash256) UnmarshalText(b []byte) error { return unmarshalHex(h[:], "h", b) } - -// MarshalJSON implements json.Marshaler. -func (h Hash256) MarshalJSON() ([]byte, error) { return marshalJSONHex("h", h[:]) } - -// UnmarshalJSON implements json.Unmarshaler. -func (h *Hash256) UnmarshalJSON(b []byte) error { return unmarshalJSONHex(h[:], "h", b) } - -// String implements fmt.Stringer. -func (ci ChainIndex) String() string { - // use the 4 least-significant bytes of ID -- in a mature chain, the - // most-significant bytes will be zeros - return fmt.Sprintf("%d::%x", ci.Height, ci.ID[len(ci.ID)-4:]) -} - -// MarshalText implements encoding.TextMarshaler. 
-func (ci ChainIndex) MarshalText() ([]byte, error) { - return []byte(fmt.Sprintf("%d::%x", ci.Height, ci.ID[:])), nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (ci *ChainIndex) UnmarshalText(b []byte) (err error) { - parts := bytes.Split(b, []byte("::")) - if len(parts) != 2 { - return fmt.Errorf("decoding :: failed: wrong number of separators") - } else if ci.Height, err = strconv.ParseUint(string(parts[0]), 10, 64); err != nil { - return fmt.Errorf("decoding :: failed: %w", err) - } else if n, err := hex.Decode(ci.ID[:], parts[1]); err != nil { - return fmt.Errorf("decoding :: failed: %w", err) - } else if n < len(ci.ID) { - return fmt.Errorf("decoding :: failed: %w", io.EOF) - } - return nil -} - -// ParseChainIndex parses a chain index from a string. -func ParseChainIndex(s string) (ci ChainIndex, err error) { - err = ci.UnmarshalText([]byte(s)) - return -} - -// String implements fmt.Stringer. -func (eid ElementID) String() string { - return fmt.Sprintf("elem:%x:%v", eid.Source[:], eid.Index) -} - -// MarshalText implements encoding.TextMarshaler. -func (eid ElementID) MarshalText() ([]byte, error) { return []byte(eid.String()), nil } - -// UnmarshalText implements encoding.TextUnmarshaler. -func (eid *ElementID) UnmarshalText(b []byte) (err error) { - parts := bytes.Split(b, []byte(":")) - if len(parts) != 3 { - return fmt.Errorf("decoding : failed: wrong number of separators") - } else if n, err := hex.Decode(eid.Source[:], parts[1]); err != nil { - return fmt.Errorf("decoding : failed: %w", err) - } else if n < len(eid.Source) { - return fmt.Errorf("decoding : failed: %w", io.EOF) - } else if eid.Index, err = strconv.ParseUint(string(parts[2]), 10, 64); err != nil { - return fmt.Errorf("decoding : failed: %w", err) - } - return nil -} - -// String implements fmt.Stringer. 
-func (a Address) String() string { - checksum := HashBytes(a[:]) - return stringerHex("addr", append(a[:], checksum[:6]...)) -} - -// MarshalText implements encoding.TextMarshaler. -func (a Address) MarshalText() ([]byte, error) { return []byte(a.String()), nil } - -// UnmarshalText implements encoding.TextUnmarshaler. -func (a *Address) UnmarshalText(b []byte) (err error) { - withChecksum := make([]byte, 32+6) - n, err := hex.Decode(withChecksum, bytes.TrimPrefix(b, []byte("addr:"))) - if err != nil { - err = fmt.Errorf("decoding addr: failed: %w", err) - } else if n != len(withChecksum) { - err = fmt.Errorf("decoding addr: failed: %w", io.EOF) - } else if checksum := HashBytes(withChecksum[:32]); !bytes.Equal(checksum[:6], withChecksum[32:]) { - err = errors.New("bad checksum") - } - copy(a[:], withChecksum[:32]) - return -} - -// MarshalJSON implements json.Marshaler. -func (a Address) MarshalJSON() ([]byte, error) { - checksum := HashBytes(a[:]) - return marshalJSONHex("addr", append(a[:], checksum[:6]...)) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (a *Address) UnmarshalJSON(b []byte) (err error) { - return a.UnmarshalText(bytes.Trim(b, `"`)) -} - -// ParseAddress parses an address from a prefixed hex encoded string. -func ParseAddress(s string) (a Address, err error) { - err = a.UnmarshalText([]byte(s)) - return -} - -// String implements fmt.Stringer. -func (bid BlockID) String() string { return stringerHex("bid", bid[:]) } - -// MarshalText implements encoding.TextMarshaler. -func (bid BlockID) MarshalText() ([]byte, error) { return marshalHex("bid", bid[:]) } - -// UnmarshalText implements encoding.TextUnmarshaler. -func (bid *BlockID) UnmarshalText(b []byte) error { return unmarshalHex(bid[:], "bid", b) } - -// MarshalJSON implements json.Marshaler. -func (bid BlockID) MarshalJSON() ([]byte, error) { return marshalJSONHex("bid", bid[:]) } - -// UnmarshalJSON implements json.Unmarshaler. 
-func (bid *BlockID) UnmarshalJSON(b []byte) error { return unmarshalJSONHex(bid[:], "bid", b) } - -// String implements fmt.Stringer. -func (pk PublicKey) String() string { return stringerHex("ed25519", pk[:]) } - -// MarshalText implements encoding.TextMarshaler. -func (pk PublicKey) MarshalText() ([]byte, error) { return marshalHex("ed25519", pk[:]) } - -// UnmarshalText implements encoding.TextUnmarshaler. -func (pk *PublicKey) UnmarshalText(b []byte) error { return unmarshalHex(pk[:], "ed25519", b) } - -// MarshalJSON implements json.Marshaler. -func (pk PublicKey) MarshalJSON() ([]byte, error) { return marshalJSONHex("ed25519", pk[:]) } - -// UnmarshalJSON implements json.Unmarshaler. -func (pk *PublicKey) UnmarshalJSON(b []byte) error { return unmarshalJSONHex(pk[:], "ed25519", b) } - -// String implements fmt.Stringer. -func (tid TransactionID) String() string { return stringerHex("txid", tid[:]) } - -// MarshalText implements encoding.TextMarshaler. -func (tid TransactionID) MarshalText() ([]byte, error) { return marshalHex("txid", tid[:]) } - -// UnmarshalText implements encoding.TextUnmarshaler. -func (tid *TransactionID) UnmarshalText(b []byte) error { return unmarshalHex(tid[:], "txid", b) } - -// MarshalJSON implements json.Marshaler. -func (tid TransactionID) MarshalJSON() ([]byte, error) { return marshalJSONHex("txid", tid[:]) } - -// UnmarshalJSON implements json.Unmarshaler. -func (tid *TransactionID) UnmarshalJSON(b []byte) error { return unmarshalJSONHex(tid[:], "txid", b) } - -// String implements fmt.Stringer. -func (sig Signature) String() string { return stringerHex("sig", sig[:]) } - -// MarshalText implements encoding.TextMarshaler. -func (sig Signature) MarshalText() ([]byte, error) { return marshalHex("sig", sig[:]) } - -// UnmarshalText implements encoding.TextUnmarshaler. -func (sig *Signature) UnmarshalText(b []byte) error { return unmarshalHex(sig[:], "sig", b) } - -// MarshalJSON implements json.Marshaler. 
-func (sig Signature) MarshalJSON() ([]byte, error) { return marshalJSONHex("sig", sig[:]) } - -// UnmarshalJSON implements json.Unmarshaler. -func (sig *Signature) UnmarshalJSON(b []byte) error { return unmarshalJSONHex(sig[:], "sig", b) } - -// String implements fmt.Stringer. -func (w Work) String() string { return new(big.Int).SetBytes(w.NumHashes[:]).String() } - -// MarshalText implements encoding.TextMarshaler. -func (w Work) MarshalText() ([]byte, error) { - return new(big.Int).SetBytes(w.NumHashes[:]).MarshalText() -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (w *Work) UnmarshalText(b []byte) error { - i := new(big.Int) - if err := i.UnmarshalText(b); err != nil { - return err - } else if i.Sign() < 0 { - return errors.New("value cannot be negative") - } else if i.BitLen() > 256 { - return errors.New("value overflows Work representation") - } - i.FillBytes(w.NumHashes[:]) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (w *Work) UnmarshalJSON(b []byte) error { - return w.UnmarshalText(bytes.Trim(b, `"`)) -} - -// MarshalJSON implements json.Marshaler. 
-func (w Work) MarshalJSON() ([]byte, error) { - js, err := new(big.Int).SetBytes(w.NumHashes[:]).MarshalJSON() - return []byte(`"` + string(js) + `"`), err -} diff --git a/v2/types/types_test.go b/v2/types/types_test.go deleted file mode 100644 index 88dae0bd..00000000 --- a/v2/types/types_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package types - -import ( - "testing" -) - -func TestWork(t *testing.T) { - tests := []struct { - id BlockID - exp string - }{ - {BlockID{0b11111111}, "1"}, - {BlockID{0b10000000}, "2"}, - {BlockID{0b01000000}, "4"}, - {BlockID{0b00100000}, "8"}, - {BlockID{0b00010000}, "16"}, - {BlockID{0b00001000}, "32"}, - {BlockID{0b00000100}, "64"}, - {BlockID{0b00000010}, "128"}, - {BlockID{0b00000001}, "256"}, - {BlockID{0, 0x28, 0x7E}, "1618"}, // approx 7.154 * 10^73 - {BlockID{10: 1}, "309485009821345068724781056"}, // 2^88 - } - for _, test := range tests { - got := WorkRequiredForHash(test.id) - if got.String() != test.exp { - t.Errorf("expected %v, got %v", test.exp, got) - } - } -} - -func BenchmarkWork(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - WorkRequiredForHash(BlockID{1}) - } -} - -func BenchmarkTransactionID(b *testing.B) { - txn := Transaction{ - SiacoinInputs: make([]SiacoinInput, 10), - SiacoinOutputs: make([]SiacoinOutput, 10), - SiafundInputs: make([]SiafundInput, 10), - SiafundOutputs: make([]SiafundOutput, 10), - } - for i := range txn.SiacoinInputs { - txn.SiacoinInputs[i].SpendPolicy = AnyoneCanSpend() - } - for i := range txn.SiafundInputs { - txn.SiafundInputs[i].SpendPolicy = AnyoneCanSpend() - } - b.ReportAllocs() - for i := 0; i < b.N; i++ { - _ = txn.ID() - } -} - -func BenchmarkBlockHeaderID(b *testing.B) { - var bh BlockHeader - b.ReportAllocs() - for i := 0; i < b.N; i++ { - _ = bh.ID() - } -} From 298344a4682add0fc2f7c0192cbb0a3f7ca6d980 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 30 May 2023 23:37:41 -0400 Subject: [PATCH 02/53] types: Add v2 types --- chain/manager_test.go | 6 +- 
consensus/update_test.go | 6 +- consensus/validation.go | 15 +- consensus/validation_test.go | 6 +- gateway/encoding.go | 20 +- gateway/peer.go | 22 +- types/encoding.go | 546 ++++++++++++++++++++++++++++++++++- types/hash.go | 16 +- types/policy.go | 304 +++++++++++++++++++ types/types.go | 392 ++++++++++++++++++++++--- 10 files changed, 1254 insertions(+), 79 deletions(-) create mode 100644 types/policy.go diff --git a/chain/manager_test.go b/chain/manager_test.go index 280ab48a..1d22a021 100644 --- a/chain/manager_test.go +++ b/chain/manager_test.go @@ -106,7 +106,7 @@ func TestTxPool(t *testing.T) { giftPrivateKey := types.GeneratePrivateKey() giftPublicKey := giftPrivateKey.PublicKey() - giftAddress := giftPublicKey.StandardAddress() + giftAddress := types.StandardUnlockHash(giftPublicKey) giftAmountSC := types.Siacoins(100) giftTxn := types.Transaction{ SiacoinOutputs: []types.SiacoinOutput{ @@ -137,7 +137,7 @@ func TestTxPool(t *testing.T) { parentTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: giftTxn.SiacoinOutputID(0), - UnlockConditions: giftPublicKey.StandardUnlockConditions(), + UnlockConditions: types.StandardUnlockConditions(giftPublicKey), }}, SiacoinOutputs: []types.SiacoinOutput{{ Address: giftAddress, @@ -155,7 +155,7 @@ func TestTxPool(t *testing.T) { childTxn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: parentTxn.SiacoinOutputID(0), - UnlockConditions: giftPublicKey.StandardUnlockConditions(), + UnlockConditions: types.StandardUnlockConditions(giftPublicKey), }}, MinerFees: []types.Currency{giftAmountSC}, } diff --git a/consensus/update_test.go b/consensus/update_test.go index d5125fd1..d4e2a55b 100644 --- a/consensus/update_test.go +++ b/consensus/update_test.go @@ -17,7 +17,7 @@ func TestApplyBlock(t *testing.T) { giftPrivateKey := types.GeneratePrivateKey() giftPublicKey := giftPrivateKey.PublicKey() - giftAddress := giftPublicKey.StandardAddress() + giftAddress := 
types.StandardUnlockHash(giftPublicKey) giftAmountSC := types.Siacoins(100) giftAmountSF := uint64(100) giftTxn := types.Transaction{ @@ -94,12 +94,12 @@ func TestApplyBlock(t *testing.T) { txnB2 := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ ParentID: giftTxn.SiacoinOutputID(0), - UnlockConditions: giftPublicKey.StandardUnlockConditions(), + UnlockConditions: types.StandardUnlockConditions(giftPublicKey), }}, SiafundInputs: []types.SiafundInput{{ ParentID: giftTxn.SiafundOutputID(0), ClaimAddress: types.VoidAddress, - UnlockConditions: giftPublicKey.StandardUnlockConditions(), + UnlockConditions: types.StandardUnlockConditions(giftPublicKey), }}, SiacoinOutputs: []types.SiacoinOutput{ {Value: giftAmountSC.Div64(2), Address: giftAddress}, diff --git a/consensus/validation.go b/consensus/validation.go index 16eb72a8..f94b9e24 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -5,20 +5,21 @@ import ( "errors" "fmt" "math/bits" + "time" "go.sia.tech/core/internal/blake2b" "go.sia.tech/core/types" ) -// ValidateHeader validates h in the context of s. -func ValidateHeader(s State, h types.BlockHeader) error { - if h.ParentID != s.Index.ID { +// ValidateHeader validates a header in the context of s. 
+func ValidateHeader(s State, parentID types.BlockID, timestamp time.Time, nonce uint64, id types.BlockID) error { + if parentID != s.Index.ID { return errors.New("wrong parent ID") - } else if h.Timestamp.Before(s.medianTimestamp()) { + } else if timestamp.Before(s.medianTimestamp()) { return errors.New("timestamp is too far in the past") - } else if h.Nonce%s.NonceFactor() != 0 { + } else if nonce%s.NonceFactor() != 0 { return errors.New("nonce is not divisible by required factor") - } else if h.ID().CmpWork(s.ChildTarget) < 0 { + } else if id.CmpWork(s.ChildTarget) < 0 { return errors.New("insufficient work") } return nil @@ -61,7 +62,7 @@ func ValidateOrphan(s State, b types.Block) error { // TODO: calculate size more efficiently if uint64(types.EncodedLen(b)) > s.MaxBlockWeight() { return errors.New("block exceeds maximum weight") - } else if err := ValidateHeader(s, b.Header()); err != nil { + } else if err := ValidateHeader(s, b.ParentID, b.Timestamp, b.Nonce, b.ID()); err != nil { return err } else if err := validateMinerPayouts(s, b); err != nil { return err diff --git a/consensus/validation_test.go b/consensus/validation_test.go index c2da47ab..ed992b40 100644 --- a/consensus/validation_test.go +++ b/consensus/validation_test.go @@ -43,7 +43,7 @@ func TestValidateBlock(t *testing.T) { giftPublicKey := giftPrivateKey.PublicKey() renterPublicKey := renterPrivateKey.PublicKey() hostPublicKey := hostPrivateKey.PublicKey() - giftAddress := giftPublicKey.StandardAddress() + giftAddress := types.StandardUnlockHash(giftPublicKey) giftAmountSC := types.Siacoins(100) giftAmountSF := uint64(100) giftFC := rhpv2.PrepareContractFormation(renterPublicKey, hostPublicKey, types.Siacoins(1), types.Siacoins(1), 100, rhpv2.HostSettings{}, types.VoidAddress) @@ -100,12 +100,12 @@ func TestValidateBlock(t *testing.T) { Transactions: []types.Transaction{{ SiacoinInputs: []types.SiacoinInput{{ ParentID: giftTxn.SiacoinOutputID(0), - UnlockConditions: 
giftPublicKey.StandardUnlockConditions(), + UnlockConditions: types.StandardUnlockConditions(giftPublicKey), }}, SiafundInputs: []types.SiafundInput{{ ParentID: giftTxn.SiafundOutputID(0), ClaimAddress: types.VoidAddress, - UnlockConditions: giftPublicKey.StandardUnlockConditions(), + UnlockConditions: types.StandardUnlockConditions(giftPublicKey), }}, SiacoinOutputs: []types.SiacoinOutput{ {Value: giftAmountSC.Sub(fc.Payout), Address: giftAddress}, diff --git a/gateway/encoding.go b/gateway/encoding.go index ea784976..8049d910 100644 --- a/gateway/encoding.go +++ b/gateway/encoding.go @@ -40,6 +40,20 @@ func (h *Header) decodeFrom(d *types.Decoder) { h.NetAddress = d.ReadString() } +func (h *BlockHeader) encodeTo(e *types.Encoder) { + h.ParentID.EncodeTo(e) + e.WriteUint64(h.Nonce) + e.WriteTime(h.Timestamp) + h.MerkleRoot.EncodeTo(e) +} + +func (h *BlockHeader) decodeFrom(d *types.Decoder) { + h.ParentID.DecodeFrom(d) + h.Nonce = d.ReadUint64() + h.Timestamp = d.ReadTime() + h.MerkleRoot.DecodeFrom(d) +} + type object interface { encodeRequest(e *types.Encoder) decodeRequest(d *types.Decoder) @@ -147,12 +161,12 @@ func (r *RPCSendBlk) maxResponseLen() int { return 5e6 } // RPCRelayHeader relays a header. type RPCRelayHeader struct { - Header types.BlockHeader + Header BlockHeader emptyResponse } -func (r *RPCRelayHeader) encodeRequest(e *types.Encoder) { r.Header.EncodeTo(e) } -func (r *RPCRelayHeader) decodeRequest(d *types.Decoder) { r.Header.DecodeFrom(d) } +func (r *RPCRelayHeader) encodeRequest(e *types.Encoder) { r.Header.encodeTo(e) } +func (r *RPCRelayHeader) decodeRequest(d *types.Decoder) { r.Header.decodeFrom(d) } func (r *RPCRelayHeader) maxRequestLen() int { return 32 + 8 + 8 + 32 } // RPCRelayTransactionSet relays a transaction set. 
diff --git a/gateway/peer.go b/gateway/peer.go index fd3c4e0d..3eef8c4e 100644 --- a/gateway/peer.go +++ b/gateway/peer.go @@ -39,6 +39,24 @@ func validateHeader(ours, theirs Header) error { return nil } +// A BlockHeader contains a Block's non-transaction data. +type BlockHeader struct { + ParentID types.BlockID + Nonce uint64 + Timestamp time.Time + MerkleRoot types.Hash256 +} + +// ID returns a hash that uniquely identifies the block. +func (bh BlockHeader) ID() types.BlockID { + h := types.NewHasher() + bh.ParentID.EncodeTo(h.E) + h.E.WriteUint64(bh.Nonce) + h.E.WriteTime(bh.Timestamp) + bh.MerkleRoot.EncodeTo(h.E) + return types.BlockID(h.Sum()) +} + // A Peer is a connected gateway peer. type Peer struct { Addr string @@ -87,7 +105,7 @@ type RPCHandler interface { PeersForShare() []string Block(id types.BlockID) (types.Block, error) BlocksForHistory(history [32]types.BlockID) ([]types.Block, bool, error) - RelayHeader(h types.BlockHeader, origin *Peer) + RelayHeader(h BlockHeader, origin *Peer) RelayTransactionSet(txns []types.Transaction, origin *Peer) } @@ -199,7 +217,7 @@ func (p *Peer) SendBlock(id types.BlockID, timeout time.Duration) (types.Block, } // RelayHeader relays a header to the peer. -func (p *Peer) RelayHeader(h types.BlockHeader, timeout time.Duration) error { +func (p *Peer) RelayHeader(h BlockHeader, timeout time.Duration) error { return p.callRPC(&RPCRelayHeader{Header: h}, timeout) } diff --git a/types/encoding.go b/types/encoding.go index e4d79b66..f4b065bf 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -293,14 +293,6 @@ func (index ChainIndex) EncodeTo(e *Encoder) { index.ID.EncodeTo(e) } -// EncodeTo implements types.EncoderTo. -func (h BlockHeader) EncodeTo(e *Encoder) { - h.ParentID.EncodeTo(e) - e.WriteUint64(h.Nonce) - e.WriteTime(h.Timestamp) - h.MerkleRoot.EncodeTo(e) -} - // EncodeTo implements types.EncoderTo. 
func (sco SiacoinOutput) EncodeTo(e *Encoder) { sco.Value.EncodeTo(e) @@ -486,6 +478,276 @@ func (b Block) EncodeTo(e *Encoder) { } } +// EncodeTo implements types.EncoderTo. +func (p SpendPolicy) EncodeTo(e *Encoder) { + const ( + version = 1 + + opInvalid = iota + opAbove + opPublicKey + opThreshold + opUnlockConditions + ) + + var writePolicy func(SpendPolicy) + writePolicy = func(p SpendPolicy) { + switch p := p.Type.(type) { + case PolicyTypeAbove: + e.WriteUint8(opAbove) + e.WriteUint64(uint64(p)) + case PolicyTypePublicKey: + e.WriteUint8(opPublicKey) + PublicKey(p).EncodeTo(e) + case PolicyTypeThreshold: + e.WriteUint8(opThreshold) + e.WriteUint8(p.N) + e.WriteUint8(uint8(len(p.Of))) + for i := range p.Of { + writePolicy(p.Of[i]) + } + case PolicyTypeUnlockConditions: + e.WriteUint8(opUnlockConditions) + e.WriteUint64(p.Timelock) + e.WriteUint8(uint8(len(p.PublicKeys))) + for i := range p.PublicKeys { + p.PublicKeys[i].EncodeTo(e) + } + e.WriteUint8(p.SignaturesRequired) + default: + panic(fmt.Sprintf("unhandled policy type %T", p)) + } + } + + e.WriteUint8(version) + writePolicy(p) +} + +// EncodeTo implements types.EncoderTo. +func (se StateElement) EncodeTo(e *Encoder) { + se.ID.EncodeTo(e) + e.WriteUint64(se.LeafIndex) + e.WritePrefix(len(se.MerkleProof)) + for _, p := range se.MerkleProof { + p.EncodeTo(e) + } +} + +// EncodeTo implements types.EncoderTo. +func (in V2SiacoinInput) EncodeTo(e *Encoder) { + in.Parent.EncodeTo(e) + in.SpendPolicy.EncodeTo(e) + e.WritePrefix(len(in.Signatures)) + for _, sig := range in.Signatures { + sig.EncodeTo(e) + } +} + +// EncodeTo implements types.EncoderTo. +func (sce SiacoinElement) EncodeTo(e *Encoder) { + sce.StateElement.EncodeTo(e) + sce.SiacoinOutput.EncodeTo(e) + e.WriteUint64(sce.MaturityHeight) +} + +// EncodeTo implements types.EncoderTo. 
+func (in V2SiafundInput) EncodeTo(e *Encoder) { + in.Parent.EncodeTo(e) + in.ClaimAddress.EncodeTo(e) + in.SpendPolicy.EncodeTo(e) + e.WritePrefix(len(in.Signatures)) + for _, sig := range in.Signatures { + sig.EncodeTo(e) + } +} + +// EncodeTo implements types.EncoderTo. +func (sfe SiafundElement) EncodeTo(e *Encoder) { + sfe.StateElement.EncodeTo(e) + sfe.SiafundOutput.EncodeTo(e) + sfe.ClaimStart.EncodeTo(e) +} + +// EncodeTo implements types.EncoderTo. +func (fc V2FileContract) EncodeTo(e *Encoder) { + e.WriteUint64(fc.Filesize) + fc.FileMerkleRoot.EncodeTo(e) + e.WriteUint64(fc.WindowStart) + e.WriteUint64(fc.WindowEnd) + fc.RenterOutput.EncodeTo(e) + fc.HostOutput.EncodeTo(e) + fc.MissedHostValue.EncodeTo(e) + fc.TotalCollateral.EncodeTo(e) + fc.RenterPublicKey.EncodeTo(e) + fc.HostPublicKey.EncodeTo(e) + e.WriteUint64(fc.RevisionNumber) + fc.RenterSignature.EncodeTo(e) + fc.HostSignature.EncodeTo(e) +} + +// EncodeTo implements types.EncoderTo. +func (fce FileContractElement) EncodeTo(e *Encoder) { + fce.StateElement.EncodeTo(e) + fce.V2FileContract.EncodeTo(e) +} + +// EncodeTo implements types.EncoderTo. +func (rev V2FileContractRevision) EncodeTo(e *Encoder) { + rev.Parent.EncodeTo(e) + rev.Revision.EncodeTo(e) +} + +// EncodeTo implements types.EncoderTo. +func (ren FileContractRenewal) EncodeTo(e *Encoder) { + ren.FinalRevision.EncodeTo(e) + ren.InitialRevision.EncodeTo(e) + ren.RenterRollover.EncodeTo(e) + ren.HostRollover.EncodeTo(e) + ren.RenterSignature.EncodeTo(e) + ren.HostSignature.EncodeTo(e) +} + +// EncodeTo implements types.EncoderTo. +func (sp V2StorageProof) EncodeTo(e *Encoder) { + sp.WindowStart.EncodeTo(e) + e.WritePrefix(len(sp.WindowProof)) + for _, p := range sp.WindowProof { + p.EncodeTo(e) + } + e.Write(sp.Leaf[:]) + e.WritePrefix(len(sp.Proof)) + for _, p := range sp.Proof { + p.EncodeTo(e) + } +} + +// EncodeTo implements types.EncoderTo. 
+func (res FileContractResolution) EncodeTo(e *Encoder) { + res.Parent.EncodeTo(e) + var fields uint8 + for i, b := range [...]bool{ + res.HasRenewal(), + res.HasStorageProof(), + res.HasFinalization(), + } { + if b { + fields |= 1 << i + } + } + e.WriteUint8(fields) + if fields&(1<<0) != 0 { + res.Renewal.EncodeTo(e) + } + if fields&(1<<1) != 0 { + res.StorageProof.EncodeTo(e) + } + if fields&(1<<2) != 0 { + res.Finalization.EncodeTo(e) + } +} + +// EncodeTo implements types.EncoderTo. +func (a Attestation) EncodeTo(e *Encoder) { + a.PublicKey.EncodeTo(e) + e.WriteString(a.Key) + e.WriteBytes(a.Value) + a.Signature.EncodeTo(e) +} + +// EncodeTo implements types.EncoderTo. +func (txn V2Transaction) EncodeTo(e *Encoder) { + const version = 2 + e.WriteUint8(version) + + var fields uint64 + for i, b := range [...]bool{ + len(txn.SiacoinInputs) != 0, + len(txn.SiacoinOutputs) != 0, + len(txn.SiafundInputs) != 0, + len(txn.SiafundOutputs) != 0, + len(txn.FileContracts) != 0, + len(txn.FileContractRevisions) != 0, + len(txn.FileContractResolutions) != 0, + len(txn.Attestations) != 0, + len(txn.ArbitraryData) != 0, + txn.NewFoundationAddress != VoidAddress, + !txn.MinerFee.IsZero(), + } { + if b { + fields |= 1 << i + } + } + e.WriteUint64(fields) + + if fields&(1<<0) != 0 { + e.WritePrefix(len(txn.SiacoinInputs)) + for _, in := range txn.SiacoinInputs { + in.EncodeTo(e) + } + } + if fields&(1<<1) != 0 { + e.WritePrefix(len(txn.SiacoinOutputs)) + for _, out := range txn.SiacoinOutputs { + out.EncodeTo(e) + } + } + if fields&(1<<2) != 0 { + e.WritePrefix(len(txn.SiafundInputs)) + for _, in := range txn.SiafundInputs { + in.EncodeTo(e) + } + } + if fields&(1<<3) != 0 { + e.WritePrefix(len(txn.SiafundOutputs)) + for _, out := range txn.SiafundOutputs { + out.EncodeTo(e) + } + } + if fields&(1<<4) != 0 { + e.WritePrefix(len(txn.FileContracts)) + for _, fc := range txn.FileContracts { + fc.EncodeTo(e) + } + } + if fields&(1<<5) != 0 { + 
e.WritePrefix(len(txn.FileContractRevisions)) + for _, rev := range txn.FileContractRevisions { + rev.EncodeTo(e) + } + } + if fields&(1<<6) != 0 { + e.WritePrefix(len(txn.FileContractResolutions)) + for _, res := range txn.FileContractResolutions { + res.EncodeTo(e) + } + } + if fields&(1<<7) != 0 { + e.WritePrefix(len(txn.Attestations)) + for _, a := range txn.Attestations { + a.EncodeTo(e) + } + } + if fields&(1<<8) != 0 { + e.WriteBytes(txn.ArbitraryData) + } + if fields&(1<<9) != 0 { + txn.NewFoundationAddress.EncodeTo(e) + } + if fields&(1<<10) != 0 { + txn.MinerFee.EncodeTo(e) + } +} + +// EncodeTo implements types.EncoderTo. +func (b V2BlockData) EncodeTo(e *Encoder) { + e.WriteUint64(b.Height) + b.Commitment.EncodeTo(e) + e.WritePrefix(len(b.Transactions)) + for i := range b.Transactions { + b.Transactions[i].EncodeTo(e) + } +} + // DecodeFrom implements types.DecoderFrom. func (h *Hash256) DecodeFrom(d *Decoder) { d.Read(h[:]) } @@ -542,14 +804,6 @@ func (index *ChainIndex) DecodeFrom(d *Decoder) { index.ID.DecodeFrom(d) } -// DecodeFrom implements types.DecoderFrom. -func (h *BlockHeader) DecodeFrom(d *Decoder) { - h.ParentID.DecodeFrom(d) - h.Nonce = d.ReadUint64() - h.Timestamp = d.ReadTime() - h.MerkleRoot.DecodeFrom(d) -} - // DecodeFrom implements types.DecoderFrom. func (sco *SiacoinOutput) DecodeFrom(d *Decoder) { sco.Value.DecodeFrom(d) @@ -739,3 +993,263 @@ func (b *Block) DecodeFrom(d *Decoder) { b.Transactions[i].DecodeFrom(d) } } + +// DecodeFrom implements types.DecoderFrom. 
+func (p *SpendPolicy) DecodeFrom(d *Decoder) { + const ( + version = 1 + maxPolicies = 1024 + + opInvalid = iota + opAbove + opPublicKey + opThreshold + opUnlockConditions + ) + + var totalPolicies int + var readPolicy func() (SpendPolicy, error) + readPolicy = func() (SpendPolicy, error) { + switch op := d.ReadUint8(); op { + case opAbove: + return PolicyAbove(d.ReadUint64()), nil + case opPublicKey: + var pk PublicKey + pk.DecodeFrom(d) + return PolicyPublicKey(pk), nil + case opThreshold: + n := d.ReadUint8() + of := make([]SpendPolicy, d.ReadUint8()) + if totalPolicies += len(of); totalPolicies > maxPolicies { + return SpendPolicy{}, errors.New("policy is too complex") + } + var err error + for i := range of { + of[i], err = readPolicy() + if err != nil { + return SpendPolicy{}, err + } + } + return PolicyThreshold(n, of), nil + case opUnlockConditions: + uc := PolicyTypeUnlockConditions{ + Timelock: d.ReadUint64(), + PublicKeys: make([]PublicKey, d.ReadUint8()), + } + for i := range uc.PublicKeys { + uc.PublicKeys[i].DecodeFrom(d) + } + uc.SignaturesRequired = d.ReadUint8() + return SpendPolicy{uc}, nil + default: + return SpendPolicy{}, fmt.Errorf("unknown policy (opcode %d)", op) + } + } + + if v := d.ReadUint8(); v != version { + d.SetErr(fmt.Errorf("unsupported policy version (%v)", version)) + return + } + var err error + *p, err = readPolicy() + d.SetErr(err) +} + +// DecodeFrom implements types.DecoderFrom. +func (se *StateElement) DecodeFrom(d *Decoder) { + se.ID.DecodeFrom(d) + se.LeafIndex = d.ReadUint64() + se.MerkleProof = make([]Hash256, d.ReadPrefix()) + for i := range se.MerkleProof { + se.MerkleProof[i].DecodeFrom(d) + } +} + +// DecodeFrom implements types.DecoderFrom. 
+func (in *V2SiacoinInput) DecodeFrom(d *Decoder) { + in.Parent.DecodeFrom(d) + in.SpendPolicy.DecodeFrom(d) + in.Signatures = make([]Signature, d.ReadPrefix()) + for i := range in.Signatures { + in.Signatures[i].DecodeFrom(d) + } +} + +// DecodeFrom implements types.DecoderFrom. +func (sce *SiacoinElement) DecodeFrom(d *Decoder) { + sce.StateElement.DecodeFrom(d) + sce.SiacoinOutput.DecodeFrom(d) + sce.MaturityHeight = d.ReadUint64() +} + +// DecodeFrom implements types.DecoderFrom. +func (in *V2SiafundInput) DecodeFrom(d *Decoder) { + in.Parent.DecodeFrom(d) + in.ClaimAddress.DecodeFrom(d) + in.SpendPolicy.DecodeFrom(d) + in.Signatures = make([]Signature, d.ReadPrefix()) + for i := range in.Signatures { + in.Signatures[i].DecodeFrom(d) + } +} + +// DecodeFrom implements types.DecoderFrom. +func (sfe *SiafundElement) DecodeFrom(d *Decoder) { + sfe.StateElement.DecodeFrom(d) + sfe.SiafundOutput.DecodeFrom(d) + sfe.ClaimStart.DecodeFrom(d) +} + +// DecodeFrom implements types.DecoderFrom. +func (fc *V2FileContract) DecodeFrom(d *Decoder) { + fc.Filesize = d.ReadUint64() + fc.FileMerkleRoot.DecodeFrom(d) + fc.WindowStart = d.ReadUint64() + fc.WindowEnd = d.ReadUint64() + fc.RenterOutput.DecodeFrom(d) + fc.HostOutput.DecodeFrom(d) + fc.MissedHostValue.DecodeFrom(d) + fc.TotalCollateral.DecodeFrom(d) + fc.RenterPublicKey.DecodeFrom(d) + fc.HostPublicKey.DecodeFrom(d) + fc.RevisionNumber = d.ReadUint64() + fc.RenterSignature.DecodeFrom(d) + fc.HostSignature.DecodeFrom(d) +} + +// DecodeFrom implements types.DecoderFrom. +func (fce *FileContractElement) DecodeFrom(d *Decoder) { + fce.StateElement.DecodeFrom(d) + fce.V2FileContract.DecodeFrom(d) +} + +// DecodeFrom implements types.DecoderFrom. +func (rev *V2FileContractRevision) DecodeFrom(d *Decoder) { + rev.Parent.DecodeFrom(d) + rev.Revision.DecodeFrom(d) +} + +// DecodeFrom implements types.DecoderFrom. 
+func (ren *FileContractRenewal) DecodeFrom(d *Decoder) { + ren.FinalRevision.DecodeFrom(d) + ren.InitialRevision.DecodeFrom(d) + ren.RenterRollover.DecodeFrom(d) + ren.HostRollover.DecodeFrom(d) + ren.RenterSignature.DecodeFrom(d) + ren.HostSignature.DecodeFrom(d) +} + +// DecodeFrom implements types.DecoderFrom. +func (sp *V2StorageProof) DecodeFrom(d *Decoder) { + sp.WindowStart.DecodeFrom(d) + sp.WindowProof = make([]Hash256, d.ReadPrefix()) + for i := range sp.WindowProof { + sp.WindowProof[i].DecodeFrom(d) + } + d.Read(sp.Leaf[:]) + sp.Proof = make([]Hash256, d.ReadPrefix()) + for i := range sp.Proof { + sp.Proof[i].DecodeFrom(d) + } +} + +// DecodeFrom implements types.DecoderFrom. +func (res *FileContractResolution) DecodeFrom(d *Decoder) { + res.Parent.DecodeFrom(d) + fields := d.ReadUint8() + if fields&(1<<0) != 0 { + res.Renewal.DecodeFrom(d) + } + if fields&(1<<1) != 0 { + res.StorageProof.DecodeFrom(d) + } + if fields&(1<<2) != 0 { + res.Finalization.DecodeFrom(d) + } +} + +// DecodeFrom implements types.DecoderFrom. +func (a *Attestation) DecodeFrom(d *Decoder) { + a.PublicKey.DecodeFrom(d) + a.Key = d.ReadString() + a.Value = d.ReadBytes() + a.Signature.DecodeFrom(d) +} + +// DecodeFrom implements types.DecoderFrom. 
+func (txn *V2Transaction) DecodeFrom(d *Decoder) { + if version := d.ReadUint8(); version != 2 { + d.SetErr(fmt.Errorf("unsupported transaction version (%v)", version)) + return + } + + fields := d.ReadUint64() + + if fields&(1<<0) != 0 { + txn.SiacoinInputs = make([]V2SiacoinInput, d.ReadPrefix()) + for i := range txn.SiacoinInputs { + txn.SiacoinInputs[i].DecodeFrom(d) + } + } + if fields&(1<<1) != 0 { + txn.SiacoinOutputs = make([]SiacoinOutput, d.ReadPrefix()) + for i := range txn.SiacoinOutputs { + txn.SiacoinOutputs[i].DecodeFrom(d) + } + } + if fields&(1<<2) != 0 { + txn.SiafundInputs = make([]V2SiafundInput, d.ReadPrefix()) + for i := range txn.SiafundInputs { + txn.SiafundInputs[i].DecodeFrom(d) + } + } + if fields&(1<<3) != 0 { + txn.SiafundOutputs = make([]SiafundOutput, d.ReadPrefix()) + for i := range txn.SiafundOutputs { + txn.SiafundOutputs[i].DecodeFrom(d) + } + } + if fields&(1<<4) != 0 { + txn.FileContracts = make([]V2FileContract, d.ReadPrefix()) + for i := range txn.FileContracts { + txn.FileContracts[i].DecodeFrom(d) + } + } + if fields&(1<<5) != 0 { + txn.FileContractRevisions = make([]V2FileContractRevision, d.ReadPrefix()) + for i := range txn.FileContractRevisions { + txn.FileContractRevisions[i].DecodeFrom(d) + } + } + if fields&(1<<6) != 0 { + txn.FileContractResolutions = make([]FileContractResolution, d.ReadPrefix()) + for i := range txn.FileContractResolutions { + txn.FileContractResolutions[i].DecodeFrom(d) + } + } + if fields&(1<<7) != 0 { + txn.Attestations = make([]Attestation, d.ReadPrefix()) + for i := range txn.Attestations { + txn.Attestations[i].DecodeFrom(d) + } + } + if fields&(1<<8) != 0 { + txn.ArbitraryData = d.ReadBytes() + } + if fields&(1<<9) != 0 { + txn.NewFoundationAddress.DecodeFrom(d) + } + if fields&(1<<10) != 0 { + txn.MinerFee.DecodeFrom(d) + } +} + +// DecodeFrom implements types.DecoderFrom. 
+func (b *V2BlockData) DecodeFrom(d *Decoder) { + b.Height = d.ReadUint64() + b.Commitment.DecodeFrom(d) + b.Transactions = make([]V2Transaction, d.ReadPrefix()) + for i := range b.Transactions { + b.Transactions[i].DecodeFrom(d) + } +} diff --git a/types/hash.go b/types/hash.go index 33986d2a..c0a48854 100644 --- a/types/hash.go +++ b/types/hash.go @@ -77,8 +77,20 @@ func (acc *merkleAccumulator) root() Hash256 { return root } -func standardUnlockHash(pk PublicKey) Address { - // An Address is the Merkle root of UnlockConditions. Since the standard +// StandardAddress returns the standard v2 Address derived from pk. It is +// equivalent to PolicyPublicKey(pk).Address(). +func StandardAddress(pk PublicKey) Address { + buf := make([]byte, 12+1+1+len(pk)) + copy(buf, "sia/address|") + buf[12] = 1 // version + buf[13] = 2 // opPublicKey + copy(buf[14:], pk[:]) + return Address(blake2b.Sum256(buf)) +} + +// StandardUnlockHash returns the standard UnlockHash derived from pk. +func StandardUnlockHash(pk PublicKey) Address { + // An UnlockHash is the Merkle root of UnlockConditions. Since the standard // UnlockConditions use a single public key, the Merkle tree is: // // ┌─────────┴──────────┐ diff --git a/types/policy.go b/types/policy.go new file mode 100644 index 00000000..87e60ef5 --- /dev/null +++ b/types/policy.go @@ -0,0 +1,304 @@ +package types + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "math/bits" + "strconv" + "strings" +) + +// A SpendPolicy describes the conditions under which an input may be spent. +type SpendPolicy struct { + Type interface{ isPolicy() } +} + +// PolicyTypeAbove requires the input to be spent above a given block height. +type PolicyTypeAbove uint64 + +// PolicyAbove returns a policy that requires the input to be spent above a +// given block height. 
+func PolicyAbove(height uint64) SpendPolicy { + return SpendPolicy{PolicyTypeAbove(height)} +} + +// PolicyTypePublicKey requires the input to be signed by a given key. +type PolicyTypePublicKey PublicKey + +// PolicyPublicKey returns a policy that requires the input to be signed by a +// given key. +func PolicyPublicKey(pk PublicKey) SpendPolicy { + return SpendPolicy{PolicyTypePublicKey(pk)} +} + +// PolicyTypeThreshold requires at least N sub-policies to be satisfied. +type PolicyTypeThreshold struct { + N uint8 + Of []SpendPolicy +} + +// PolicyThreshold returns a policy that requires at least N sub-policies to be +// satisfied. +func PolicyThreshold(n uint8, of []SpendPolicy) SpendPolicy { + return SpendPolicy{PolicyTypeThreshold{n, of}} +} + +// AnyoneCanSpend returns a policy that has no requirements. +func AnyoneCanSpend() SpendPolicy { + return PolicyThreshold(0, nil) +} + +// PolicyTypeUnlockConditions reproduces the requirements imposed by Sia's +// original "UnlockConditions" type. It exists for compatibility purposes and +// should not be used to construct new policies. 
+type PolicyTypeUnlockConditions struct { + Timelock uint64 + PublicKeys []PublicKey + SignaturesRequired uint8 +} + +func (PolicyTypeAbove) isPolicy() {} +func (PolicyTypePublicKey) isPolicy() {} +func (PolicyTypeThreshold) isPolicy() {} +func (PolicyTypeUnlockConditions) isPolicy() {} + +func (uc PolicyTypeUnlockConditions) root() Hash256 { + buf := make([]byte, 65) + uint64Leaf := func(u uint64) Hash256 { + buf[0] = 0 + binary.LittleEndian.PutUint64(buf[1:], u) + return HashBytes(buf[:9]) + } + pubkeyLeaf := func(pk PublicKey) Hash256 { + buf[0] = 0 + copy(buf[1:], "ed25519\x00\x00\x00\x00\x00\x00\x00\x00\x00") + binary.LittleEndian.PutUint64(buf[17:], uint64(len(pk))) + copy(buf[25:], pk[:]) + return HashBytes(buf[:57]) + } + nodeHash := func(left, right Hash256) Hash256 { + buf[0] = 1 + copy(buf[1:], left[:]) + copy(buf[33:], right[:]) + return HashBytes(buf[:65]) + } + var trees [8]Hash256 + var numLeaves uint8 + addLeaf := func(h Hash256) { + i := 0 + for ; numLeaves&(1< 0 { + sb.WriteByte(',') + } + sb.WriteString(sp.String()) + } + sb.WriteString("])") + + case PolicyTypeUnlockConditions: + sb.WriteString("uc(") + sb.WriteString(strconv.FormatUint(p.Timelock, 10)) + sb.WriteString(",[") + for i, pk := range p.PublicKeys { + if i > 0 { + sb.WriteByte(',') + } + sb.WriteString(hex.EncodeToString(pk[:])) + } + sb.WriteString("],") + sb.WriteString(strconv.FormatUint(uint64(p.SignaturesRequired), 10)) + sb.WriteByte(')') + } + return sb.String() +} + +// ParseSpendPolicy parses a spend policy from a string. 
+func ParseSpendPolicy(s string) (SpendPolicy, error) { + var err error // sticky + nextToken := func() string { + s = strings.TrimSpace(s) + i := strings.IndexAny(s, "(),[]") + if err != nil || i == -1 { + return "" + } + t := s[:i] + s = s[i:] + return t + } + consume := func(b byte) { + if err != nil { + return + } + s = strings.TrimSpace(s) + if len(s) == 0 { + err = io.ErrUnexpectedEOF + } else if s[0] != b { + err = fmt.Errorf("expected %q, got %q", b, s[0]) + } else { + s = s[1:] + } + } + peek := func() byte { + if err != nil || len(s) == 0 { + return 0 + } + return s[0] + } + parseInt := func(bitSize int) (u uint64) { + t := nextToken() + if err != nil { + return 0 + } + u, err = strconv.ParseUint(t, 10, bitSize) + return + } + parsePubkey := func() (pk PublicKey) { + t := nextToken() + if err != nil { + return + } else if len(t) != 64 { + err = fmt.Errorf("invalid pubkey length (%d)", len(t)) + return + } + _, err = hex.Decode(pk[:], []byte(t)) + return + } + var parseSpendPolicy func() SpendPolicy + parseSpendPolicy = func() SpendPolicy { + typ := nextToken() + consume('(') + defer consume(')') + switch typ { + case "above": + return PolicyAbove(parseInt(64)) + case "pk": + return PolicyPublicKey(parsePubkey()) + case "thresh": + n := parseInt(8) + consume(',') + consume('[') + var of []SpendPolicy + for err == nil && peek() != ']' { + of = append(of, parseSpendPolicy()) + if peek() != ']' { + consume(',') + } + } + consume(']') + return PolicyThreshold(uint8(n), of) + case "uc": + timelock := parseInt(64) + consume(',') + consume('[') + var pks []PublicKey + for err == nil && peek() != ']' { + pks = append(pks, parsePubkey()) + if peek() != ']' { + consume(',') + } + } + consume(']') + consume(',') + sigsRequired := parseInt(8) + return SpendPolicy{ + PolicyTypeUnlockConditions{ + Timelock: timelock, + PublicKeys: pks, + SignaturesRequired: uint8(sigsRequired), + }, + } + default: + if err == nil { + err = fmt.Errorf("unrecognized policy type %q", typ) 
+ } + return SpendPolicy{} + } + } + + p := parseSpendPolicy() + if err == nil && len(s) > 0 { + err = fmt.Errorf("trailing bytes: %q", s) + } + return p, err +} + +// MarshalText implements encoding.TextMarshaler. +func (p SpendPolicy) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (p *SpendPolicy) UnmarshalText(b []byte) (err error) { + *p, err = ParseSpendPolicy(string(b)) + return +} + +// MarshalJSON implements json.Marshaler. +func (p SpendPolicy) MarshalJSON() ([]byte, error) { + return []byte(`"` + p.String() + `"`), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (p *SpendPolicy) UnmarshalJSON(b []byte) (err error) { + return p.UnmarshalText(bytes.Trim(b, `"`)) +} diff --git a/types/types.go b/types/types.go index ccac0ba8..ea160e07 100644 --- a/types/types.go +++ b/types/types.go @@ -36,6 +36,7 @@ var ( SpecifierEd25519 = NewSpecifier("ed25519") SpecifierSiacoinOutput = NewSpecifier("siacoin output") SpecifierSiafundOutput = NewSpecifier("siafund output") + SpecifierClaimOutput = NewSpecifier("claim output") SpecifierFileContract = NewSpecifier("file contract") SpecifierStorageProof = NewSpecifier("storage proof") SpecifierFoundation = NewSpecifier("foundation") @@ -61,13 +62,8 @@ func (pk PublicKey) UnlockKey() UnlockKey { } } -// StandardAddress returns the standard address derived from pk. -func (pk PublicKey) StandardAddress() Address { - return standardUnlockHash(pk) -} - // StandardUnlockConditions returns the standard unlock conditions for pk. 
-func (pk PublicKey) StandardUnlockConditions() UnlockConditions { +func StandardUnlockConditions(pk PublicKey) UnlockConditions { return UnlockConditions{ PublicKeys: []UnlockKey{pk.UnlockKey()}, SignaturesRequired: 1, @@ -141,7 +137,7 @@ func (uc UnlockConditions) UnlockHash() Address { uc.PublicKeys[0].Algorithm == SpecifierEd25519 && len(uc.PublicKeys[0].Key) == len(PublicKey{}) && uc.SignaturesRequired == 1 { - return standardUnlockHash(*(*PublicKey)(uc.PublicKeys[0].Key)) + return StandardUnlockHash(*(*PublicKey)(uc.PublicKeys[0].Key)) } h := hasherPool.Get().(*Hasher) @@ -389,8 +385,7 @@ type TransactionSignature struct { Signature []byte `json:"signature"` } -// A Transaction transfers value by consuming existing Outputs and creating new -// Outputs. +// A Transaction effects a change of blockchain state. type Transaction struct { SiacoinInputs []SiacoinInput `json:"siacoinInputs,omitempty"` SiacoinOutputs []SiacoinOutput `json:"siacoinOutputs,omitempty"` @@ -457,28 +452,341 @@ func (txn *Transaction) FileContractID(i int) FileContractID { return FileContractID(h.Sum()) } -// A BlockHeader contains a Block's non-transaction data. -type BlockHeader struct { - ParentID BlockID `json:"parentID"` - Nonce uint64 `json:"nonce"` - Timestamp time.Time `json:"timestamp"` - MerkleRoot Hash256 `json:"merkleRoot"` +// A V2FileContract is a storage agreement between a renter and a host. It +// consists of a bidirectional payment channel that resolves as either "valid" +// or "missed" depending on whether a valid StorageProof is submitted for the +// contract. 
+type V2FileContract struct { + Filesize uint64 + FileMerkleRoot Hash256 + WindowStart uint64 + WindowEnd uint64 + RenterOutput SiacoinOutput + HostOutput SiacoinOutput + MissedHostValue Currency + TotalCollateral Currency + RenterPublicKey PublicKey + HostPublicKey PublicKey + RevisionNumber uint64 + + // signatures cover above fields + RenterSignature Signature + HostSignature Signature } -// ID returns a hash that uniquely identifies a block. -func (bh BlockHeader) ID() BlockID { - buf := make([]byte, 32+8+8+32) - copy(buf[0:32], bh.ParentID[:]) - binary.LittleEndian.PutUint64(buf[32:40], bh.Nonce) - binary.LittleEndian.PutUint64(buf[40:48], uint64(bh.Timestamp.Unix())) - copy(buf[48:80], bh.MerkleRoot[:]) - return BlockID(HashBytes(buf)) +// MissedHostOutput returns the host output that will be created if the contract +// resolves missed. +func (fc V2FileContract) MissedHostOutput() SiacoinOutput { + return SiacoinOutput{ + Value: fc.MissedHostValue, + Address: fc.HostOutput.Address, + } +} + +// A V2SiacoinInput spends an unspent SiacoinElement in the state accumulator by +// revealing its public key and signing the transaction. +type V2SiacoinInput struct { + Parent SiacoinElement + SpendPolicy SpendPolicy + Signatures []Signature +} + +// A V2SiafundInput spends an unspent SiafundElement in the state accumulator by +// revealing its public key and signing the transaction. Inputs also include a +// ClaimAddress, specifying the recipient of the siacoins that were earned by +// the SiafundElement. +type V2SiafundInput struct { + Parent SiafundElement + ClaimAddress Address + SpendPolicy SpendPolicy + Signatures []Signature +} + +// A V2FileContractRevision updates the state of an existing file contract. +type V2FileContractRevision struct { + Parent FileContractElement + Revision V2FileContract +} + +// A FileContractResolution closes a file contract's payment channel. 
There are +// four ways a contract can be resolved: +// +// 1) The renter and host can renew the contract. The old contract is finalized, +// and a portion of its funds are "rolled over" into a new contract. +// +// 2) The host can submit a valid storage proof within the contract's proof +// window. This is considered a "valid" resolution. +// +// 3) The renter and host can sign a final contract revision (a "finalization"), +// setting the contract's revision number to its maximum legal value. A +// finalization can be submitted at any time prior to the contract's WindowEnd. +// This is considered a "valid" resolution. +// +// 4) After the proof window has expired, anyone can submit an empty resolution +// with no storage proof or finalization. This is considered a "missed" +// resolution. +type FileContractResolution struct { + Parent FileContractElement + Renewal FileContractRenewal + StorageProof V2StorageProof + Finalization V2FileContract +} + +// HasRenewal returns true if the resolution contains a renewal. +func (fcr *FileContractResolution) HasRenewal() bool { + return fcr.Renewal != (FileContractRenewal{}) +} + +// HasStorageProof returns true if the resolution contains a storage proof. +func (fcr *FileContractResolution) HasStorageProof() bool { + sp := &fcr.StorageProof + return sp.WindowStart != (ChainIndex{}) || len(sp.WindowProof) > 0 || + sp.Leaf != ([64]byte{}) || len(sp.Proof) > 0 +} + +// HasFinalization returns true if the resolution contains a finalization. +func (fcr *FileContractResolution) HasFinalization() bool { + return fcr.Finalization != (V2FileContract{}) +} + +// A FileContractRenewal renews a file contract. 
+type FileContractRenewal struct { + FinalRevision V2FileContract + InitialRevision V2FileContract + RenterRollover Currency + HostRollover Currency + + // signatures cover above fields + RenterSignature Signature + HostSignature Signature +} + +// A V2StorageProof asserts the presence of a randomly-selected leaf within the +// Merkle tree of a V2FileContract's data. +type V2StorageProof struct { + // Selecting the leaf requires a source of unpredictable entropy; we use the + // ID of the block at the start of the proof window. The StorageProof + // includes this ID, and asserts its presence in the chain via a separate + // Merkle proof. + // + // For convenience, WindowStart is a ChainIndex rather than a BlockID. + // Consequently, WindowStart.Height MUST match the WindowStart field of the + // contract's final revision; otherwise, the prover could use any + // WindowStart, giving them control over the leaf index. + WindowStart ChainIndex + WindowProof []Hash256 + + // The leaf is always 64 bytes, extended with zeros if necessary. + Leaf [64]byte + Proof []Hash256 +} + +// A StateElement is a generic element within the state accumulator. +type StateElement struct { + ID Hash256 // SiacoinOutputID, FileContractID, etc. + LeafIndex uint64 + MerkleProof []Hash256 +} + +// A SiacoinElement is a volume of siacoins that is created and spent as an +// atomic unit. +type SiacoinElement struct { + StateElement + SiacoinOutput + MaturityHeight uint64 +} + +// A SiafundElement is a volume of siafunds that is created and spent as an +// atomic unit. +type SiafundElement struct { + StateElement + SiafundOutput + ClaimStart Currency // value of SiafundPool when element was created +} + +// A FileContractElement is a storage agreement between a renter and a host. +type FileContractElement struct { + StateElement + V2FileContract +} + +// An Attestation associates a key-value pair with an identity. 
For example, +// hosts attest to their network address by setting Key to "HostAnnouncement" +// and Value to their address, thereby allowing renters to discover them. +// Generally, an attestation for a particular key is considered to overwrite any +// previous attestations with the same key. (This allows hosts to announce a new +// network address, for example.) +type Attestation struct { + PublicKey PublicKey + Key string + Value []byte + Signature Signature +} + +// A V2Transaction effects a change of blockchain state. +type V2Transaction struct { + SiacoinInputs []V2SiacoinInput `json:"siacoinInputs"` + SiacoinOutputs []SiacoinOutput `json:"siacoinOutputs"` + SiafundInputs []V2SiafundInput `json:"siafundInputs"` + SiafundOutputs []SiafundOutput `json:"siafundOutputs"` + FileContracts []V2FileContract `json:"fileContracts"` + FileContractRevisions []V2FileContractRevision `json:"fileContractRevisions"` + FileContractResolutions []FileContractResolution `json:"fileContractResolutions"` + Attestations []Attestation `json:"attestations"` + ArbitraryData []byte `json:"arbitraryData"` + NewFoundationAddress Address `json:"newFoundationAddress"` + MinerFee Currency `json:"minerFee"` +} + +// ID returns the "semantic hash" of the transaction, covering all of the +// transaction's effects, but not incidental data such as signatures or Merkle +// proofs. This ensures that the ID will remain stable (i.e. non-malleable). +// +// To hash all of the data in a transaction, use the EncodeTo method. +func (txn *V2Transaction) ID() TransactionID { + // NOTE: In general, it is not possible to change a transaction's ID without + // causing it to become invalid, but an exception exists for non-standard + // spend policies. Consider a policy that may be satisfied by either a + // signature or a timelock. If a transaction is broadcast that signs the + // input, and the timelock has expired, then anyone may remove the signature + // from the input without invalidating the transaction. 
Of course, the net + // result will be the same, so arguably there's little reason to care. You + // only need to worry about this if you're hashing the full transaction data + // for some reason. + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + h.E.WriteString("sia/id/transaction|") + h.E.WritePrefix(len(txn.SiacoinInputs)) + for _, in := range txn.SiacoinInputs { + in.Parent.ID.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.SiacoinOutputs)) + for _, out := range txn.SiacoinOutputs { + out.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.SiafundInputs)) + for _, in := range txn.SiafundInputs { + in.Parent.ID.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.SiafundOutputs)) + for _, out := range txn.SiafundOutputs { + out.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.FileContracts)) + for _, fc := range txn.FileContracts { + fc.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.FileContractRevisions)) + for _, fcr := range txn.FileContractRevisions { + fcr.Parent.ID.EncodeTo(h.E) + fcr.Revision.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.FileContractResolutions)) + for _, fcr := range txn.FileContractResolutions { + fcr.Parent.ID.EncodeTo(h.E) + fcr.Renewal.EncodeTo(h.E) + fcr.StorageProof.WindowStart.EncodeTo(h.E) + fcr.Finalization.EncodeTo(h.E) + } + for _, a := range txn.Attestations { + a.EncodeTo(h.E) + } + h.E.WriteBytes(txn.ArbitraryData) + txn.NewFoundationAddress.EncodeTo(h.E) + txn.MinerFee.EncodeTo(h.E) + return TransactionID(h.Sum()) +} + +// SiacoinOutputID returns the ID of the siacoin output at index i. +func (txn *V2Transaction) SiacoinOutputID(i int) SiacoinOutputID { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierSiacoinOutput.EncodeTo(h.E) + txn.ID().EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return SiacoinOutputID(h.Sum()) +} + +// SiafundOutputID returns the ID of the siafund output at index i. 
+func (txn *V2Transaction) SiafundOutputID(i int) SiafundOutputID { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierSiafundOutput.EncodeTo(h.E) + txn.ID().EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return SiafundOutputID(h.Sum()) +} + +// SiafundClaimOutputID returns the ID of the siacoin claim output for the +// siafund input at index i. +func (txn *V2Transaction) SiafundClaimOutputID(i int) SiacoinOutputID { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierClaimOutput.EncodeTo(h.E) + txn.ID().EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return SiacoinOutputID(h.Sum()) +} + +// FileContractID returns the ID of the file contract at index i. +func (txn *V2Transaction) FileContractID(i int) FileContractID { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierFileContract.EncodeTo(h.E) + txn.ID().EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return FileContractID(h.Sum()) +} + +// DeepCopy returns a copy of txn that does not alias any of its memory. +func (txn *V2Transaction) DeepCopy() V2Transaction { + c := *txn + c.SiacoinInputs = append([]V2SiacoinInput(nil), c.SiacoinInputs...) + for i := range c.SiacoinInputs { + c.SiacoinInputs[i].Parent.MerkleProof = append([]Hash256(nil), c.SiacoinInputs[i].Parent.MerkleProof...) + c.SiacoinInputs[i].Signatures = append([]Signature(nil), c.SiacoinInputs[i].Signatures...) + } + c.SiacoinOutputs = append([]SiacoinOutput(nil), c.SiacoinOutputs...) + c.SiafundInputs = append([]V2SiafundInput(nil), c.SiafundInputs...) + for i := range c.SiafundInputs { + c.SiafundInputs[i].Parent.MerkleProof = append([]Hash256(nil), c.SiafundInputs[i].Parent.MerkleProof...) + c.SiafundInputs[i].Signatures = append([]Signature(nil), c.SiafundInputs[i].Signatures...) + } + c.SiafundOutputs = append([]SiafundOutput(nil), c.SiafundOutputs...) + c.FileContracts = append([]V2FileContract(nil), c.FileContracts...) 
+ c.FileContractRevisions = append([]V2FileContractRevision(nil), c.FileContractRevisions...) + for i := range c.FileContractRevisions { + c.FileContractRevisions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractRevisions[i].Parent.MerkleProof...) + } + c.FileContractResolutions = append([]FileContractResolution(nil), c.FileContractResolutions...) + for i := range c.FileContractResolutions { + c.FileContractResolutions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractResolutions[i].Parent.MerkleProof...) + c.FileContractResolutions[i].StorageProof.WindowProof = append([]Hash256(nil), c.FileContractResolutions[i].StorageProof.WindowProof...) + c.FileContractResolutions[i].StorageProof.Proof = append([]Hash256(nil), c.FileContractResolutions[i].StorageProof.Proof...) + } + for i := range c.Attestations { + c.Attestations[i].Value = append([]byte(nil), c.Attestations[i].Value...) + } + c.ArbitraryData = append([]byte(nil), c.ArbitraryData...) + return c } // CurrentTimestamp returns the current time, rounded to the nearest second. The // time zone is set to UTC. func CurrentTimestamp() time.Time { return time.Now().Round(time.Second).UTC() } +// V2BlockData contains additional fields not present in v1 blocks. +type V2BlockData struct { + Height uint64 `json:"height"` + Commitment Hash256 `json:"commitment"` + Transactions []V2Transaction `json:"transactions"` +} + // A Block is a set of transactions grouped under a header. type Block struct { ParentID BlockID `json:"parentID"` @@ -486,13 +794,13 @@ type Block struct { Timestamp time.Time `json:"timestamp"` MinerPayouts []SiacoinOutput `json:"minerPayouts"` Transactions []Transaction `json:"transactions"` + + V2 *V2BlockData `json:"v2,omitempty"` } -// Header returns the header for the block. -// -// Note that this is a relatively expensive operation, as it computes the Merkle -// root of the block's transactions. 
-func (b *Block) Header() BlockHeader { +// MerkleRoot returns the Merkle root of the block's miner payouts and +// transactions. +func (b *Block) MerkleRoot() Hash256 { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) var acc merkleAccumulator @@ -508,20 +816,24 @@ func (b *Block) Header() BlockHeader { txn.EncodeTo(h.E) acc.addLeaf(h.Sum()) } - return BlockHeader{ - ParentID: b.ParentID, - Nonce: b.Nonce, - Timestamp: b.Timestamp, - MerkleRoot: acc.root(), - } + return acc.root() } -// ID returns a hash that uniquely identifies a block. It is equivalent to -// b.Header().ID(). -// -// Note that this is a relatively expensive operation, as it computes the Merkle -// root of the block's transactions. -func (b *Block) ID() BlockID { return b.Header().ID() } +// ID returns a hash that uniquely identifies a block. +func (b *Block) ID() BlockID { + buf := make([]byte, 32+8+8+32) + binary.LittleEndian.PutUint64(buf[32:], b.Nonce) + binary.LittleEndian.PutUint64(buf[40:], uint64(b.Timestamp.Unix())) + if b.V2 == nil { + root := b.MerkleRoot() // NOTE: expensive! 
+ copy(buf[:32], b.ParentID[:]) + copy(buf[48:], root[:]) + } else { + copy(buf[:32], "sia/id/block|") + copy(buf[48:], b.V2.Commitment[:]) + } + return BlockID(HashBytes(buf)) +} // Implementations of fmt.Stringer, encoding.Text(Un)marshaler, and json.(Un)marshaler From 1648718b01532f53b828f341b25ecb379fd3f9e1 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Fri, 2 Jun 2023 01:31:44 -0400 Subject: [PATCH 03/53] blake2b: Add Accumulator --- internal/blake2b/blake2b.go | 36 ++++++++++++++++++ types/encoding.go | 19 ++-------- types/hash.go | 73 ++++++++++++++++++++----------------- types/policy.go | 67 +++------------------------------- types/types.go | 38 +------------------ 5 files changed, 88 insertions(+), 145 deletions(-) diff --git a/internal/blake2b/blake2b.go b/internal/blake2b/blake2b.go index 1d16e1d3..303fb62f 100644 --- a/internal/blake2b/blake2b.go +++ b/internal/blake2b/blake2b.go @@ -4,6 +4,7 @@ package blake2b import ( "hash" + "math/bits" "unsafe" "golang.org/x/crypto/blake2b" @@ -60,3 +61,38 @@ func hashBlocksGeneric(outs *[4][32]byte, msgs *[4][64]byte, prefix uint64) { outs[i] = hashBlockGeneric(&msgs[i], prefix) } } + +// An Accumulator is a generic Merkle tree accumulator. 
+type Accumulator struct { + Trees [64][32]byte + NumLeaves uint64 +} + +func (acc *Accumulator) hasTreeAtHeight(height int) bool { + return acc.NumLeaves&(1< 0 { sb.WriteByte(',') } - sb.WriteString(hex.EncodeToString(pk[:])) + sb.WriteString(hex.EncodeToString(pk.Key[:])) } sb.WriteString("],") sb.WriteString(strconv.FormatUint(uint64(p.SignaturesRequired), 10)) @@ -250,9 +195,9 @@ func ParseSpendPolicy(s string) (SpendPolicy, error) { timelock := parseInt(64) consume(',') consume('[') - var pks []PublicKey + var pks []UnlockKey for err == nil && peek() != ']' { - pks = append(pks, parsePubkey()) + pks = append(pks, parsePubkey().UnlockKey()) if peek() != ']' { consume(',') } @@ -264,7 +209,7 @@ func ParseSpendPolicy(s string) (SpendPolicy, error) { PolicyTypeUnlockConditions{ Timelock: timelock, PublicKeys: pks, - SignaturesRequired: uint8(sigsRequired), + SignaturesRequired: sigsRequired, }, } default: diff --git a/types/types.go b/types/types.go index ea160e07..0107ac7f 100644 --- a/types/types.go +++ b/types/types.go @@ -139,26 +139,7 @@ func (uc UnlockConditions) UnlockHash() Address { uc.SignaturesRequired == 1 { return StandardUnlockHash(*(*PublicKey)(uc.PublicKeys[0].Key)) } - - h := hasherPool.Get().(*Hasher) - defer hasherPool.Put(h) - h.Reset() - - var acc merkleAccumulator - h.E.WriteUint8(leafHashPrefix) - h.E.WriteUint64(uc.Timelock) - acc.addLeaf(h.Sum()) - for _, key := range uc.PublicKeys { - h.Reset() - h.E.WriteUint8(leafHashPrefix) - key.EncodeTo(h.E) - acc.addLeaf(h.Sum()) - } - h.Reset() - h.E.WriteUint8(leafHashPrefix) - h.E.WriteUint64(uc.SignaturesRequired) - acc.addLeaf(h.Sum()) - return Address(acc.root()) + return unlockConditionsRoot(uc) } // An Address is the hash of a set of UnlockConditions. @@ -801,22 +782,7 @@ type Block struct { // MerkleRoot returns the Merkle root of the block's miner payouts and // transactions. 
func (b *Block) MerkleRoot() Hash256 { - h := hasherPool.Get().(*Hasher) - defer hasherPool.Put(h) - var acc merkleAccumulator - for _, mp := range b.MinerPayouts { - h.Reset() - h.E.WriteUint8(leafHashPrefix) - mp.EncodeTo(h.E) - acc.addLeaf(h.Sum()) - } - for _, txn := range b.Transactions { - h.Reset() - h.E.WriteUint8(leafHashPrefix) - txn.EncodeTo(h.E) - acc.addLeaf(h.Sum()) - } - return acc.root() + return blockMerkleRoot(b.MinerPayouts, b.Transactions) } // ID returns a hash that uniquely identifies a block. From 1e84712973b4d600def7ca7c3e5fa17b1c026f62 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Fri, 2 Jun 2023 01:38:28 -0400 Subject: [PATCH 04/53] types: Flush Hasher.E in Reset --- types/hash.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/types/hash.go b/types/hash.go index a2e2084c..0e4920c5 100644 --- a/types/hash.go +++ b/types/hash.go @@ -19,8 +19,11 @@ type Hasher struct { E *Encoder } -// Reset resets the underlying hash digest state. -func (h *Hasher) Reset() { h.h.Reset() } +// Reset resets the underlying hash and encoder state. +func (h *Hasher) Reset() { + h.E.n = 0 + h.h.Reset() +} // Sum returns the digest of the objects written to the Hasher. func (h *Hasher) Sum() (sum Hash256) { From a1ec1539fefb7ed050d5c4914307a8d56b659d26 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Fri, 2 Jun 2023 17:47:55 -0400 Subject: [PATCH 05/53] types: Add Block encoding helpers --- chain/manager.go | 9 ++-- consensus/validation_test.go | 4 +- gateway/encoding.go | 56 +++++++++++++++++++++-- gateway/peer.go | 49 +++++++++++++++++--- types/encoding.go | 88 ++++++++++++++++++++++++------------ 5 files changed, 159 insertions(+), 47 deletions(-) diff --git a/chain/manager.go b/chain/manager.go index 12f06e61..15e1f0f7 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -25,8 +25,8 @@ type Checkpoint struct { // EncodeTo implements types.EncoderTo. 
func (c Checkpoint) EncodeTo(e *types.Encoder) { - e.WriteUint8(1) // block version - c.Block.EncodeTo(e) + e.WriteUint8(2) // block (and diff) version + types.V2Block(c.Block).EncodeTo(e) e.WriteUint8(1) // state version c.State.EncodeTo(e) e.WriteBool(c.Diff != nil) @@ -37,10 +37,11 @@ func (c Checkpoint) EncodeTo(e *types.Encoder) { // DecodeFrom implements types.DecoderFrom. func (c *Checkpoint) DecodeFrom(d *types.Decoder) { - if v := d.ReadUint8(); v != 1 { + v := d.ReadUint8() + if v != 2 { d.SetErr(fmt.Errorf("incompatible block version (%d)", v)) } - c.Block.DecodeFrom(d) + (*types.V2Block)(&c.Block).DecodeFrom(d) if v := d.ReadUint8(); v != 1 { d.SetErr(fmt.Errorf("incompatible state version (%d)", v)) } diff --git a/consensus/validation_test.go b/consensus/validation_test.go index ed992b40..deaa063f 100644 --- a/consensus/validation_test.go +++ b/consensus/validation_test.go @@ -23,10 +23,10 @@ func findBlockNonce(cs consensus.State, b *types.Block) { func deepCopyBlock(b types.Block) (b2 types.Block) { var buf bytes.Buffer e := types.NewEncoder(&buf) - b.EncodeTo(e) + types.V2Block(b).EncodeTo(e) e.Flush() d := types.NewBufDecoder(buf.Bytes()) - b2.DecodeFrom(d) + (*types.V2Block)(&b2).DecodeFrom(d) return } diff --git a/gateway/encoding.go b/gateway/encoding.go index 8049d910..a33d4dea 100644 --- a/gateway/encoding.go +++ b/gateway/encoding.go @@ -54,6 +54,24 @@ func (h *BlockHeader) decodeFrom(d *types.Decoder) { h.MerkleRoot.DecodeFrom(d) } +func (h *V2BlockHeader) encodeTo(e *types.Encoder) { + e.WriteUint64(h.Height) + h.ParentID.EncodeTo(e) + e.WriteUint64(h.Nonce) + e.WriteTime(h.Timestamp) + h.MinerAddress.EncodeTo(e) + h.Commitment.EncodeTo(e) +} + +func (h *V2BlockHeader) decodeFrom(d *types.Decoder) { + h.Height = d.ReadUint64() + h.ParentID.DecodeFrom(d) + h.Nonce = d.ReadUint64() + h.Timestamp = d.ReadTime() + h.MinerAddress.DecodeFrom(d) + h.Commitment.DecodeFrom(d) +} + type object interface { encodeRequest(e *types.Encoder) 
decodeRequest(d *types.Decoder) @@ -128,13 +146,13 @@ func (r *RPCSendBlocks) maxRequestLen() int { return 32 * 32 } func (r *RPCSendBlocks) encodeBlocksResponse(e *types.Encoder) { e.WritePrefix(len(r.Blocks)) for i := range r.Blocks { - r.Blocks[i].EncodeTo(e) + types.V1Block(r.Blocks[i]).EncodeTo(e) } } func (r *RPCSendBlocks) decodeBlocksResponse(d *types.Decoder) { r.Blocks = make([]types.Block, d.ReadPrefix()) for i := range r.Blocks { - r.Blocks[i].DecodeFrom(d) + (*types.V1Block)(&r.Blocks[i]).DecodeFrom(d) } } func (r *RPCSendBlocks) maxBlocksResponseLen() int { return 10 * 5e6 } @@ -155,8 +173,8 @@ type RPCSendBlk struct { func (r *RPCSendBlk) encodeRequest(e *types.Encoder) { r.ID.EncodeTo(e) } func (r *RPCSendBlk) decodeRequest(d *types.Decoder) { r.ID.DecodeFrom(d) } func (r *RPCSendBlk) maxRequestLen() int { return 32 } -func (r *RPCSendBlk) encodeResponse(e *types.Encoder) { r.Block.EncodeTo(e) } -func (r *RPCSendBlk) decodeResponse(d *types.Decoder) { r.Block.DecodeFrom(d) } +func (r *RPCSendBlk) encodeResponse(e *types.Encoder) { (types.V1Block)(r.Block).EncodeTo(e) } +func (r *RPCSendBlk) decodeResponse(d *types.Decoder) { (*types.V1Block)(&r.Block).DecodeFrom(d) } func (r *RPCSendBlk) maxResponseLen() int { return 5e6 } // RPCRelayHeader relays a header. @@ -169,6 +187,16 @@ func (r *RPCRelayHeader) encodeRequest(e *types.Encoder) { r.Header.encodeTo(e) func (r *RPCRelayHeader) decodeRequest(d *types.Decoder) { r.Header.decodeFrom(d) } func (r *RPCRelayHeader) maxRequestLen() int { return 32 + 8 + 8 + 32 } +// RPCRelayV2Header relays a v2 header. +type RPCRelayV2Header struct { + Header V2BlockHeader + emptyResponse +} + +func (r *RPCRelayV2Header) encodeRequest(e *types.Encoder) { r.Header.encodeTo(e) } +func (r *RPCRelayV2Header) decodeRequest(d *types.Decoder) { r.Header.decodeFrom(d) } +func (r *RPCRelayV2Header) maxRequestLen() int { return 32 + 8 + 8 + 32 } + // RPCRelayTransactionSet relays a transaction set. 
type RPCRelayTransactionSet struct { Transactions []types.Transaction @@ -189,6 +217,26 @@ func (r *RPCRelayTransactionSet) decodeRequest(d *types.Decoder) { } func (r *RPCRelayTransactionSet) maxRequestLen() int { return 5e6 } +// RPCRelayV2TransactionSet relays a v2 transaction set. +type RPCRelayV2TransactionSet struct { + Transactions []types.V2Transaction + emptyResponse +} + +func (r *RPCRelayV2TransactionSet) encodeRequest(e *types.Encoder) { + e.WritePrefix(len(r.Transactions)) + for i := range r.Transactions { + r.Transactions[i].EncodeTo(e) + } +} +func (r *RPCRelayV2TransactionSet) decodeRequest(d *types.Decoder) { + r.Transactions = make([]types.V2Transaction, d.ReadPrefix()) + for i := range r.Transactions { + r.Transactions[i].DecodeFrom(d) + } +} +func (r *RPCRelayV2TransactionSet) maxRequestLen() int { return 5e6 } + type rpcID types.Specifier func (id *rpcID) encodeTo(e *types.Encoder) { e.Write(id[:8]) } diff --git a/gateway/peer.go b/gateway/peer.go index 3eef8c4e..ff7edab1 100644 --- a/gateway/peer.go +++ b/gateway/peer.go @@ -1,6 +1,7 @@ package gateway import ( + "encoding/binary" "errors" "fmt" "net" @@ -48,13 +49,33 @@ type BlockHeader struct { } // ID returns a hash that uniquely identifies the block. -func (bh BlockHeader) ID() types.BlockID { - h := types.NewHasher() - bh.ParentID.EncodeTo(h.E) - h.E.WriteUint64(bh.Nonce) - h.E.WriteTime(bh.Timestamp) - bh.MerkleRoot.EncodeTo(h.E) - return types.BlockID(h.Sum()) +func (h BlockHeader) ID() types.BlockID { + buf := make([]byte, 32+8+8+32) + copy(buf[:32], h.ParentID[:]) + binary.LittleEndian.PutUint64(buf[32:], h.Nonce) + binary.LittleEndian.PutUint64(buf[40:], uint64(h.Timestamp.Unix())) + copy(buf[48:], h.MerkleRoot[:]) + return types.BlockID(types.HashBytes(buf)) +} + +// A V2BlockHeader contains a Block's non-transaction data. 
+type V2BlockHeader struct { + Height uint64 + ParentID types.BlockID + Nonce uint64 + Timestamp time.Time + MinerAddress types.Address + Commitment types.Hash256 +} + +// ID returns a hash that uniquely identifies the block. +func (h V2BlockHeader) ID() types.BlockID { + buf := make([]byte, 32+8+8+32) + copy(buf[:32], "sia/id/block|") + binary.LittleEndian.PutUint64(buf[32:], h.Nonce) + binary.LittleEndian.PutUint64(buf[40:], uint64(h.Timestamp.Unix())) + copy(buf[48:], h.Commitment[:]) + return types.BlockID(types.HashBytes(buf)) } // A Peer is a connected gateway peer. @@ -107,6 +128,8 @@ type RPCHandler interface { BlocksForHistory(history [32]types.BlockID) ([]types.Block, bool, error) RelayHeader(h BlockHeader, origin *Peer) RelayTransactionSet(txns []types.Transaction, origin *Peer) + RelayV2Header(h V2BlockHeader, origin *Peer) + RelayV2TransactionSet(txns []types.V2Transaction, origin *Peer) } // HandleRPC handles an RPC received from the peer. @@ -130,12 +153,24 @@ func (p *Peer) HandleRPC(id types.Specifier, stream net.Conn, h RPCHandler) erro } h.RelayHeader(r.Header, p) return nil + case *RPCRelayV2Header: + if err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { + return err + } + h.RelayV2Header(r.Header, p) + return nil case *RPCRelayTransactionSet: if err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { return err } h.RelayTransactionSet(r.Transactions, p) return nil + case *RPCRelayV2TransactionSet: + if err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { + return err + } + h.RelayV2TransactionSet(r.Transactions, p) + return nil case *RPCSendBlk: err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest) if err != nil { diff --git a/types/encoding.go b/types/encoding.go index 12bf13ff..859694bf 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -463,21 +463,6 @@ func (txn *Transaction) encodeNoSignatures(e *Encoder) { } } -// EncodeTo implements types.EncoderTo. 
-func (b Block) EncodeTo(e *Encoder) { - b.ParentID.EncodeTo(e) - e.WriteUint64(b.Nonce) - e.WriteTime(b.Timestamp) - e.WritePrefix(len(b.MinerPayouts)) - for i := range b.MinerPayouts { - b.MinerPayouts[i].EncodeTo(e) - } - e.WritePrefix(len(b.Transactions)) - for i := range b.Transactions { - b.Transactions[i].EncodeTo(e) - } -} - // EncodeTo implements types.EncoderTo. func (p SpendPolicy) EncodeTo(e *Encoder) { const ( @@ -743,6 +728,36 @@ func (b V2BlockData) EncodeTo(e *Encoder) { } } +// V1Block provides v1 encoding for Block. +type V1Block Block + +// EncodeTo implements types.EncoderTo. +func (b V1Block) EncodeTo(e *Encoder) { + b.ParentID.EncodeTo(e) + e.WriteUint64(b.Nonce) + e.WriteTime(b.Timestamp) + e.WritePrefix(len(b.MinerPayouts)) + for i := range b.MinerPayouts { + b.MinerPayouts[i].EncodeTo(e) + } + e.WritePrefix(len(b.Transactions)) + for i := range b.Transactions { + b.Transactions[i].EncodeTo(e) + } +} + +// V2Block provides v2 encoding for Block. +type V2Block Block + +// EncodeTo implements types.EncoderTo. +func (b V2Block) EncodeTo(e *Encoder) { + V1Block(b).EncodeTo(e) + e.WriteBool(b.V2 != nil) + if b.V2 != nil { + b.V2.EncodeTo(e) + } +} + // DecodeFrom implements types.DecoderFrom. func (h *Hash256) DecodeFrom(d *Decoder) { d.Read(h[:]) } @@ -974,21 +989,6 @@ func (txn *Transaction) DecodeFrom(d *Decoder) { } } -// DecodeFrom implements types.DecoderFrom. -func (b *Block) DecodeFrom(d *Decoder) { - b.ParentID.DecodeFrom(d) - b.Nonce = d.ReadUint64() - b.Timestamp = d.ReadTime() - b.MinerPayouts = make([]SiacoinOutput, d.ReadPrefix()) - for i := range b.MinerPayouts { - b.MinerPayouts[i].DecodeFrom(d) - } - b.Transactions = make([]Transaction, d.ReadPrefix()) - for i := range b.Transactions { - b.Transactions[i].DecodeFrom(d) - } -} - // DecodeFrom implements types.DecoderFrom. 
func (p *SpendPolicy) DecodeFrom(d *Decoder) { const ( @@ -1242,3 +1242,31 @@ func (b *V2BlockData) DecodeFrom(d *Decoder) { b.Transactions[i].DecodeFrom(d) } } + +// DecodeFrom implements types.DecoderFrom. +func (b *V1Block) DecodeFrom(d *Decoder) { + b.ParentID.DecodeFrom(d) + b.Nonce = d.ReadUint64() + b.Timestamp = d.ReadTime() + b.MinerPayouts = make([]SiacoinOutput, d.ReadPrefix()) + for i := range b.MinerPayouts { + b.MinerPayouts[i].DecodeFrom(d) + } + b.Transactions = make([]Transaction, d.ReadPrefix()) + for i := range b.Transactions { + b.Transactions[i].DecodeFrom(d) + } + if d.ReadBool() { + b.V2 = new(V2BlockData) + b.V2.DecodeFrom(d) + } +} + +// DecodeFrom implements types.DecoderFrom. +func (b *V2Block) DecodeFrom(d *Decoder) { + (*V1Block)(b).DecodeFrom(d) + if d.ReadBool() { + b.V2 = new(V2BlockData) + b.V2.DecodeFrom(d) + } +} From e6a0024617100197d2227141b5da8d5c841b615d Mon Sep 17 00:00:00 2001 From: lukechampine Date: Fri, 2 Jun 2023 17:52:10 -0400 Subject: [PATCH 06/53] types,consensus: Implement Commitment hash --- consensus/state.go | 34 ++++++++++++++++++++++++++++++++++ types/hash.go | 8 +++++++- types/types.go | 6 +++++- 3 files changed, 46 insertions(+), 2 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index ac8dc9cd..ccaf6b98 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "go.sia.tech/core/internal/blake2b" "go.sia.tech/core/types" ) @@ -388,3 +389,36 @@ func (s State) PartialSigHash(txn types.Transaction, cf types.CoveredFields) typ return h.Sum() } + +// Commitment computes the commitment hash for a child block. 
+func (s State) Commitment(minerAddr types.Address, txns []types.Transaction, v2txns []types.V2Transaction) types.Hash256 { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + + // hash the state + s.EncodeTo(h.E) + stateHash := h.Sum() + + // hash the transactions + var acc blake2b.Accumulator + for _, txn := range txns { + h.Reset() + txn.EncodeTo(h.E) + acc.AddLeaf(h.Sum()) + } + for _, txn := range v2txns { + h.Reset() + txn.EncodeTo(h.E) + acc.AddLeaf(h.Sum()) + } + txnsHash := types.Hash256(acc.Root()) + + // concatenate the hashes and the miner address + h.Reset() + h.E.WriteString("sia/commitment|") + stateHash.EncodeTo(h.E) + minerAddr.EncodeTo(h.E) + txnsHash.EncodeTo(h.E) + return h.Sum() +} diff --git a/types/hash.go b/types/hash.go index 0e4920c5..4df2d4ac 100644 --- a/types/hash.go +++ b/types/hash.go @@ -119,7 +119,7 @@ func unlockConditionsRoot(uc UnlockConditions) Address { return acc.Root() } -func blockMerkleRoot(minerPayouts []SiacoinOutput, txns []Transaction) Hash256 { +func blockMerkleRoot(minerPayouts []SiacoinOutput, txns []Transaction, v2txns []V2Transaction) Hash256 { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) var acc blake2b.Accumulator @@ -135,5 +135,11 @@ func blockMerkleRoot(minerPayouts []SiacoinOutput, txns []Transaction) Hash256 { txn.EncodeTo(h.E) acc.AddLeaf(h.Sum()) } + for _, txn := range v2txns { + h.Reset() + h.E.WriteUint8(leafHashPrefix) + txn.EncodeTo(h.E) + acc.AddLeaf(h.Sum()) + } return acc.Root() } diff --git a/types/types.go b/types/types.go index 0107ac7f..023e5587 100644 --- a/types/types.go +++ b/types/types.go @@ -782,7 +782,11 @@ type Block struct { // MerkleRoot returns the Merkle root of the block's miner payouts and // transactions. 
func (b *Block) MerkleRoot() Hash256 { - return blockMerkleRoot(b.MinerPayouts, b.Transactions) + var v2txns []V2Transaction + if b.V2 != nil { + v2txns = b.V2.Transactions + } + return blockMerkleRoot(b.MinerPayouts, b.Transactions, v2txns) } // ID returns a hash that uniquely identifies a block. From 91c67a59e77ddb60321572ebd75b2a8cf5c180e7 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 6 Jun 2023 14:11:28 -0400 Subject: [PATCH 07/53] types: Tweak v2 encodings --- types/encoding.go | 79 +++++++++------- types/types.go | 235 ++++++++++++++++++++++++++++++---------------- 2 files changed, 199 insertions(+), 115 deletions(-) diff --git a/types/encoding.go b/types/encoding.go index 859694bf..ee8ee4a0 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -552,8 +552,8 @@ func (sfe SiafundElement) EncodeTo(e *Encoder) { func (fc V2FileContract) EncodeTo(e *Encoder) { e.WriteUint64(fc.Filesize) fc.FileMerkleRoot.EncodeTo(e) - e.WriteUint64(fc.WindowStart) - e.WriteUint64(fc.WindowEnd) + e.WriteUint64(fc.ProofHeight) + e.WriteUint64(fc.ExpirationHeight) fc.RenterOutput.EncodeTo(e) fc.HostOutput.EncodeTo(e) fc.MissedHostValue.EncodeTo(e) @@ -601,28 +601,27 @@ func (sp V2StorageProof) EncodeTo(e *Encoder) { } } +// EncodeTo implements types.EncoderTo. +func (FileContractExpiration) EncodeTo(e *Encoder) {} + // EncodeTo implements types.EncoderTo. 
func (res FileContractResolution) EncodeTo(e *Encoder) { res.Parent.EncodeTo(e) - var fields uint8 - for i, b := range [...]bool{ - res.HasRenewal(), - res.HasStorageProof(), - res.HasFinalization(), - } { - if b { - fields |= 1 << i - } - } - e.WriteUint8(fields) - if fields&(1<<0) != 0 { - res.Renewal.EncodeTo(e) - } - if fields&(1<<1) != 0 { - res.StorageProof.EncodeTo(e) - } - if fields&(1<<2) != 0 { - res.Finalization.EncodeTo(e) + switch r := res.Resolution.(type) { + case FileContractRenewal: + e.WriteUint8(0) + r.EncodeTo(e) + case V2StorageProof: + e.WriteUint8(1) + r.EncodeTo(e) + case V2FileContract: + e.WriteUint8(2) + r.EncodeTo(e) + case FileContractExpiration: + e.WriteUint8(3) + r.EncodeTo(e) + default: + panic(fmt.Sprintf("unhandled resolution type %T", r)) } } @@ -650,7 +649,7 @@ func (txn V2Transaction) EncodeTo(e *Encoder) { len(txn.FileContractResolutions) != 0, len(txn.Attestations) != 0, len(txn.ArbitraryData) != 0, - txn.NewFoundationAddress != VoidAddress, + txn.NewFoundationAddress != nil, !txn.MinerFee.IsZero(), } { if b { @@ -1093,8 +1092,8 @@ func (sfe *SiafundElement) DecodeFrom(d *Decoder) { func (fc *V2FileContract) DecodeFrom(d *Decoder) { fc.Filesize = d.ReadUint64() fc.FileMerkleRoot.DecodeFrom(d) - fc.WindowStart = d.ReadUint64() - fc.WindowEnd = d.ReadUint64() + fc.ProofHeight = d.ReadUint64() + fc.ExpirationHeight = d.ReadUint64() fc.RenterOutput.DecodeFrom(d) fc.HostOutput.DecodeFrom(d) fc.MissedHostValue.DecodeFrom(d) @@ -1142,18 +1141,31 @@ func (sp *V2StorageProof) DecodeFrom(d *Decoder) { } } +// DecodeFrom implements types.DecoderFrom. +func (*FileContractExpiration) DecodeFrom(d *Decoder) {} + // DecodeFrom implements types.DecoderFrom. 
func (res *FileContractResolution) DecodeFrom(d *Decoder) { res.Parent.DecodeFrom(d) - fields := d.ReadUint8() - if fields&(1<<0) != 0 { - res.Renewal.DecodeFrom(d) - } - if fields&(1<<1) != 0 { - res.StorageProof.DecodeFrom(d) - } - if fields&(1<<2) != 0 { - res.Finalization.DecodeFrom(d) + switch t := d.ReadUint8(); t { + case 0: + var r FileContractRenewal + r.DecodeFrom(d) + res.Resolution = r + case 1: + var r V2StorageProof + r.DecodeFrom(d) + res.Resolution = r + case 2: + var r V2FileContract + r.DecodeFrom(d) + res.Resolution = r + case 3: + var r FileContractExpiration + r.DecodeFrom(d) + res.Resolution = r + default: + d.SetErr(fmt.Errorf("unknown resolution type %d", t)) } } @@ -1226,6 +1238,7 @@ func (txn *V2Transaction) DecodeFrom(d *Decoder) { txn.ArbitraryData = d.ReadBytes() } if fields&(1<<9) != 0 { + txn.NewFoundationAddress = new(Address) txn.NewFoundationAddress.DecodeFrom(d) } if fields&(1<<10) != 0 { diff --git a/types/types.go b/types/types.go index 023e5587..74924708 100644 --- a/types/types.go +++ b/types/types.go @@ -438,21 +438,21 @@ func (txn *Transaction) FileContractID(i int) FileContractID { // or "missed" depending on whether a valid StorageProof is submitted for the // contract. 
type V2FileContract struct { - Filesize uint64 - FileMerkleRoot Hash256 - WindowStart uint64 - WindowEnd uint64 - RenterOutput SiacoinOutput - HostOutput SiacoinOutput - MissedHostValue Currency - TotalCollateral Currency - RenterPublicKey PublicKey - HostPublicKey PublicKey - RevisionNumber uint64 + Filesize uint64 `json:"filesize"` + FileMerkleRoot Hash256 `json:"fileMerkleRoot"` + ProofHeight uint64 `json:"proofHeight"` + ExpirationHeight uint64 `json:"expirationHeight"` + RenterOutput SiacoinOutput `json:"renterOutput"` + HostOutput SiacoinOutput `json:"hostOutput"` + MissedHostValue Currency `json:"missedHostValue"` + TotalCollateral Currency `json:"totalCollateral"` + RenterPublicKey PublicKey `json:"renterPublicKey"` + HostPublicKey PublicKey `json:"hostPublicKey"` + RevisionNumber uint64 `json:"revisionNumber"` // signatures cover above fields - RenterSignature Signature - HostSignature Signature + RenterSignature Signature `json:"renterSignature"` + HostSignature Signature `json:"hostSignature"` } // MissedHostOutput returns the host output that will be created if the contract @@ -467,9 +467,9 @@ func (fc V2FileContract) MissedHostOutput() SiacoinOutput { // A V2SiacoinInput spends an unspent SiacoinElement in the state accumulator by // revealing its public key and signing the transaction. type V2SiacoinInput struct { - Parent SiacoinElement - SpendPolicy SpendPolicy - Signatures []Signature + Parent SiacoinElement `json:"parent"` + SpendPolicy SpendPolicy `json:"spendPolicy"` + Signatures []Signature `json:"signatures"` } // A V2SiafundInput spends an unspent SiafundElement in the state accumulator by @@ -477,69 +477,69 @@ type V2SiacoinInput struct { // ClaimAddress, specifying the recipient of the siacoins that were earned by // the SiafundElement. 
type V2SiafundInput struct { - Parent SiafundElement - ClaimAddress Address - SpendPolicy SpendPolicy - Signatures []Signature + Parent SiafundElement `json:"parent"` + ClaimAddress Address `json:"claimAddress"` + SpendPolicy SpendPolicy `json:"spendPolicy"` + Signatures []Signature `json:"signatures"` } // A V2FileContractRevision updates the state of an existing file contract. type V2FileContractRevision struct { - Parent FileContractElement - Revision V2FileContract + Parent FileContractElement `json:"parent"` + Revision V2FileContract `json:"revision"` } // A FileContractResolution closes a file contract's payment channel. There are // four ways a contract can be resolved: // -// 1) The renter and host can renew the contract. The old contract is finalized, -// and a portion of its funds are "rolled over" into a new contract. +// 1) The host can submit a storage proof. This is considered a "valid" +// resolution: the RenterOutput and HostOutput fields of the (finalized) +// contract are created. // -// 2) The host can submit a valid storage proof within the contract's proof -// window. This is considered a "valid" resolution. +// 2) The renter and host can sign a final contract revision (a "finalization"), +// setting the contract's revision number to its maximum legal value. This is +// considered a "valid" resolution. // -// 3) The renter and host can sign a final contract revision (a "finalization"), -// setting the contract's revision number to its maximum legal value. A -// finalization can be submitted at any time prior to the contract's WindowEnd. +// 3) The renter and host can jointly renew the contract. The old contract is +// finalized, and a portion of its funds are "rolled over" into a new contract. // This is considered a "valid" resolution. // -// 4) After the proof window has expired, anyone can submit an empty resolution -// with no storage proof or finalization. This is considered a "missed" -// resolution. 
+// 4) Lastly, anyone can submit a contract expiration. Typically, this results +// in a "missed" resolution: the RenterOutput is created as usual, but the +// HostOutput will have value equal to MissedHostValue. However, if the contract +// is empty (i.e. its Filesize is 0), it instead resolves as valid. +// +// There are two restrictions on when a particular type of resolution may be +// submitted: a storage proof may only be submitted after the contract's +// ProofHeight, and an expiration may only be submitted after the contract's +// ExpirationHeight. Since anyone can submit an expiration, it is generally in +// the renter and/or host's interest to submit a different type of resolution +// prior to the ExpirationHeight. type FileContractResolution struct { - Parent FileContractElement - Renewal FileContractRenewal - StorageProof V2StorageProof - Finalization V2FileContract -} - -// HasRenewal returns true if the resolution contains a renewal. -func (fcr *FileContractResolution) HasRenewal() bool { - return fcr.Renewal != (FileContractRenewal{}) + Parent FileContractElement `json:"parent"` + Resolution FileContractResolutionType `json:"resolution"` } -// HasStorageProof returns true if the resolution contains a storage proof. -func (fcr *FileContractResolution) HasStorageProof() bool { - sp := &fcr.StorageProof - return sp.WindowStart != (ChainIndex{}) || len(sp.WindowProof) > 0 || - sp.Leaf != ([64]byte{}) || len(sp.Proof) > 0 +// FileContractResolutionType enumerates the types of file contract resolution. +type FileContractResolutionType interface { + isFileContractResolution() } -// HasFinalization returns true if the resolution contains a finalization. 
-func (fcr *FileContractResolution) HasFinalization() bool { - return fcr.Finalization != (V2FileContract{}) -} +func (FileContractRenewal) isFileContractResolution() {} +func (V2StorageProof) isFileContractResolution() {} +func (V2FileContract) isFileContractResolution() {} // finalization +func (FileContractExpiration) isFileContractResolution() {} // A FileContractRenewal renews a file contract. type FileContractRenewal struct { - FinalRevision V2FileContract - InitialRevision V2FileContract - RenterRollover Currency - HostRollover Currency + FinalRevision V2FileContract `json:"finalRevision"` + InitialRevision V2FileContract `json:"initialRevision"` + RenterRollover Currency `json:"renterRollover"` + HostRollover Currency `json:"hostRollover"` // signatures cover above fields - RenterSignature Signature - HostSignature Signature + RenterSignature Signature `json:"renterSignature"` + HostSignature Signature `json:"hostSignature"` } // A V2StorageProof asserts the presence of a randomly-selected leaf within the @@ -562,11 +562,16 @@ type V2StorageProof struct { Proof []Hash256 } +// A FileContractExpiration resolves an expired contract. A contract is +// considered expired when its proof window has elapsed. If the contract is not +// storing any data, it will resolve as valid; otherwise, it resolves as missed. +type FileContractExpiration struct{} + // A StateElement is a generic element within the state accumulator. type StateElement struct { - ID Hash256 // SiacoinOutputID, FileContractID, etc. - LeafIndex uint64 - MerkleProof []Hash256 + ID Hash256 `json:"id"` // SiacoinOutputID, FileContractID, etc. 
+ LeafIndex uint64 `json:"leafIndex"` + MerkleProof []Hash256 `json:"merkleProof"` } // A SiacoinElement is a volume of siacoins that is created and spent as an @@ -574,7 +579,7 @@ type StateElement struct { type SiacoinElement struct { StateElement SiacoinOutput - MaturityHeight uint64 + MaturityHeight uint64 `json:"maturityHeight"` } // A SiafundElement is a volume of siafunds that is created and spent as an @@ -582,7 +587,7 @@ type SiacoinElement struct { type SiafundElement struct { StateElement SiafundOutput - ClaimStart Currency // value of SiafundPool when element was created + ClaimStart Currency `json:"claimStart"` // value of SiafundPool when element was created } // A FileContractElement is a storage agreement between a renter and a host. @@ -598,24 +603,24 @@ type FileContractElement struct { // previous attestations with the same key. (This allows hosts to announce a new // network address, for example.) type Attestation struct { - PublicKey PublicKey - Key string - Value []byte - Signature Signature + PublicKey PublicKey `json:"publicKey"` + Key string `json:"key"` + Value []byte `json:"value"` + Signature Signature `json:"signature"` } // A V2Transaction effects a change of blockchain state. 
type V2Transaction struct { - SiacoinInputs []V2SiacoinInput `json:"siacoinInputs"` - SiacoinOutputs []SiacoinOutput `json:"siacoinOutputs"` - SiafundInputs []V2SiafundInput `json:"siafundInputs"` - SiafundOutputs []SiafundOutput `json:"siafundOutputs"` - FileContracts []V2FileContract `json:"fileContracts"` - FileContractRevisions []V2FileContractRevision `json:"fileContractRevisions"` - FileContractResolutions []FileContractResolution `json:"fileContractResolutions"` - Attestations []Attestation `json:"attestations"` - ArbitraryData []byte `json:"arbitraryData"` - NewFoundationAddress Address `json:"newFoundationAddress"` + SiacoinInputs []V2SiacoinInput `json:"siacoinInputs,omitempty"` + SiacoinOutputs []SiacoinOutput `json:"siacoinOutputs,omitempty"` + SiafundInputs []V2SiafundInput `json:"siafundInputs,omitempty"` + SiafundOutputs []SiafundOutput `json:"siafundOutputs,omitempty"` + FileContracts []V2FileContract `json:"fileContracts,omitempty"` + FileContractRevisions []V2FileContractRevision `json:"fileContractRevisions,omitempty"` + FileContractResolutions []FileContractResolution `json:"fileContractResolutions,omitempty"` + Attestations []Attestation `json:"attestations,omitempty"` + ArbitraryData []byte `json:"arbitraryData,omitempty"` + NewFoundationAddress *Address `json:"newFoundationAddress,omitempty"` MinerFee Currency `json:"minerFee"` } @@ -666,15 +671,16 @@ func (txn *V2Transaction) ID() TransactionID { h.E.WritePrefix(len(txn.FileContractResolutions)) for _, fcr := range txn.FileContractResolutions { fcr.Parent.ID.EncodeTo(h.E) - fcr.Renewal.EncodeTo(h.E) - fcr.StorageProof.WindowStart.EncodeTo(h.E) - fcr.Finalization.EncodeTo(h.E) + fcr.Resolution.(EncoderTo).EncodeTo(h.E) } for _, a := range txn.Attestations { a.EncodeTo(h.E) } h.E.WriteBytes(txn.ArbitraryData) - txn.NewFoundationAddress.EncodeTo(h.E) + h.E.WriteBool(txn.NewFoundationAddress != nil) + if txn.NewFoundationAddress != nil { + txn.NewFoundationAddress.EncodeTo(h.E) + } 
txn.MinerFee.EncodeTo(h.E) return TransactionID(h.Sum()) } @@ -747,8 +753,11 @@ func (txn *V2Transaction) DeepCopy() V2Transaction { c.FileContractResolutions = append([]FileContractResolution(nil), c.FileContractResolutions...) for i := range c.FileContractResolutions { c.FileContractResolutions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractResolutions[i].Parent.MerkleProof...) - c.FileContractResolutions[i].StorageProof.WindowProof = append([]Hash256(nil), c.FileContractResolutions[i].StorageProof.WindowProof...) - c.FileContractResolutions[i].StorageProof.Proof = append([]Hash256(nil), c.FileContractResolutions[i].StorageProof.Proof...) + if sp, ok := c.FileContractResolutions[i].Resolution.(V2StorageProof); ok { + sp.WindowProof = append([]Hash256(nil), sp.WindowProof...) + sp.Proof = append([]Hash256(nil), sp.Proof...) + c.FileContractResolutions[i].Resolution = sp + } } for i := range c.Attestations { c.Attestations[i].Value = append([]byte(nil), c.Attestations[i].Value...) @@ -1023,7 +1032,7 @@ func (sp StorageProof) MarshalJSON() ([]byte, error) { }{sp.ParentID, hex.EncodeToString(sp.Leaf[:]), sp.Proof}) } -// UnmarshalJSON implements json.Marshaler. +// UnmarshalJSON implements json.Unmarshaler. func (sp *StorageProof) UnmarshalJSON(b []byte) error { var leaf string err := json.Unmarshal(b, &struct { @@ -1040,3 +1049,65 @@ func (sp *StorageProof) UnmarshalJSON(b []byte) error { } return nil } + +// MarshalJSON implements json.Marshaler. 
+func (res FileContractResolution) MarshalJSON() ([]byte, error) { + var typ string + switch res.Resolution.(type) { + case FileContractRenewal: + typ = "renewal" + case V2StorageProof: + typ = "storage proof" + case V2FileContract: + typ = "finalization" + case FileContractExpiration: + typ = "expiration" + } + return json.Marshal(struct { + Parent FileContractElement `json:"parent"` + Type string `json:"type"` + Resolution FileContractResolutionType `json:"resolution,omitempty"` + }{res.Parent, typ, res.Resolution}) +} + +// UnmarshalJSON implements json.Marshaler. +func (res *FileContractResolution) UnmarshalJSON(b []byte) error { + var p struct { + Parent FileContractElement + Type string + Resolution json.RawMessage + } + if err := json.Unmarshal(b, &p); err != nil { + return err + } + switch p.Type { + case "renewal": + var r FileContractRenewal + if err := json.Unmarshal(p.Resolution, &r); err != nil { + return err + } + res.Resolution = r + case "storage proof": + var r V2StorageProof + if err := json.Unmarshal(p.Resolution, &r); err != nil { + return err + } + res.Resolution = r + case "finalization": + var r V2FileContract + if err := json.Unmarshal(p.Resolution, &r); err != nil { + return err + } + res.Resolution = r + case "expiration": + var r FileContractExpiration + if err := json.Unmarshal(p.Resolution, &r); err != nil { + return err + } + res.Resolution = r + default: + return fmt.Errorf("unknown file contract resolution type %q", p.Type) + } + res.Parent = p.Parent + return nil +} From f8134a765aef5e0b56e2bdb53f5bfc1cc7c4cb50 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Fri, 9 Jun 2023 22:00:19 -0400 Subject: [PATCH 08/53] types,consensus: Implement v2 MidState support --- consensus/merkle.go | 528 ++++++++++++++++++++++++++++++++++++++++ consensus/state.go | 121 +++++++++ consensus/update.go | 187 ++++++++++++-- consensus/validation.go | 476 +++++++++++++++++++++++++++++++++++- types/encoding.go | 14 +- types/types.go | 70 +++--- 6 files 
changed, 1327 insertions(+), 69 deletions(-) create mode 100644 consensus/merkle.go diff --git a/consensus/merkle.go b/consensus/merkle.go new file mode 100644 index 00000000..5a406cf1 --- /dev/null +++ b/consensus/merkle.go @@ -0,0 +1,528 @@ +package consensus + +import ( + "encoding/binary" + "encoding/json" + "errors" + "math/bits" + "sort" + + "go.sia.tech/core/internal/blake2b" + "go.sia.tech/core/types" +) + +// from RFC 6961 +const leafHashPrefix = 0x00 + +// mergeHeight returns the height at which the proof paths of x and y merge. +func mergeHeight(x, y uint64) int { return bits.Len64(x ^ y) } + +// clearBits clears the n least significant bits of x. +func clearBits(x uint64, n int) uint64 { return x &^ (1< startOfNewTree && j >= 0; j-- { + leaves[j].MerkleProof = append(leaves[j].MerkleProof, oldRoot) + } + for ; j > startOfOldTree && j >= 0; j-- { + leaves[j].MerkleProof = append(leaves[j].MerkleProof, h) + } + // Record the left- and right-hand roots in treeGrowth, where + // applicable. + curTreeIndex := (acc.numLeaves + 1) - 1<= curTreeIndex { + treeGrowth[bit] = append(treeGrowth[bit], oldRoot) + } else if treeStartIndex >= prevTreeIndex { + treeGrowth[bit] = append(treeGrowth[bit], h) + } + } + // Merge with the existing tree at this height. Since we're always + // adding leaves on the right-hand side of the tree, the existing + // root is always the left-hand sibling. + h = blake2b.SumPair(oldRoot, h) + } + } + return treeGrowth +} + +func splitLeaves(ls []ElementLeaf, mid uint64) (left, right []ElementLeaf) { + split := sort.Search(len(ls), func(i int) bool { return ls[i].LeafIndex >= mid }) + return ls[:split], ls[split:] +} + +// updateLeaves overwrites the specified leaves in the accumulator. It updates +// the Merkle proofs of each leaf, and returns the leaves (grouped by tree) for +// later use. 
+func (acc *ElementAccumulator) updateLeaves(leaves []ElementLeaf) [64][]ElementLeaf { + var recompute func(i, j uint64, leaves []ElementLeaf) types.Hash256 + recompute = func(i, j uint64, leaves []ElementLeaf) types.Hash256 { + height := bits.TrailingZeros64(j - i) // equivalent to log2(j-i), as j-i is always a power of two + if len(leaves) == 1 && height == 0 { + return leaves[0].Hash() + } + mid := (i + j) / 2 + left, right := splitLeaves(leaves, mid) + var leftRoot, rightRoot types.Hash256 + if len(left) == 0 { + leftRoot = right[0].MerkleProof[height-1] + } else { + leftRoot = recompute(i, mid, left) + for i := range right { + right[i].MerkleProof[height-1] = leftRoot + } + } + if len(right) == 0 { + rightRoot = left[0].MerkleProof[height-1] + } else { + rightRoot = recompute(mid, j, right) + for i := range left { + left[i].MerkleProof[height-1] = rightRoot + } + } + return blake2b.SumPair(leftRoot, rightRoot) + } + + // Group leaves by tree, and sort them by leaf index. + var trees [64][]ElementLeaf + sort.Slice(leaves, func(i, j int) bool { + if len(leaves[i].MerkleProof) != len(leaves[j].MerkleProof) { + return len(leaves[i].MerkleProof) < len(leaves[j].MerkleProof) + } + return leaves[i].LeafIndex < leaves[j].LeafIndex + }) + for len(leaves) > 0 { + i := 0 + for i < len(leaves) && len(leaves[i].MerkleProof) == len(leaves[0].MerkleProof) { + i++ + } + trees[len(leaves[0].MerkleProof)] = leaves[:i] + leaves = leaves[i:] + } + + // Recompute the root of each tree with updated leaves, and fill in the + // proof of each leaf. + for height, leaves := range &trees { + if len(leaves) == 0 { + continue + } + // Determine the range of leaf indices that comprise this tree. We can + // compute this efficiently by zeroing the least-significant bits of + // numLeaves. (Zeroing these bits is equivalent to subtracting the + // number of leaves in all trees smaller than this one.) 
+ start := clearBits(acc.numLeaves, height+1) + end := start + 1<= eru.numLeaves { + panic("cannot update an element that is not present in the accumulator") + } + if mh := mergeHeight(eru.numLeaves, e.LeafIndex); mh <= len(e.MerkleProof) { + e.MerkleProof = e.MerkleProof[:mh-1] + } + updateProof(e, &eru.updated) +} + +func historyLeafHash(index types.ChainIndex) types.Hash256 { + buf := make([]byte, 1+8+32) + buf[0] = leafHashPrefix + binary.LittleEndian.PutUint64(buf[1:], index.Height) + copy(buf[9:], index.ID[:]) + return types.HashBytes(buf) +} + +func historyProofRoot(index types.ChainIndex, proof []types.Hash256) types.Hash256 { + return proofRoot(historyLeafHash(index), index.Height, proof) +} + +// A HistoryAccumulator tracks the state of all ChainIndexs in a chain without +// storing the full sequence of indexes itself. +type HistoryAccumulator struct { + accumulator +} + +// Contains returns true if the accumulator contains the given index. +func (acc *HistoryAccumulator) Contains(index types.ChainIndex, proof []types.Hash256) bool { + return acc.hasTreeAtHeight(len(proof)) && acc.trees[len(proof)] == historyProofRoot(index, proof) +} + +// ApplyBlock integrates a ChainIndex into the accumulator, producing a +// HistoryApplyUpdate. +func (acc *HistoryAccumulator) ApplyBlock(index types.ChainIndex) (hau HistoryApplyUpdate) { + h := historyLeafHash(index) + i := 0 + for ; acc.hasTreeAtHeight(i); i++ { + hau.proof = append(hau.proof, acc.trees[i]) + hau.growth = append(hau.growth, h) + h = blake2b.SumPair(acc.trees[i], h) + } + acc.trees[i] = h + acc.numLeaves++ + return +} + +// RevertBlock produces a HistoryRevertUpdate from a ChainIndex. +func (acc *HistoryAccumulator) RevertBlock(index types.ChainIndex) HistoryRevertUpdate { + return HistoryRevertUpdate{index} +} + +// A HistoryApplyUpdate reflects the changes to a HistoryAccumulator resulting +// from the application of a block. 
+type HistoryApplyUpdate struct { + proof []types.Hash256 + growth []types.Hash256 +} + +// HistoryProof returns a history proof for the applied block. To prevent +// aliasing, it always returns new memory. +func (hau *HistoryApplyUpdate) HistoryProof() []types.Hash256 { + return append([]types.Hash256(nil), hau.proof...) +} + +// UpdateProof updates the supplied history proof to incorporate changes made to +// the chain history. The proof must be up-to-date; if it is not, UpdateProof +// may panic. +func (hau *HistoryApplyUpdate) UpdateProof(proof *[]types.Hash256) { + if len(hau.growth) > len(*proof) { + *proof = append(*proof, hau.growth[len(*proof)]) + *proof = append(*proof, hau.proof[len(*proof):]...) + } +} + +// UpdateHistoryProof updates the supplied storage proof to incorporate changes +// made to the chain history. The proof must be up-to-date; if it is not, +// UpdateHistoryProof may panic. +func (hau *HistoryApplyUpdate) UpdateHistoryProof(sp *types.V2StorageProof) { + hau.UpdateProof(&sp.HistoryProof) +} + +// A HistoryRevertUpdate reflects the changes to a HistoryAccumulator resulting +// from the removal of a block. +type HistoryRevertUpdate struct { + index types.ChainIndex +} + +// UpdateProof updates the supplied history proof to incorporate the changes +// made to the chain history. The proof must be up-to-date; if it is not, +// UpdateHistoryProof may panic. +func (hru *HistoryRevertUpdate) UpdateProof(height uint64, proof *[]types.Hash256) { + if mh := mergeHeight(hru.index.Height, height); mh <= len(*proof) { + *proof = (*proof)[:mh-1] + } +} + +// UpdateHistoryProof updates the supplied storage proof to incorporate the +// changes made to the chain history. The proof must be up-to-date; if it is +// not, UpdateHistoryProof may panic. 
+func (hru *HistoryRevertUpdate) UpdateHistoryProof(sp *types.V2StorageProof) { + hru.UpdateProof(sp.ProofStart.Height, &sp.HistoryProof) +} diff --git a/consensus/state.go b/consensus/state.go index ccaf6b98..9035c603 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -105,6 +105,9 @@ type State struct { OakTarget types.BlockID `json:"oakTarget"` FoundationPrimaryAddress types.Address `json:"foundationPrimaryAddress"` FoundationFailsafeAddress types.Address `json:"foundationFailsafeAddress"` + + Elements ElementAccumulator `json:"elements"` + History HistoryAccumulator `json:"history"` } // EncodeTo implements types.EncoderTo. @@ -121,6 +124,8 @@ func (s State) EncodeTo(e *types.Encoder) { s.OakTarget.EncodeTo(e) s.FoundationPrimaryAddress.EncodeTo(e) s.FoundationFailsafeAddress.EncodeTo(e) + s.Elements.EncodeTo(e) + s.History.EncodeTo(e) } // DecodeFrom implements types.DecoderFrom. @@ -137,6 +142,8 @@ func (s *State) DecodeFrom(d *types.Decoder) { s.OakTarget.DecodeFrom(d) s.FoundationPrimaryAddress.DecodeFrom(d) s.FoundationFailsafeAddress.DecodeFrom(d) + s.Elements.DecodeFrom(d) + s.History.DecodeFrom(d) } func (s State) childHeight() uint64 { return s.Index.Height + 1 } @@ -257,6 +264,16 @@ func (s State) FileContractTax(fc types.FileContract) types.Currency { return types.NewCurrency(lo, hi) } +// V2FileContractTax computes the tax levied on a given v2 contract. +func (s State) V2FileContractTax(fc types.V2FileContract) types.Currency { + sum := fc.RenterOutput.Value.Add(fc.HostOutput.Value) + tax := sum.Div64(25) // 4% + // round down to nearest multiple of SiafundCount + _, r := bits.Div64(0, tax.Hi, s.SiafundCount()) + _, r = bits.Div64(r, tax.Lo, s.SiafundCount()) + return tax.Sub(types.NewCurrency64(r)) +} + // StorageProofLeafIndex returns the leaf index used when computing or // validating a storage proof. 
func (s State) StorageProofLeafIndex(filesize uint64, windowStart types.ChainIndex, fcid types.FileContractID) uint64 { @@ -276,6 +293,16 @@ func (s State) StorageProofLeafIndex(filesize uint64, windowStart types.ChainInd return r } +// StorageProofLeafHash computes the leaf hash of file contract data. If +// len(leaf) < 64, it will be extended with zeros. +func (s State) StorageProofLeafHash(leaf []byte) types.Hash256 { + const leafSize = len(types.StorageProof{}.Leaf) + buf := make([]byte, 1+leafSize) + buf[0] = leafHashPrefix + copy(buf[1:], leaf) + return types.HashBytes(buf) +} + // replayPrefix returns the replay protection prefix at the current height. // These prefixes are included in a transaction's SigHash; a new prefix is used // after each hardfork to prevent replay attacks. @@ -422,3 +449,97 @@ func (s State) Commitment(minerAddr types.Address, txns []types.Transaction, v2t txnsHash.EncodeTo(h.E) return h.Sum() } + +// InputSigHash returns the hash that must be signed for each v2 transaction input. +func (s State) InputSigHash(txn types.V2Transaction) types.Hash256 { + // NOTE: This currently covers exactly the same fields as txn.ID(), and for + // similar reasons. 
+ h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + h.E.WriteString("sia/id/transaction|") + h.E.WritePrefix(len(txn.SiacoinInputs)) + for _, in := range txn.SiacoinInputs { + in.Parent.ID.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.SiacoinOutputs)) + for _, out := range txn.SiacoinOutputs { + out.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.SiafundInputs)) + for _, in := range txn.SiafundInputs { + in.Parent.ID.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.SiafundOutputs)) + for _, out := range txn.SiafundOutputs { + out.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.FileContracts)) + for _, fc := range txn.FileContracts { + fc.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.FileContractRevisions)) + for _, fcr := range txn.FileContractRevisions { + fcr.Parent.ID.EncodeTo(h.E) + fcr.Revision.EncodeTo(h.E) + } + h.E.WritePrefix(len(txn.FileContractResolutions)) + for _, fcr := range txn.FileContractResolutions { + fcr.Parent.ID.EncodeTo(h.E) + fcr.Resolution.(types.EncoderTo).EncodeTo(h.E) + } + for _, a := range txn.Attestations { + a.EncodeTo(h.E) + } + h.E.WriteBytes(txn.ArbitraryData) + h.E.WriteBool(txn.NewFoundationAddress != nil) + if txn.NewFoundationAddress != nil { + txn.NewFoundationAddress.EncodeTo(h.E) + } + txn.MinerFee.EncodeTo(h.E) + return h.Sum() +} + +// ContractSigHash returns the hash that must be signed for a v2 contract revision. 
+func (s State) ContractSigHash(fc types.V2FileContract) types.Hash256 { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + h.E.WriteString("sia/sig/filecontract|") + h.E.WriteUint64(fc.Filesize) + fc.FileMerkleRoot.EncodeTo(h.E) + h.E.WriteUint64(fc.ProofHeight) + h.E.WriteUint64(fc.ExpirationHeight) + fc.RenterOutput.EncodeTo(h.E) + fc.HostOutput.EncodeTo(h.E) + fc.MissedHostValue.EncodeTo(h.E) + fc.RenterPublicKey.EncodeTo(h.E) + fc.HostPublicKey.EncodeTo(h.E) + h.E.WriteUint64(fc.RevisionNumber) + return h.Sum() +} + +// RenewalSigHash returns the hash that must be signed for a file contract renewal. +func (s State) RenewalSigHash(fcr types.FileContractRenewal) types.Hash256 { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + h.E.WriteString("sia/sig/filecontractrenewal|") + fcr.FinalRevision.EncodeTo(h.E) + fcr.InitialRevision.EncodeTo(h.E) + fcr.RenterRollover.EncodeTo(h.E) + fcr.HostRollover.EncodeTo(h.E) + return h.Sum() +} + +// AttestationSigHash returns the hash that must be signed for an attestation. 
+func (s State) AttestationSigHash(a types.Attestation) types.Hash256 { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + h.E.WriteString("sia/sig/attestation|") + a.PublicKey.EncodeTo(h.E) + h.E.WriteString(a.Key) + h.E.WriteBytes(a.Value) + return h.Sum() +} diff --git a/consensus/update.go b/consensus/update.go index f445dd34..720b5e20 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -199,6 +199,9 @@ func ApplyState(s State, store Store, b types.Block) State { OakTarget: updateOakTarget(s), FoundationPrimaryAddress: newFoundationPrimaryAddress, FoundationFailsafeAddress: newFoundationFailsafeAddress, + + History: s.History, + Elements: s.Elements, } } @@ -218,9 +221,48 @@ func ApplyState(s State, store Store, b types.Block) State { OakTarget: updateOakTarget(s), FoundationPrimaryAddress: newFoundationPrimaryAddress, FoundationFailsafeAddress: newFoundationFailsafeAddress, + + History: s.History, + Elements: s.Elements, } } +// v2SiacoinOutputID returns the ID of the i'th siacoin output created by the +// transaction. +func v2SiacoinOutputID(txid types.TransactionID, i int) types.SiacoinOutputID { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + types.SpecifierSiacoinOutput.EncodeTo(h.E) + txid.EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return types.SiacoinOutputID(h.Sum()) +} + +// v2SiafundOutputID returns the ID of the i'th siafund output created by the +// transaction. +func v2SiafundOutputID(txid types.TransactionID, i int) types.SiafundOutputID { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + types.SpecifierSiafundOutput.EncodeTo(h.E) + txid.EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return types.SiafundOutputID(h.Sum()) +} + +// v2FileContractID returns the ID of the i'th file contract created by the +// transaction. 
+func v2FileContractID(txid types.TransactionID, i int) types.FileContractID { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + types.SpecifierFileContract.EncodeTo(h.E) + txid.EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return types.FileContractID(h.Sum()) +} + // A SiacoinOutputDiff records the creation, deletion, or spending of a // SiacoinOutput. type SiacoinOutputDiff struct { @@ -382,15 +424,15 @@ func (fcrd *FileContractRevisionDiff) DecodeFrom(d *types.Decoder) { // A TransactionDiff represents the changes to an ElementStore resulting from // the application of a transaction. type TransactionDiff struct { - CreatedSiacoinOutputs []SiacoinOutputDiff `json:"createdSiacoinOutputs"` - ImmatureSiacoinOutputs []DelayedSiacoinOutputDiff `json:"immatureSiacoinOutputs"` - CreatedSiafundOutputs []SiafundOutputDiff `json:"createdSiafundOutputs"` - CreatedFileContracts []FileContractDiff `json:"createdFileContracts"` + CreatedSiacoinOutputs []SiacoinOutputDiff `json:"createdSiacoinOutputs,omitempty"` + ImmatureSiacoinOutputs []DelayedSiacoinOutputDiff `json:"immatureSiacoinOutputs,omitempty"` + CreatedSiafundOutputs []SiafundOutputDiff `json:"createdSiafundOutputs,omitempty"` + CreatedFileContracts []FileContractDiff `json:"createdFileContracts,omitempty"` - SpentSiacoinOutputs []SiacoinOutputDiff `json:"spentSiacoinOutputs"` - SpentSiafundOutputs []SiafundOutputDiff `json:"spentSiafundOutputs"` - RevisedFileContracts []FileContractRevisionDiff `json:"revisedFileContracts"` - ValidFileContracts []FileContractDiff `json:"validFileContracts"` + SpentSiacoinOutputs []SiacoinOutputDiff `json:"spentSiacoinOutputs,omitempty"` + SpentSiafundOutputs []SiafundOutputDiff `json:"spentSiafundOutputs,omitempty"` + RevisedFileContracts []FileContractRevisionDiff `json:"revisedFileContracts,omitempty"` + ValidFileContracts []FileContractDiff `json:"validFileContracts,omitempty"` } // EncodeTo implements types.EncoderTo. 
@@ -465,13 +507,54 @@ func (td *TransactionDiff) DecodeFrom(d *types.Decoder) { } } -// A BlockDiff represents the changes to a Store resulting from the application -// of a block. +// A V2TransactionDiff contains the elements added to the state accumulator by a +// v2 transaction. +type V2TransactionDiff struct { + CreatedSiacoinElements []types.SiacoinElement `json:"createdSiacoinElements,omitempty"` + CreatedSiafundElements []types.SiafundElement `json:"createdSiafundElements,omitempty"` + CreatedFileContracts []types.FileContractElement `json:"createdFileContracts,omitempty"` +} + +// EncodeTo implements types.EncoderTo. +func (td V2TransactionDiff) EncodeTo(e *types.Encoder) { + e.WritePrefix(len(td.CreatedSiacoinElements)) + for i := range td.CreatedSiacoinElements { + td.CreatedSiacoinElements[i].EncodeTo(e) + } + e.WritePrefix(len(td.CreatedSiafundElements)) + for i := range td.CreatedSiafundElements { + td.CreatedSiafundElements[i].EncodeTo(e) + } + e.WritePrefix(len(td.CreatedFileContracts)) + for i := range td.CreatedFileContracts { + td.CreatedFileContracts[i].EncodeTo(e) + } +} + +// DecodeFrom implements types.DecoderFrom. +func (td *V2TransactionDiff) DecodeFrom(d *types.Decoder) { + td.CreatedSiacoinElements = make([]types.SiacoinElement, d.ReadPrefix()) + for i := range td.CreatedSiacoinElements { + td.CreatedSiacoinElements[i].DecodeFrom(d) + } + td.CreatedSiafundElements = make([]types.SiafundElement, d.ReadPrefix()) + for i := range td.CreatedSiafundElements { + td.CreatedSiafundElements[i].DecodeFrom(d) + } + td.CreatedFileContracts = make([]types.FileContractElement, d.ReadPrefix()) + for i := range td.CreatedFileContracts { + td.CreatedFileContracts[i].DecodeFrom(d) + } +} + +// A BlockDiff represents the changes to blockchain state resulting from the +// application of a block. 
type BlockDiff struct { - Transactions []TransactionDiff `json:"transactions"` - MaturedSiacoinOutputs []DelayedSiacoinOutputDiff `json:"maturedSiacoinOutputs"` - ImmatureSiacoinOutputs []DelayedSiacoinOutputDiff `json:"immatureSiacoinOutputs"` - MissedFileContracts []FileContractDiff `json:"missedFileContracts"` + Transactions []TransactionDiff `json:"transactions,omitempty"` + V2Transactions []V2TransactionDiff `json:"v2Transactions,omitempty"` + MaturedSiacoinOutputs []DelayedSiacoinOutputDiff `json:"maturedSiacoinOutputs,omitempty"` + ImmatureSiacoinOutputs []DelayedSiacoinOutputDiff `json:"immatureSiacoinOutputs,omitempty"` + MissedFileContracts []FileContractDiff `json:"missedFileContracts,omitempty"` } // EncodeTo implements types.EncoderTo. @@ -540,12 +623,42 @@ func (ms *MidState) ApplyTransaction(store Store, txn types.Transaction) { newContract := fcr.FileContract newContract.Payout = fc.Payout // see types.FileContractRevision docstring ms.fcs[fcr.ParentID] = newContract + ms.fcs[contractRevisionID(fcr.ParentID, fcr.RevisionNumber)] = fc // store previous revision for Diff later } for _, sp := range txn.StorageProofs { ms.spends[types.Hash256(sp.ParentID)] = txid } } +// ApplyV2Transaction applies a v2 transaction to the MidState. 
+func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { + txid := txn.ID() + for _, sci := range txn.SiacoinInputs { + ms.spends[sci.Parent.ID] = txid + } + for i, sco := range txn.SiacoinOutputs { + ms.scos[v2SiacoinOutputID(txid, i)] = sco + } + for _, sfi := range txn.SiafundInputs { + ms.spends[sfi.Parent.ID] = txid + } + for i, sfo := range txn.SiafundOutputs { + sfoid := v2SiafundOutputID(txid, i) + ms.sfos[sfoid] = sfo + ms.claims[sfoid] = ms.siafundPool + } + for i, fc := range txn.FileContracts { + ms.v2fcs[v2FileContractID(txid, i)] = fc + ms.siafundPool = ms.siafundPool.Add(ms.base.V2FileContractTax(fc)) + } + for _, fcr := range txn.FileContractRevisions { + ms.v2fcs[types.FileContractID(fcr.Parent.ID)] = fcr.Revision + } + for _, res := range txn.FileContractResolutions { + ms.spends[res.Parent.ID] = txid + } +} + // ApplyDiff applies b to s, returning the resulting effects. func ApplyDiff(s State, store Store, b types.Block) BlockDiff { if s.Index.Height > 0 && s.Index.ID != b.ParentID { @@ -600,7 +713,7 @@ func ApplyDiff(s State, store Store, b types.Block) BlockDiff { }) } for _, fcr := range txn.FileContractRevisions { - fc := ms.mustFileContract(store, fcr.ParentID) + fc := ms.mustFileContractParentRevision(store, fcr.ParentID, fcr.RevisionNumber) newContract := fcr.FileContract newContract.Payout = fc.Payout // see types.FileContractRevision docstring tdiff.RevisedFileContracts = append(tdiff.RevisedFileContracts, FileContractRevisionDiff{ @@ -628,6 +741,50 @@ func ApplyDiff(s State, store Store, b types.Block) BlockDiff { ms.ApplyTransaction(store, txn) } + if b.V2 != nil { + for _, txn := range b.V2.Transactions { + var tdiff V2TransactionDiff + txid := txn.ID() + for _, sco := range txn.SiacoinOutputs { + tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(v2SiacoinOutputID(txid, len(tdiff.CreatedSiacoinElements)))}, + SiacoinOutput: sco, + }) 
+ } + for _, fc := range txn.FileContracts { + tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.FileContractElement{ + StateElement: types.StateElement{ID: types.Hash256(v2FileContractID(txid, len(tdiff.CreatedFileContracts)))}, + V2FileContract: fc, + }) + } + for _, sfi := range txn.SiafundInputs { + claimPortion := ms.siafundPool.Sub(sfi.Parent.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfi.Parent.Value) + tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(v2SiacoinOutputID(txid, len(tdiff.CreatedSiacoinElements)))}, + SiacoinOutput: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, + MaturityHeight: s.MaturityHeight(), + }) + } + for _, sfo := range txn.SiafundOutputs { + tdiff.CreatedSiafundElements = append(tdiff.CreatedSiafundElements, types.SiafundElement{ + StateElement: types.StateElement{ID: types.Hash256(v2SiafundOutputID(txid, len(tdiff.CreatedSiafundElements)))}, + SiafundOutput: sfo, + ClaimStart: ms.siafundPool, + }) + } + for _, res := range txn.FileContractResolutions { + if r, ok := res.Resolution.(types.FileContractRenewal); ok { + tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.FileContractElement{ + StateElement: types.StateElement{ID: types.Hash256(v2FileContractID(txid, len(tdiff.CreatedFileContracts)))}, + V2FileContract: r.InitialRevision, + }) + } + } + diff.V2Transactions = append(diff.V2Transactions, tdiff) + ms.ApplyV2Transaction(txn) + } + } + bid := b.ID() diff.MaturedSiacoinOutputs = store.MaturedSiacoinOutputs(s.childHeight()) for i, sco := range b.MinerPayouts { diff --git a/consensus/validation.go b/consensus/validation.go index f94b9e24..2befbe44 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -27,25 +27,35 @@ func ValidateHeader(s State, parentID types.BlockID, timestamp time.Time, nonce func validateMinerPayouts(s State, b types.Block) error { 
expectedSum := s.BlockReward() + var overflow bool for _, txn := range b.Transactions { for _, fee := range txn.MinerFees { if fee.IsZero() { return errors.New("transaction fee has zero value") } - var overflow bool expectedSum, overflow = expectedSum.AddWithOverflow(fee) if overflow { return errors.New("transaction fees overflow") } } } + if b.V2 != nil { + for _, txn := range b.V2.Transactions { + expectedSum, overflow = expectedSum.AddWithOverflow(txn.MinerFee) + if overflow { + return errors.New("transaction fees overflow") + } + } + if len(b.MinerPayouts) != 1 { + return errors.New("block has multiple miner payouts") + } + } var sum types.Currency for _, mp := range b.MinerPayouts { if mp.Value.IsZero() { return errors.New("miner payout has zero value") } - var overflow bool sum, overflow = sum.AddWithOverflow(mp.Value) if overflow { return errors.New("miner payouts overflow") @@ -62,10 +72,18 @@ func ValidateOrphan(s State, b types.Block) error { // TODO: calculate size more efficiently if uint64(types.EncodedLen(b)) > s.MaxBlockWeight() { return errors.New("block exceeds maximum weight") - } else if err := ValidateHeader(s, b.ParentID, b.Timestamp, b.Nonce, b.ID()); err != nil { - return err } else if err := validateMinerPayouts(s, b); err != nil { return err + } else if err := ValidateHeader(s, b.ParentID, b.Timestamp, b.Nonce, b.ID()); err != nil { + return err + } + + if b.V2 != nil { + if b.V2.Height != s.Index.Height+1 { + return errors.New("block height does not increment parent height") + } else if s.Commitment(b.MinerPayouts[0].Address, b.Transactions, b.V2.Transactions) != b.V2.Commitment { + return errors.New("commitment hash does not match header") + } } return nil } @@ -77,6 +95,7 @@ type MidState struct { sfos map[types.SiafundOutputID]types.SiafundOutput claims map[types.SiafundOutputID]types.Currency fcs map[types.FileContractID]types.FileContract + v2fcs map[types.FileContractID]types.V2FileContract spends 
map[types.Hash256]types.TransactionID siafundPool types.Currency } @@ -136,11 +155,35 @@ func (ms *MidState) mustFileContract(store Store, id types.FileContractID) types return fc } +func contractRevisionID(id types.FileContractID, revisionNumber uint64) types.FileContractID { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + id.EncodeTo(h.E) + h.E.WriteUint64(revisionNumber) + return types.FileContractID(h.Sum()) +} + +func (ms *MidState) mustFileContractParentRevision(store Store, id types.FileContractID, newRevisionNumber uint64) types.FileContract { + fc, ok := ms.fileContract(store, contractRevisionID(id, newRevisionNumber)) + if !ok { + if fc, ok = ms.fileContract(store, id); !ok { + panic("missing FileContract") + } + } + return fc +} + func (ms *MidState) spent(id types.Hash256) (types.TransactionID, bool) { txid, ok := ms.spends[id] return txid, ok } +func (ms *MidState) v2Revision(id types.FileContractID) (types.V2FileContract, bool) { + fc, ok := ms.v2fcs[id] + return fc, ok +} + // NewMidState constructs a MidState initialized to the provided base state. 
func NewMidState(s State) *MidState { return &MidState{ @@ -149,6 +192,7 @@ func NewMidState(s State) *MidState { sfos: make(map[types.SiafundOutputID]types.SiafundOutput), claims: make(map[types.SiafundOutputID]types.Currency), fcs: make(map[types.FileContractID]types.FileContract), + v2fcs: make(map[types.FileContractID]types.V2FileContract), spends: make(map[types.Hash256]types.TransactionID), siafundPool: s.SiafundPool, } @@ -228,10 +272,6 @@ func validateMinimumValues(ms *MidState, txn types.Transaction) error { } func validateSiacoins(ms *MidState, store Store, txn types.Transaction) error { - // NOTE: storage proofs and siafund claim outputs can also create new - // siacoin outputs, but we don't need to account for them here because they - // have a maturity delay and are thus unspendable within the same block - var inputSum types.Currency for i, sci := range txn.SiacoinInputs { if sci.UnlockConditions.Timelock > ms.base.childHeight() { @@ -407,7 +447,7 @@ func validateFileContracts(ms *MidState, store Store, txn types.Transaction) err for i, sp := range txn.StorageProofs { if txid, ok := ms.spent(types.Hash256(sp.ParentID)); ok { - return fmt.Errorf("storage proof %v conflicts with previous proof or revision (in %v)", i, txid) + return fmt.Errorf("storage proof %v conflicts with previous proof (in %v)", i, txid) } fc, ok := ms.fileContract(store, sp.ParentID) if !ok { @@ -555,6 +595,416 @@ func ValidateTransaction(ms *MidState, store Store, txn types.Transaction) error return nil } +func validateV2CurrencyValues(ms *MidState, txn types.V2Transaction) error { + // Add up all of the currency values in the transaction and check for + // overflow. This allows us to freely add any currency values in later + // validation functions without worrying about overflow. 
+ + var sum types.Currency + var overflow bool + add := func(x types.Currency) { + if !overflow { + sum, overflow = sum.AddWithOverflow(x) + } + } + addContract := func(fc types.V2FileContract) { + add(fc.RenterOutput.Value) + add(fc.HostOutput.Value) + add(fc.MissedHostValue) + add(fc.TotalCollateral) + add(ms.base.V2FileContractTax(fc)) + } + + for i, sco := range txn.SiacoinOutputs { + if sco.Value.IsZero() { + return fmt.Errorf("siacoin output %v has zero value", i) + } + add(sco.Value) + } + for i, sfo := range txn.SiafundOutputs { + if sfo.Value == 0 { + return fmt.Errorf("siafund output %v has zero value", i) + } + overflow = overflow || sfo.Value > ms.base.SiafundCount() + } + for i, fc := range txn.FileContracts { + if fc.RenterOutput.Value.IsZero() && fc.HostOutput.Value.IsZero() { + return fmt.Errorf("file contract %v has zero value", i) + } + addContract(fc) + } + for _, fc := range txn.FileContractRevisions { + addContract(fc.Revision) + } + for i, res := range txn.FileContractResolutions { + switch r := res.Resolution.(type) { + case types.FileContractRenewal: + if r.InitialRevision.RenterOutput.Value.IsZero() && r.InitialRevision.HostOutput.Value.IsZero() { + return fmt.Errorf("file contract renewal %v creates contract with zero value", i) + } + addContract(r.InitialRevision) + add(r.RenterRollover) + add(r.HostRollover) + case types.V2FileContract: + addContract(r) + } + } + add(txn.MinerFee) + if overflow { + return errors.New("transaction outputs exceed inputs") // technically true + } + return nil +} + +func validateSpendPolicy(s State, p types.SpendPolicy, sigHash types.Hash256, sigs []types.Signature) error { + var verify func(types.SpendPolicy) error + verify = func(p types.SpendPolicy) error { + switch p := p.Type.(type) { + case types.PolicyTypeAbove: + if s.Index.Height > uint64(p) { + return nil + } + return fmt.Errorf("height not above %v", uint64(p)) + case types.PolicyTypePublicKey: + for i := range sigs { + if 
types.PublicKey(p).VerifyHash(sigHash, sigs[i]) { + sigs = sigs[i+1:] + return nil + } + } + return errors.New("no signatures matching pubkey") + case types.PolicyTypeThreshold: + for i := 0; i < len(p.Of) && p.N > 0 && len(p.Of[i:]) >= int(p.N); i++ { + if verify(p.Of[i]) == nil { + p.N-- + } + } + if p.N == 0 { + return nil + } + return errors.New("threshold not reached") + case types.PolicyTypeUnlockConditions: + if err := verify(types.PolicyAbove(p.Timelock)); err != nil { + return err + } + if p.SignaturesRequired > 255 { + return fmt.Errorf("too many signatures required (%v > 255)", p.SignaturesRequired) + } + n := uint8(p.SignaturesRequired) + of := make([]types.SpendPolicy, len(p.PublicKeys)) + for i, pk := range p.PublicKeys { + if pk.Algorithm != types.SpecifierEd25519 { + return fmt.Errorf("unsupported algorithm %v", pk.Algorithm) + } else if len(pk.Key) != len(types.PublicKey{}) { + return fmt.Errorf("invalid Ed25519 key length %v", len(pk.Key)) + } + of[i] = types.PolicyPublicKey(*(*types.PublicKey)(pk.Key)) + } + return verify(types.PolicyThreshold(n, of)) + default: + panic("invalid policy type") // developer error + } + } + return verify(p) +} + +func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { + sigHash := ms.base.InputSigHash(txn) + for i, sci := range txn.SiacoinInputs { + if txid, ok := ms.spent(sci.Parent.ID); ok { + return fmt.Errorf("siacoin input %v double-spends parent output (previously spent in %v)", i, txid) + } + + // check accumulator + if sci.Parent.LeafIndex == types.EphemeralLeafIndex { + if _, ok := ms.scos[types.SiacoinOutputID(sci.Parent.ID)]; !ok { + return fmt.Errorf("siacoin input %v spends nonexistent ephemeral output %v", i, sci.Parent.ID) + } + } else if !ms.base.Elements.ContainsUnspentSiacoinElement(sci.Parent) { + if ms.base.Elements.ContainsSpentSiacoinElement(sci.Parent) { + return fmt.Errorf("siacoin input %v double-spends output %v", i, sci.Parent.ID) + } + return fmt.Errorf("siacoin input %v 
spends output (%v) not present in the accumulator", i, sci.Parent.ID) + } + + // check spend policy + if sci.SpendPolicy.Address() != sci.Parent.Address { + return fmt.Errorf("siacoin input %v claims incorrect policy for parent address", i) + } else if err := validateSpendPolicy(ms.base, sci.SpendPolicy, sigHash, sci.Signatures); err != nil { + return fmt.Errorf("siacoin input %v failed to satisfy spend policy: %w", i, err) + } + } + + var inputSum, outputSum types.Currency + for _, sci := range txn.SiacoinInputs { + inputSum = inputSum.Add(sci.Parent.Value) + } + for _, out := range txn.SiacoinOutputs { + outputSum = outputSum.Add(out.Value) + } + for _, fc := range txn.FileContracts { + outputSum = outputSum.Add(fc.RenterOutput.Value).Add(fc.HostOutput.Value).Add(ms.base.V2FileContractTax(fc)) + } + for _, res := range txn.FileContractResolutions { + if r, ok := res.Resolution.(types.FileContractRenewal); ok { + // a renewal creates a new contract, optionally "rolling over" funds + // from the old contract + inputSum = inputSum.Add(r.RenterRollover) + inputSum = inputSum.Add(r.HostRollover) + + rev := r.InitialRevision + outputSum = outputSum.Add(rev.RenterOutput.Value).Add(rev.HostOutput.Value).Add(ms.base.V2FileContractTax(rev)) + } + } + outputSum = outputSum.Add(txn.MinerFee) + if inputSum != outputSum { + return fmt.Errorf("siacoin inputs (%d H) do not equal outputs (%d H)", inputSum, outputSum) + } + + return nil +} + +func validateV2Siafunds(ms *MidState, txn types.V2Transaction) error { + sigHash := ms.base.InputSigHash(txn) + for i, sci := range txn.SiafundInputs { + if txid, ok := ms.spent(sci.Parent.ID); ok { + return fmt.Errorf("siafund input %v double-spends parent output (previously spent in %v)", i, txid) + } + + // check accumulator + if sci.Parent.LeafIndex == types.EphemeralLeafIndex { + if _, ok := ms.sfos[types.SiafundOutputID(sci.Parent.ID)]; !ok { + return fmt.Errorf("siafund input %v spends nonexistent ephemeral output %v", i, 
sci.Parent.ID) + } + } else if !ms.base.Elements.ContainsUnspentSiafundElement(sci.Parent) { + if ms.base.Elements.ContainsSpentSiafundElement(sci.Parent) { + return fmt.Errorf("siafund input %v double-spends output %v", i, sci.Parent.ID) + } + return fmt.Errorf("siafund input %v spends output (%v) not present in the accumulator", i, sci.Parent.ID) + } + + // check spend policy + if sci.SpendPolicy.Address() != sci.Parent.Address { + return fmt.Errorf("siafund input %v claims incorrect policy for parent address", i) + } else if err := validateSpendPolicy(ms.base, sci.SpendPolicy, sigHash, sci.Signatures); err != nil { + return fmt.Errorf("siafund input %v failed to satisfy spend policy: %w", i, err) + } + } + + var inputSum, outputSum uint64 + for _, in := range txn.SiafundInputs { + inputSum += in.Parent.Value + } + for _, out := range txn.SiafundOutputs { + outputSum += out.Value + } + if inputSum != outputSum { + return fmt.Errorf("siafund inputs (%d SF) do not equal outputs (%d SF)", inputSum, outputSum) + } + return nil +} + +func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { + // Contract resolutions are height-sensitive, and thus can be invalidated by + // shallow reorgs; to minimize disruption, we require that transactions + // containing a resolution do not create new outputs. Creating, revising or + // resolving contracts *is* permitted, as these effects are generally not + // "built upon" as quickly as outputs, and therefore cause less disruption. 
+ if len(txn.FileContractResolutions) > 0 && + (len(txn.SiacoinOutputs) > 0 || len(txn.SiafundOutputs) > 0) { + return errors.New("transaction both resolves a file contract and creates new outputs") + } + + validateParent := func(fce types.FileContractElement) error { + if txid, ok := ms.spent(fce.ID); ok { + return fmt.Errorf("has already been resolved in transaction %v", txid) + } else if !ms.base.Elements.ContainsUnresolvedFileContractElement(fce) { + if ms.base.Elements.ContainsResolvedFileContractElement(fce) { + return errors.New("has already been resolved in a previous block") + } + return errors.New("is not present in the accumulator") + } + return nil + } + + validateContract := func(fc types.V2FileContract) error { + switch { + case fc.ProofHeight < ms.base.childHeight(): + return fmt.Errorf("has proof height (%v) that has already passed", fc.ProofHeight) + case fc.ExpirationHeight <= fc.ProofHeight: + return fmt.Errorf("leaves no time between proof height (%v) and expiration height (%v)", fc.ProofHeight, fc.ExpirationHeight) + case fc.MissedHostValue.Cmp(fc.HostOutput.Value) > 0: + return fmt.Errorf("has missed host value (%d H) exceeding valid host value (%d H)", fc.MissedHostValue, fc.HostOutput.Value) + case fc.TotalCollateral.Cmp(fc.HostOutput.Value) > 0: + return fmt.Errorf("has total collateral (%d H) exceeding valid host value (%d H)", fc.TotalCollateral, fc.HostOutput.Value) + } + contractHash := ms.base.ContractSigHash(fc) + if !fc.RenterPublicKey.VerifyHash(contractHash, fc.RenterSignature) { + return fmt.Errorf("has invalid renter signature") + } else if !fc.HostPublicKey.VerifyHash(contractHash, fc.HostSignature) { + return fmt.Errorf("has invalid host signature") + } + return nil + } + + validateRevision := func(cur, rev types.V2FileContract) error { + curOutputSum := cur.RenterOutput.Value.Add(cur.HostOutput.Value) + revOutputSum := rev.RenterOutput.Value.Add(rev.HostOutput.Value) + switch { + case rev.RevisionNumber <= cur.RevisionNumber: 
+ return fmt.Errorf("does not increase revision number (%v -> %v)", cur.RevisionNumber, rev.RevisionNumber) + case !revOutputSum.Equals(curOutputSum): + return fmt.Errorf("modifies output sum (%d H -> %d H)", curOutputSum, revOutputSum) + case rev.TotalCollateral != cur.TotalCollateral: + return fmt.Errorf("modifies total collateral") + case rev.ProofHeight < ms.base.childHeight(): + return fmt.Errorf("has proof height (%v) that has already passed", rev.ProofHeight) + case rev.ExpirationHeight <= rev.ProofHeight: + return fmt.Errorf("leaves no time between proof height (%v) and expiration height (%v)", rev.ProofHeight, rev.ExpirationHeight) + } + + // verify signatures + // + // NOTE: very important that we verify with the *current* keys! + contractHash := ms.base.ContractSigHash(rev) + if !cur.RenterPublicKey.VerifyHash(contractHash, rev.RenterSignature) { + return fmt.Errorf("has invalid renter signature") + } else if !cur.HostPublicKey.VerifyHash(contractHash, rev.HostSignature) { + return fmt.Errorf("has invalid host signature") + } + return nil + } + + for i, fc := range txn.FileContracts { + if err := validateContract(fc); err != nil { + return fmt.Errorf("file contract %v %s", i, err) + } + } + + for i, fcr := range txn.FileContractRevisions { + cur, rev := fcr.Parent.V2FileContract, fcr.Revision + if fc, ok := ms.v2Revision(types.FileContractID(fcr.Parent.ID)); ok { + cur = fc + } + if err := validateParent(fcr.Parent); err != nil { + return fmt.Errorf("file contract revision %v parent (%v) %s", i, fcr.Parent.ID, err) + } else if cur.ProofHeight < ms.base.childHeight() { + return fmt.Errorf("file contract revision %v cannot be applied to contract after proof height (%v)", i, cur.ProofHeight) + } else if rev.RevisionNumber == types.MaxRevisionNumber { + // NOTE: disallowing this means that resolutions always take + // precedence over revisions + return fmt.Errorf("file contract revision %v resolves contract", i) + } else if err := validateRevision(cur, rev); 
err != nil { + return fmt.Errorf("file contract revision %v %s", i, err) + } + } + + for i, fcr := range txn.FileContractResolutions { + if err := validateParent(fcr.Parent); err != nil { + return fmt.Errorf("file contract renewal %v parent (%v) %s", i, fcr.Parent.ID, err) + } + fc := fcr.Parent.V2FileContract + switch r := fcr.Resolution.(type) { + case types.FileContractRenewal: + renewal := r + old, renewed := renewal.FinalRevision, renewal.InitialRevision + if old.RevisionNumber != types.MaxRevisionNumber { + return fmt.Errorf("file contract renewal %v does not finalize old contract", i) + } else if err := validateRevision(fc, old); err != nil { + return fmt.Errorf("file contract renewal %v final revision %s", i, err) + } else if err := validateContract(renewed); err != nil { + return fmt.Errorf("file contract renewal %v initial revision %s", i, err) + } + + rollover := renewal.RenterRollover.Add(renewal.HostRollover) + newContractCost := renewed.RenterOutput.Value.Add(renewed.HostOutput.Value).Add(ms.base.V2FileContractTax(renewed)) + if renewal.RenterRollover.Cmp(old.RenterOutput.Value) > 0 { + return fmt.Errorf("file contract renewal %v has renter rollover (%d H) exceeding old output (%d H)", i, renewal.RenterRollover, old.RenterOutput.Value) + } else if renewal.HostRollover.Cmp(old.HostOutput.Value) > 0 { + return fmt.Errorf("file contract renewal %v has host rollover (%d H) exceeding old output (%d H)", i, renewal.HostRollover, old.HostOutput.Value) + } else if rollover.Cmp(newContractCost) > 0 { + return fmt.Errorf("file contract renewal %v has rollover (%d H) exceeding new contract cost (%d H)", i, rollover, newContractCost) + } + + renewalHash := ms.base.RenewalSigHash(renewal) + if !fc.RenterPublicKey.VerifyHash(renewalHash, renewal.RenterSignature) { + return fmt.Errorf("file contract renewal %v has invalid renter signature", i) + } else if !fc.HostPublicKey.VerifyHash(renewalHash, renewal.HostSignature) { + return fmt.Errorf("file contract renewal %v 
has invalid host signature", i) + } + case types.V2FileContract: + finalRevision := r + if finalRevision.RevisionNumber != types.MaxRevisionNumber { + return fmt.Errorf("file contract finalization %v does not set maximum revision number", i) + } else if err := validateRevision(fc, finalRevision); err != nil { + return fmt.Errorf("file contract finalization %v %s", i, err) + } + case types.V2StorageProof: + sp := r + if ms.base.childHeight() < fc.ProofHeight { + return fmt.Errorf("file contract storage proof %v cannot be submitted until after proof height (%v)", i, fc.ProofHeight) + } else if sp.ProofStart.Height != fc.ProofHeight { + // see note on this field in types.StorageProof + return fmt.Errorf("file contract storage proof %v has ProofStart (%v) that does not match contract ProofStart (%v)", i, sp.ProofStart.Height, fc.ProofHeight) + } else if ms.base.History.Contains(sp.ProofStart, sp.HistoryProof) { + return fmt.Errorf("file contract storage proof %v has invalid history proof", i) + } + leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, sp.ProofStart, types.FileContractID(fcr.Parent.ID)) + if proofRoot(ms.base.StorageProofLeafHash(sp.Leaf[:]), leafIndex, sp.Proof) != fc.FileMerkleRoot { + return fmt.Errorf("file contract storage proof %v has root that does not match contract Merkle root", i) + } + case types.FileContractExpiration: + if ms.base.childHeight() <= fc.ExpirationHeight { + return fmt.Errorf("file contract expiration %v cannot be submitted until after expiration height (%v) ", i, fc.ExpirationHeight) + } + } + } + + return nil +} + +func validateAttestations(ms *MidState, txn types.V2Transaction) error { + for i, a := range txn.Attestations { + switch { + case len(a.Key) == 0: + return fmt.Errorf("attestation %v has empty key", i) + case !a.PublicKey.VerifyHash(ms.base.AttestationSigHash(a), a.Signature): + return fmt.Errorf("attestation %v has invalid signature", i) + } + } + return nil +} + +func validateFoundationUpdate(ms *MidState, txn 
types.V2Transaction) error { + if txn.NewFoundationAddress == nil { + return nil + } + for _, in := range txn.SiacoinInputs { + if in.Parent.Address == ms.base.FoundationPrimaryAddress { + return nil + } + } + return errors.New("transaction changes Foundation address, but does not spend an input controlled by current address") +} + +// ValidateV2Transaction validates txn within the context of ms. +func ValidateV2Transaction(ms *MidState, txn types.V2Transaction) error { + if err := validateV2CurrencyValues(ms, txn); err != nil { + return err + } else if err := validateV2Siacoins(ms, txn); err != nil { + return err + } else if err := validateV2Siafunds(ms, txn); err != nil { + return err + } else if err := validateV2FileContracts(ms, txn); err != nil { + return err + } else if err := validateAttestations(ms, txn); err != nil { + return err + } else if err := validateFoundationUpdate(ms, txn); err != nil { + return err + } + return nil +} + // ValidateBlock validates b in the context of s and store. // // This function does not check whether the header's timestamp is too far in the @@ -571,5 +1021,13 @@ func ValidateBlock(s State, store Store, b types.Block) error { } ms.ApplyTransaction(store, txn) } + if b.V2 != nil { + for _, txn := range b.V2.Transactions { + if err := ValidateV2Transaction(ms, txn); err != nil { + return err + } + ms.ApplyV2Transaction(txn) + } + } return nil } diff --git a/types/encoding.go b/types/encoding.go index ee8ee4a0..c9d4257a 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -589,9 +589,9 @@ func (ren FileContractRenewal) EncodeTo(e *Encoder) { // EncodeTo implements types.EncoderTo. 
func (sp V2StorageProof) EncodeTo(e *Encoder) { - sp.WindowStart.EncodeTo(e) - e.WritePrefix(len(sp.WindowProof)) - for _, p := range sp.WindowProof { + sp.ProofStart.EncodeTo(e) + e.WritePrefix(len(sp.HistoryProof)) + for _, p := range sp.HistoryProof { p.EncodeTo(e) } e.Write(sp.Leaf[:]) @@ -1129,10 +1129,10 @@ func (ren *FileContractRenewal) DecodeFrom(d *Decoder) { // DecodeFrom implements types.DecoderFrom. func (sp *V2StorageProof) DecodeFrom(d *Decoder) { - sp.WindowStart.DecodeFrom(d) - sp.WindowProof = make([]Hash256, d.ReadPrefix()) - for i := range sp.WindowProof { - sp.WindowProof[i].DecodeFrom(d) + sp.ProofStart.DecodeFrom(d) + sp.HistoryProof = make([]Hash256, d.ReadPrefix()) + for i := range sp.HistoryProof { + sp.HistoryProof[i].DecodeFrom(d) } d.Read(sp.Leaf[:]) sp.Proof = make([]Hash256, d.ReadPrefix()) diff --git a/types/types.go b/types/types.go index 74924708..e683b0a1 100644 --- a/types/types.go +++ b/types/types.go @@ -29,6 +29,12 @@ const ( // HostContractIndex defines the index of the host's output and public key in // a FileContract. HostContractIndex = 1 + + // EphemeralLeafIndex is used as the LeafIndex of StateElements that are created + // and spent within the same block. Such elements do not require a proof of + // existence. They are, however, assigned a proper index and are incorporated + // into the state accumulator when the block is processed. + EphemeralLeafIndex = math.MaxUint64 ) // Various specifiers. @@ -546,16 +552,15 @@ type FileContractRenewal struct { // Merkle tree of a V2FileContract's data. type V2StorageProof struct { // Selecting the leaf requires a source of unpredictable entropy; we use the - // ID of the block at the start of the proof window. The StorageProof - // includes this ID, and asserts its presence in the chain via a separate - // Merkle proof. + // ID of the block at the contract's ProofHeight. The StorageProof includes + // this ID, and asserts its presence in the chain via a history proof. 
// - // For convenience, WindowStart is a ChainIndex rather than a BlockID. - // Consequently, WindowStart.Height MUST match the WindowStart field of the + // For convenience, ProofStart is a ChainIndex rather than a BlockID. + // Consequently, ProofStart.Height MUST match the ProofStart field of the // contract's final revision; otherwise, the prover could use any - // WindowStart, giving them control over the leaf index. - WindowStart ChainIndex - WindowProof []Hash256 + // ProofStart, giving them control over the leaf index. + ProofStart ChainIndex + HistoryProof []Hash256 // The leaf is always 64 bytes, extended with zeros if necessary. Leaf [64]byte @@ -685,49 +690,38 @@ func (txn *V2Transaction) ID() TransactionID { return TransactionID(h.Sum()) } -// SiacoinOutputID returns the ID of the siacoin output at index i. -func (txn *V2Transaction) SiacoinOutputID(i int) SiacoinOutputID { +// V2SiacoinOutputID returns the ID of the siacoin output at index i. +func (txn *V2Transaction) EphemeralSiacoinOutput(i int) SiacoinElement { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) h.Reset() SpecifierSiacoinOutput.EncodeTo(h.E) txn.ID().EncodeTo(h.E) h.E.WriteUint64(uint64(i)) - return SiacoinOutputID(h.Sum()) + return SiacoinElement{ + StateElement: StateElement{ + ID: h.Sum(), + LeafIndex: EphemeralLeafIndex, + }, + SiacoinOutput: txn.SiacoinOutputs[i], + } } -// SiafundOutputID returns the ID of the siafund output at index i. -func (txn *V2Transaction) SiafundOutputID(i int) SiafundOutputID { +// V2SiafundOutputID returns the ID of the siafund output at index i. +func (txn *V2Transaction) EphemeralSiafundOutput(i int) SiafundElement { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) h.Reset() SpecifierSiafundOutput.EncodeTo(h.E) txn.ID().EncodeTo(h.E) h.E.WriteUint64(uint64(i)) - return SiafundOutputID(h.Sum()) -} - -// SiafundClaimOutputID returns the ID of the siacoin claim output for the -// siafund input at index i. 
-func (txn *V2Transaction) SiafundClaimOutputID(i int) SiacoinOutputID { - h := hasherPool.Get().(*Hasher) - defer hasherPool.Put(h) - h.Reset() - SpecifierClaimOutput.EncodeTo(h.E) - txn.ID().EncodeTo(h.E) - h.E.WriteUint64(uint64(i)) - return SiacoinOutputID(h.Sum()) -} - -// FileContractID returns the ID of the file contract at index i. -func (txn *V2Transaction) FileContractID(i int) FileContractID { - h := hasherPool.Get().(*Hasher) - defer hasherPool.Put(h) - h.Reset() - SpecifierFileContract.EncodeTo(h.E) - txn.ID().EncodeTo(h.E) - h.E.WriteUint64(uint64(i)) - return FileContractID(h.Sum()) + return SiafundElement{ + StateElement: StateElement{ + ID: h.Sum(), + LeafIndex: EphemeralLeafIndex, + }, + SiafundOutput: txn.SiafundOutputs[i], + } } // DeepCopy returns a copy of txn that does not alias any of its memory. @@ -754,7 +748,7 @@ func (txn *V2Transaction) DeepCopy() V2Transaction { for i := range c.FileContractResolutions { c.FileContractResolutions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractResolutions[i].Parent.MerkleProof...) if sp, ok := c.FileContractResolutions[i].Resolution.(V2StorageProof); ok { - sp.WindowProof = append([]Hash256(nil), sp.WindowProof...) + sp.HistoryProof = append([]Hash256(nil), sp.HistoryProof...) sp.Proof = append([]Hash256(nil), sp.Proof...) 
c.FileContractResolutions[i].Resolution = sp } From 39f7857a220664866c01b33ca32a837033ef5164 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sun, 25 Jun 2023 19:42:36 -0400 Subject: [PATCH 09/53] consensus: Add v2 hardfork network parameters --- chain/network.go | 6 ++++++ consensus/state.go | 4 ++++ consensus/validation.go | 16 ++++++++++++---- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/chain/network.go b/chain/network.go index 14ccc352..c708d52d 100644 --- a/chain/network.go +++ b/chain/network.go @@ -45,6 +45,9 @@ func Mainnet() (*consensus.Network, types.Block) { n.HardforkFoundation.PrimaryAddress = parseAddr("addr:053b2def3cbdd078c19d62ce2b4f0b1a3c5e0ffbeeff01280efb1f8969b2f5bb4fdc680f0807") n.HardforkFoundation.FailsafeAddress = parseAddr("addr:27c22a6c6e6645802a3b8fa0e5374657438ef12716d2205d3e866272de1b644dbabd53d6d560") + n.HardforkV2.AllowHeight = 1000000 // TBD + n.HardforkV2.RequireHeight = 1025000 // ~six months later + b := types.Block{ Timestamp: n.HardforkOak.GenesisTimestamp, Transactions: []types.Transaction{{ @@ -134,6 +137,9 @@ func TestnetZen() (*consensus.Network, types.Block) { n.HardforkFoundation.PrimaryAddress = parseAddr("addr:053b2def3cbdd078c19d62ce2b4f0b1a3c5e0ffbeeff01280efb1f8969b2f5bb4fdc680f0807") n.HardforkFoundation.FailsafeAddress = types.VoidAddress + n.HardforkV2.AllowHeight = 30000 // TBD + n.HardforkV2.RequireHeight = 32000 // ~two weeks later + b := types.Block{ Timestamp: n.HardforkOak.GenesisTimestamp, Transactions: []types.Transaction{{ diff --git a/consensus/state.go b/consensus/state.go index 9035c603..07d16e01 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -69,6 +69,10 @@ type Network struct { PrimaryAddress types.Address `json:"primaryAddress"` FailsafeAddress types.Address `json:"failsafeAddress"` } `json:"hardforkFoundation"` + HardforkV2 struct { + AllowHeight uint64 `json:"allowHeight"` + RequireHeight uint64 `json:"requireHeight"` + } `json:"hardforkV2"` } // GenesisState 
returns the state to which the genesis block should be applied. diff --git a/consensus/validation.go b/consensus/validation.go index 2befbe44..3ed6c493 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -1015,13 +1015,21 @@ func ValidateBlock(s State, store Store, b types.Block) error { return err } ms := NewMidState(s) - for _, txn := range b.Transactions { - if err := ValidateTransaction(ms, store, txn); err != nil { - return err + if len(b.Transactions) > 0 { + if s.childHeight() >= ms.base.Network.HardforkV2.RequireHeight { + return errors.New("v1 transactions are not allowed after v2 hardfork is complete") + } + for _, txn := range b.Transactions { + if err := ValidateTransaction(ms, store, txn); err != nil { + return err + } + ms.ApplyTransaction(store, txn) } - ms.ApplyTransaction(store, txn) } if b.V2 != nil { + if s.childHeight() < ms.base.Network.HardforkV2.AllowHeight { + return errors.New("v2 transactions are not allowed until v2 hardfork begins") + } for _, txn := range b.V2.Transactions { if err := ValidateV2Transaction(ms, txn); err != nil { return err From 41726732f748a312c87f426214c7f7a218818ee4 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 27 Jun 2023 20:48:13 -0400 Subject: [PATCH 10/53] consensus: Properly apply v2 state changes --- consensus/update.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/consensus/update.go b/consensus/update.go index 720b5e20..5acc79bf 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -165,6 +165,13 @@ func ApplyState(s State, store Store, b types.Block) State { siafundPool = siafundPool.Add(s.FileContractTax(fc)) } } + if b.V2 != nil { + for _, txn := range b.V2.Transactions { + for _, fc := range txn.FileContracts { + siafundPool = siafundPool.Add(s.V2FileContractTax(fc)) + } + } + } // update state newFoundationPrimaryAddress := s.FoundationPrimaryAddress @@ -183,6 +190,14 @@ func ApplyState(s State, store Store, b types.Block) State { } } } + if b.V2 != 
nil { + for _, txn := range b.V2.Transactions { + if txn.NewFoundationAddress != nil { + newFoundationPrimaryAddress = *txn.NewFoundationAddress + newFoundationFailsafeAddress = *txn.NewFoundationAddress + } + } + } if b.ParentID == (types.BlockID{}) { // special handling for genesis block From 59fb0e845bbe9c16358c8d9255db26005ddd926c Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 11 Jul 2023 15:33:10 -0400 Subject: [PATCH 11/53] types: Add V2 prefix to v2 file contract types --- consensus/merkle.go | 44 ++++++++++++++++++--- consensus/state.go | 2 +- consensus/update.go | 14 +++---- consensus/validation.go | 16 ++++---- types/encoding.go | 34 ++++++++++------ types/types.go | 86 ++++++++++++++++++++++------------------- 6 files changed, 123 insertions(+), 73 deletions(-) diff --git a/consensus/merkle.go b/consensus/merkle.go index 5a406cf1..32d949b4 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -35,6 +35,23 @@ func proofRoot(leafHash types.Hash256, leafIndex uint64, proof []types.Hash256) return root } +func storageProofRoot(leafHash types.Hash256, leafIndex uint64, filesize uint64, proof []types.Hash256) types.Hash256 { + const leafSize = uint64(len(types.V2StorageProof{}.Leaf)) + lastLeafIndex := filesize / leafSize + if filesize%leafSize == 0 { + lastLeafIndex-- + } + subtreeHeight := bits.Len64(leafIndex ^ lastLeafIndex) + if len(proof) < subtreeHeight { + return types.Hash256{} // invalid proof + } + root := proofRoot(leafHash, leafIndex, proof[:subtreeHeight]) + for _, h := range proof[subtreeHeight:] { + root = blake2b.SumPair(root, h) + } + return root +} + // An ElementLeaf represents a leaf in the ElementAccumulator Merkle tree. 
type ElementLeaf struct { types.StateElement @@ -98,6 +115,21 @@ func FileContractLeaf(e types.FileContractElement, spent bool) ElementLeaf { h.Reset() h.E.WriteString("sia/leaf/filecontract|") e.ID.EncodeTo(h.E) + e.FileContract.EncodeTo(h.E) + return ElementLeaf{ + StateElement: e.StateElement, + ElementHash: h.Sum(), + Spent: spent, + } +} + +// V2FileContractLeaf returns the ElementLeaf for a V2FileContractElement. +func V2FileContractLeaf(e types.V2FileContractElement, spent bool) ElementLeaf { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + h.E.WriteString("sia/leaf/v2filecontract|") + e.ID.EncodeTo(h.E) e.V2FileContract.EncodeTo(h.E) return ElementLeaf{ StateElement: e.StateElement, @@ -204,16 +236,16 @@ func (acc *ElementAccumulator) ContainsSpentSiafundElement(sfe types.SiafundElem return acc.containsLeaf(SiafundLeaf(sfe, true)) } -// ContainsUnresolvedFileContractElement returns true if the accumulator +// ContainsUnresolvedV2FileContractElement returns true if the accumulator // contains fce as an unresolved file contract. -func (acc *ElementAccumulator) ContainsUnresolvedFileContractElement(fce types.FileContractElement) bool { - return acc.containsLeaf(FileContractLeaf(fce, false)) +func (acc *ElementAccumulator) ContainsUnresolvedV2FileContractElement(fce types.V2FileContractElement) bool { + return acc.containsLeaf(V2FileContractLeaf(fce, false)) } -// ContainsResolvedFileContractElement returns true if the accumulator contains +// ContainsResolvedV2FileContractElement returns true if the accumulator contains // fce as a resolved file contract. 
-func (acc *ElementAccumulator) ContainsResolvedFileContractElement(fce types.FileContractElement) bool { - return acc.containsLeaf(FileContractLeaf(fce, true)) +func (acc *ElementAccumulator) ContainsResolvedV2FileContractElement(fce types.V2FileContractElement) bool { + return acc.containsLeaf(V2FileContractLeaf(fce, true)) } // addLeaves adds the supplied leaves to the accumulator, filling in their diff --git a/consensus/state.go b/consensus/state.go index 07d16e01..703e6048 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -524,7 +524,7 @@ func (s State) ContractSigHash(fc types.V2FileContract) types.Hash256 { } // RenewalSigHash returns the hash that must be signed for a file contract renewal. -func (s State) RenewalSigHash(fcr types.FileContractRenewal) types.Hash256 { +func (s State) RenewalSigHash(fcr types.V2FileContractRenewal) types.Hash256 { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() diff --git a/consensus/update.go b/consensus/update.go index 5acc79bf..58b5c2a6 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -525,9 +525,9 @@ func (td *TransactionDiff) DecodeFrom(d *types.Decoder) { // A V2TransactionDiff contains the elements added to the state accumulator by a // v2 transaction. type V2TransactionDiff struct { - CreatedSiacoinElements []types.SiacoinElement `json:"createdSiacoinElements,omitempty"` - CreatedSiafundElements []types.SiafundElement `json:"createdSiafundElements,omitempty"` - CreatedFileContracts []types.FileContractElement `json:"createdFileContracts,omitempty"` + CreatedSiacoinElements []types.SiacoinElement `json:"createdSiacoinElements,omitempty"` + CreatedSiafundElements []types.SiafundElement `json:"createdSiafundElements,omitempty"` + CreatedFileContracts []types.V2FileContractElement `json:"createdFileContracts,omitempty"` } // EncodeTo implements types.EncoderTo. 
@@ -556,7 +556,7 @@ func (td *V2TransactionDiff) DecodeFrom(d *types.Decoder) { for i := range td.CreatedSiafundElements { td.CreatedSiafundElements[i].DecodeFrom(d) } - td.CreatedFileContracts = make([]types.FileContractElement, d.ReadPrefix()) + td.CreatedFileContracts = make([]types.V2FileContractElement, d.ReadPrefix()) for i := range td.CreatedFileContracts { td.CreatedFileContracts[i].DecodeFrom(d) } @@ -767,7 +767,7 @@ func ApplyDiff(s State, store Store, b types.Block) BlockDiff { }) } for _, fc := range txn.FileContracts { - tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.FileContractElement{ + tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.V2FileContractElement{ StateElement: types.StateElement{ID: types.Hash256(v2FileContractID(txid, len(tdiff.CreatedFileContracts)))}, V2FileContract: fc, }) @@ -788,8 +788,8 @@ func ApplyDiff(s State, store Store, b types.Block) BlockDiff { }) } for _, res := range txn.FileContractResolutions { - if r, ok := res.Resolution.(types.FileContractRenewal); ok { - tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.FileContractElement{ + if r, ok := res.Resolution.(types.V2FileContractRenewal); ok { + tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.V2FileContractElement{ StateElement: types.StateElement{ID: types.Hash256(v2FileContractID(txid, len(tdiff.CreatedFileContracts)))}, V2FileContract: r.InitialRevision, }) diff --git a/consensus/validation.go b/consensus/validation.go index 3ed6c493..5895ef90 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -638,7 +638,7 @@ func validateV2CurrencyValues(ms *MidState, txn types.V2Transaction) error { } for i, res := range txn.FileContractResolutions { switch r := res.Resolution.(type) { - case types.FileContractRenewal: + case types.V2FileContractRenewal: if r.InitialRevision.RenterOutput.Value.IsZero() && r.InitialRevision.HostOutput.Value.IsZero() { return fmt.Errorf("file 
contract renewal %v creates contract with zero value", i) } @@ -746,7 +746,7 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { outputSum = outputSum.Add(fc.RenterOutput.Value).Add(fc.HostOutput.Value).Add(ms.base.V2FileContractTax(fc)) } for _, res := range txn.FileContractResolutions { - if r, ok := res.Resolution.(types.FileContractRenewal); ok { + if r, ok := res.Resolution.(types.V2FileContractRenewal); ok { // a renewal creates a new contract, optionally "rolling over" funds // from the old contract inputSum = inputSum.Add(r.RenterRollover) @@ -815,11 +815,11 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { return errors.New("transaction both resolves a file contract and creates new outputs") } - validateParent := func(fce types.FileContractElement) error { + validateParent := func(fce types.V2FileContractElement) error { if txid, ok := ms.spent(fce.ID); ok { return fmt.Errorf("has already been resolved in transaction %v", txid) - } else if !ms.base.Elements.ContainsUnresolvedFileContractElement(fce) { - if ms.base.Elements.ContainsResolvedFileContractElement(fce) { + } else if !ms.base.Elements.ContainsUnresolvedV2FileContractElement(fce) { + if ms.base.Elements.ContainsResolvedV2FileContractElement(fce) { return errors.New("has already been resolved in a previous block") } return errors.New("is not present in the accumulator") @@ -905,7 +905,7 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { } fc := fcr.Parent.V2FileContract switch r := fcr.Resolution.(type) { - case types.FileContractRenewal: + case types.V2FileContractRenewal: renewal := r old, renewed := renewal.FinalRevision, renewal.InitialRevision if old.RevisionNumber != types.MaxRevisionNumber { @@ -950,10 +950,10 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { return fmt.Errorf("file contract storage proof %v has invalid history proof", i) } leafIndex := 
ms.base.StorageProofLeafIndex(fc.Filesize, sp.ProofStart, types.FileContractID(fcr.Parent.ID)) - if proofRoot(ms.base.StorageProofLeafHash(sp.Leaf[:]), leafIndex, sp.Proof) != fc.FileMerkleRoot { + if storageProofRoot(ms.base.StorageProofLeafHash(sp.Leaf[:]), leafIndex, fc.Filesize, sp.Proof) != fc.FileMerkleRoot { return fmt.Errorf("file contract storage proof %v has root that does not match contract Merkle root", i) } - case types.FileContractExpiration: + case types.V2FileContractExpiration: if ms.base.childHeight() <= fc.ExpirationHeight { return fmt.Errorf("file contract expiration %v cannot be submitted until after expiration height (%v) ", i, fc.ExpirationHeight) } diff --git a/types/encoding.go b/types/encoding.go index c9d4257a..b2e86558 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -567,6 +567,12 @@ func (fc V2FileContract) EncodeTo(e *Encoder) { // EncodeTo implements types.EncoderTo. func (fce FileContractElement) EncodeTo(e *Encoder) { + fce.StateElement.EncodeTo(e) + fce.FileContract.EncodeTo(e) +} + +// EncodeTo implements types.EncoderTo. +func (fce V2FileContractElement) EncodeTo(e *Encoder) { fce.StateElement.EncodeTo(e) fce.V2FileContract.EncodeTo(e) } @@ -578,7 +584,7 @@ func (rev V2FileContractRevision) EncodeTo(e *Encoder) { } // EncodeTo implements types.EncoderTo. -func (ren FileContractRenewal) EncodeTo(e *Encoder) { +func (ren V2FileContractRenewal) EncodeTo(e *Encoder) { ren.FinalRevision.EncodeTo(e) ren.InitialRevision.EncodeTo(e) ren.RenterRollover.EncodeTo(e) @@ -602,13 +608,13 @@ func (sp V2StorageProof) EncodeTo(e *Encoder) { } // EncodeTo implements types.EncoderTo. -func (FileContractExpiration) EncodeTo(e *Encoder) {} +func (V2FileContractExpiration) EncodeTo(e *Encoder) {} // EncodeTo implements types.EncoderTo. 
-func (res FileContractResolution) EncodeTo(e *Encoder) { +func (res V2FileContractResolution) EncodeTo(e *Encoder) { res.Parent.EncodeTo(e) switch r := res.Resolution.(type) { - case FileContractRenewal: + case V2FileContractRenewal: e.WriteUint8(0) r.EncodeTo(e) case V2StorageProof: @@ -617,7 +623,7 @@ func (res FileContractResolution) EncodeTo(e *Encoder) { case V2FileContract: e.WriteUint8(2) r.EncodeTo(e) - case FileContractExpiration: + case V2FileContractExpiration: e.WriteUint8(3) r.EncodeTo(e) default: @@ -1107,6 +1113,12 @@ func (fc *V2FileContract) DecodeFrom(d *Decoder) { // DecodeFrom implements types.DecoderFrom. func (fce *FileContractElement) DecodeFrom(d *Decoder) { + fce.StateElement.DecodeFrom(d) + fce.FileContract.DecodeFrom(d) +} + +// DecodeFrom implements types.DecoderFrom. +func (fce *V2FileContractElement) DecodeFrom(d *Decoder) { fce.StateElement.DecodeFrom(d) fce.V2FileContract.DecodeFrom(d) } @@ -1118,7 +1130,7 @@ func (rev *V2FileContractRevision) DecodeFrom(d *Decoder) { } // DecodeFrom implements types.DecoderFrom. -func (ren *FileContractRenewal) DecodeFrom(d *Decoder) { +func (ren *V2FileContractRenewal) DecodeFrom(d *Decoder) { ren.FinalRevision.DecodeFrom(d) ren.InitialRevision.DecodeFrom(d) ren.RenterRollover.DecodeFrom(d) @@ -1142,14 +1154,14 @@ func (sp *V2StorageProof) DecodeFrom(d *Decoder) { } // DecodeFrom implements types.DecoderFrom. -func (*FileContractExpiration) DecodeFrom(d *Decoder) {} +func (*V2FileContractExpiration) DecodeFrom(d *Decoder) {} // DecodeFrom implements types.DecoderFrom. 
-func (res *FileContractResolution) DecodeFrom(d *Decoder) { +func (res *V2FileContractResolution) DecodeFrom(d *Decoder) { res.Parent.DecodeFrom(d) switch t := d.ReadUint8(); t { case 0: - var r FileContractRenewal + var r V2FileContractRenewal r.DecodeFrom(d) res.Resolution = r case 1: @@ -1161,7 +1173,7 @@ func (res *FileContractResolution) DecodeFrom(d *Decoder) { r.DecodeFrom(d) res.Resolution = r case 3: - var r FileContractExpiration + var r V2FileContractExpiration r.DecodeFrom(d) res.Resolution = r default: @@ -1223,7 +1235,7 @@ func (txn *V2Transaction) DecodeFrom(d *Decoder) { } } if fields&(1<<6) != 0 { - txn.FileContractResolutions = make([]FileContractResolution, d.ReadPrefix()) + txn.FileContractResolutions = make([]V2FileContractResolution, d.ReadPrefix()) for i := range txn.FileContractResolutions { txn.FileContractResolutions[i].DecodeFrom(d) } diff --git a/types/types.go b/types/types.go index e683b0a1..7b6fdf1d 100644 --- a/types/types.go +++ b/types/types.go @@ -491,12 +491,12 @@ type V2SiafundInput struct { // A V2FileContractRevision updates the state of an existing file contract. type V2FileContractRevision struct { - Parent FileContractElement `json:"parent"` - Revision V2FileContract `json:"revision"` + Parent V2FileContractElement `json:"parent"` + Revision V2FileContract `json:"revision"` } -// A FileContractResolution closes a file contract's payment channel. There are -// four ways a contract can be resolved: +// A V2FileContractResolution closes a v2 file contract's payment channel. There +// are four ways a contract can be resolved: // // 1) The host can submit a storage proof. This is considered a "valid" // resolution: the RenterOutput and HostOutput fields of the (finalized) @@ -521,23 +521,23 @@ type V2FileContractRevision struct { // ExpirationHeight. Since anyone can submit an expiration, it is generally in // the renter and/or host's interest to submit a different type of resolution // prior to the ExpirationHeight. 
-type FileContractResolution struct { - Parent FileContractElement `json:"parent"` - Resolution FileContractResolutionType `json:"resolution"` +type V2FileContractResolution struct { + Parent V2FileContractElement `json:"parent"` + Resolution V2FileContractResolutionType `json:"resolution"` } -// FileContractResolutionType enumerates the types of file contract resolution. -type FileContractResolutionType interface { - isFileContractResolution() +// V2FileContractResolutionType enumerates the types of file contract resolution. +type V2FileContractResolutionType interface { + isV2FileContractResolution() } -func (FileContractRenewal) isFileContractResolution() {} -func (V2StorageProof) isFileContractResolution() {} -func (V2FileContract) isFileContractResolution() {} // finalization -func (FileContractExpiration) isFileContractResolution() {} +func (V2FileContractRenewal) isV2FileContractResolution() {} +func (V2StorageProof) isV2FileContractResolution() {} +func (V2FileContract) isV2FileContractResolution() {} // finalization +func (V2FileContractExpiration) isV2FileContractResolution() {} -// A FileContractRenewal renews a file contract. -type FileContractRenewal struct { +// A V2FileContractRenewal renews a file contract. +type V2FileContractRenewal struct { FinalRevision V2FileContract `json:"finalRevision"` InitialRevision V2FileContract `json:"initialRevision"` RenterRollover Currency `json:"renterRollover"` @@ -567,10 +567,10 @@ type V2StorageProof struct { Proof []Hash256 } -// A FileContractExpiration resolves an expired contract. A contract is +// A V2FileContractExpiration resolves an expired contract. A contract is // considered expired when its proof window has elapsed. If the contract is not // storing any data, it will resolve as valid; otherwise, it resolves as missed. -type FileContractExpiration struct{} +type V2FileContractExpiration struct{} // A StateElement is a generic element within the state accumulator. 
type StateElement struct { @@ -597,6 +597,12 @@ type SiafundElement struct { // A FileContractElement is a storage agreement between a renter and a host. type FileContractElement struct { + StateElement + FileContract +} + +// A V2FileContractElement is a storage agreement between a renter and a host. +type V2FileContractElement struct { StateElement V2FileContract } @@ -616,17 +622,17 @@ type Attestation struct { // A V2Transaction effects a change of blockchain state. type V2Transaction struct { - SiacoinInputs []V2SiacoinInput `json:"siacoinInputs,omitempty"` - SiacoinOutputs []SiacoinOutput `json:"siacoinOutputs,omitempty"` - SiafundInputs []V2SiafundInput `json:"siafundInputs,omitempty"` - SiafundOutputs []SiafundOutput `json:"siafundOutputs,omitempty"` - FileContracts []V2FileContract `json:"fileContracts,omitempty"` - FileContractRevisions []V2FileContractRevision `json:"fileContractRevisions,omitempty"` - FileContractResolutions []FileContractResolution `json:"fileContractResolutions,omitempty"` - Attestations []Attestation `json:"attestations,omitempty"` - ArbitraryData []byte `json:"arbitraryData,omitempty"` - NewFoundationAddress *Address `json:"newFoundationAddress,omitempty"` - MinerFee Currency `json:"minerFee"` + SiacoinInputs []V2SiacoinInput `json:"siacoinInputs,omitempty"` + SiacoinOutputs []SiacoinOutput `json:"siacoinOutputs,omitempty"` + SiafundInputs []V2SiafundInput `json:"siafundInputs,omitempty"` + SiafundOutputs []SiafundOutput `json:"siafundOutputs,omitempty"` + FileContracts []V2FileContract `json:"fileContracts,omitempty"` + FileContractRevisions []V2FileContractRevision `json:"fileContractRevisions,omitempty"` + FileContractResolutions []V2FileContractResolution `json:"fileContractResolutions,omitempty"` + Attestations []Attestation `json:"attestations,omitempty"` + ArbitraryData []byte `json:"arbitraryData,omitempty"` + NewFoundationAddress *Address `json:"newFoundationAddress,omitempty"` + MinerFee Currency `json:"minerFee"` } // ID 
returns the "semantic hash" of the transaction, covering all of the @@ -744,7 +750,7 @@ func (txn *V2Transaction) DeepCopy() V2Transaction { for i := range c.FileContractRevisions { c.FileContractRevisions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractRevisions[i].Parent.MerkleProof...) } - c.FileContractResolutions = append([]FileContractResolution(nil), c.FileContractResolutions...) + c.FileContractResolutions = append([]V2FileContractResolution(nil), c.FileContractResolutions...) for i := range c.FileContractResolutions { c.FileContractResolutions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractResolutions[i].Parent.MerkleProof...) if sp, ok := c.FileContractResolutions[i].Resolution.(V2StorageProof); ok { @@ -1045,29 +1051,29 @@ func (sp *StorageProof) UnmarshalJSON(b []byte) error { } // MarshalJSON implements json.Marshaler. -func (res FileContractResolution) MarshalJSON() ([]byte, error) { +func (res V2FileContractResolution) MarshalJSON() ([]byte, error) { var typ string switch res.Resolution.(type) { - case FileContractRenewal: + case V2FileContractRenewal: typ = "renewal" case V2StorageProof: typ = "storage proof" case V2FileContract: typ = "finalization" - case FileContractExpiration: + case V2FileContractExpiration: typ = "expiration" } return json.Marshal(struct { - Parent FileContractElement `json:"parent"` - Type string `json:"type"` - Resolution FileContractResolutionType `json:"resolution,omitempty"` + Parent V2FileContractElement `json:"parent"` + Type string `json:"type"` + Resolution V2FileContractResolutionType `json:"resolution,omitempty"` }{res.Parent, typ, res.Resolution}) } // UnmarshalJSON implements json.Marshaler. 
-func (res *FileContractResolution) UnmarshalJSON(b []byte) error { +func (res *V2FileContractResolution) UnmarshalJSON(b []byte) error { var p struct { - Parent FileContractElement + Parent V2FileContractElement Type string Resolution json.RawMessage } @@ -1076,7 +1082,7 @@ func (res *FileContractResolution) UnmarshalJSON(b []byte) error { } switch p.Type { case "renewal": - var r FileContractRenewal + var r V2FileContractRenewal if err := json.Unmarshal(p.Resolution, &r); err != nil { return err } @@ -1094,7 +1100,7 @@ func (res *FileContractResolution) UnmarshalJSON(b []byte) error { } res.Resolution = r case "expiration": - var r FileContractExpiration + var r V2FileContractExpiration if err := json.Unmarshal(p.Resolution, &r); err != nil { return err } From 060406db73f6004c73b6541ebb7c62f3f5677ba3 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Wed, 26 Jul 2023 11:06:49 -0400 Subject: [PATCH 12/53] consensus: Use elements for v1 transactions too --- chain/db.go | 404 +++++++++++++++------------------- consensus/merkle.go | 203 ++++++++++++++--- consensus/state.go | 7 +- consensus/update.go | 460 ++++++++++++++------------------------- consensus/update_test.go | 46 ++-- consensus/validation.go | 103 +++++---- types/encoding.go | 12 + types/types.go | 14 ++ 8 files changed, 604 insertions(+), 645 deletions(-) diff --git a/chain/db.go b/chain/db.go index f309629a..3df35762 100644 --- a/chain/db.go +++ b/chain/db.go @@ -5,7 +5,7 @@ import ( "encoding/binary" "errors" "fmt" - "io" + "math/bits" "time" "go.sia.tech/core/consensus" @@ -187,15 +187,15 @@ func (b *dbBucket) delete(key []byte) { } var ( - bVersion = []byte("Version") - bMainChain = []byte("MainChain") - bCheckpoints = []byte("Checkpoints") - bFileContracts = []byte("FileContracts") - bSiacoinOutputs = []byte("SiacoinOutputs") - bSiafundOutputs = []byte("SiafundOutputs") - - keyFoundationOutputs = []byte("FoundationOutputs") - keyHeight = []byte("Height") + bVersion = []byte("Version") + bMainChain = 
[]byte("MainChain") + bCheckpoints = []byte("Checkpoints") + bFileContractElements = []byte("FileContracts") + bSiacoinElements = []byte("SiacoinElements") + bSiafundElements = []byte("SiafundElements") + bTree = []byte("Tree") + + keyHeight = []byte("Height") ) // DBStore implements Store using a key-value database. @@ -242,250 +242,201 @@ func (db *DBStore) putCheckpoint(c Checkpoint) { db.bucket(bCheckpoints).put(c.State.Index.ID[:], c) } -func (db *DBStore) putSiacoinOutput(id types.SiacoinOutputID, sco types.SiacoinOutput) { - db.bucket(bSiacoinOutputs).put(id[:], sco) -} - -func (db *DBStore) deleteSiacoinOutput(id types.SiacoinOutputID) { - db.bucket(bSiacoinOutputs).delete(id[:]) -} - -func (db *DBStore) putFileContract(id types.FileContractID, fc types.FileContract) { - b := db.bucket(bFileContracts) - b.put(id[:], fc) - - key := db.encHeight(fc.WindowEnd) - b.putRaw(key, append(b.getRaw(key), id[:]...)) -} - -func (db *DBStore) reviseFileContract(id types.FileContractID, fc types.FileContract) { - db.bucket(bFileContracts).put(id[:], fc) -} - -func (db *DBStore) deleteFileContracts(fcds []consensus.FileContractDiff) { - byHeight := make(map[uint64][]types.FileContractID) - b := db.bucket(bFileContracts) - for _, fcd := range fcds { - var fc types.FileContract - if !b.get(fcd.ID[:], &fc) { - check(fmt.Errorf("missing file contract %v", fcd.ID)) - } - b.delete(fcd.ID[:]) - byHeight[fc.WindowEnd] = append(byHeight[fc.WindowEnd], fcd.ID) - } - - for height, ids := range byHeight { - toDelete := make(map[types.FileContractID]struct{}) - for _, id := range ids { - toDelete[id] = struct{}{} - } - key := db.encHeight(height) - val := append([]byte(nil), b.getRaw(key)...) 
- for i := 0; i < len(val); i += 32 { - id := *(*types.FileContractID)(val[i:]) - if _, ok := toDelete[id]; ok { - copy(val[i:], val[len(val)-32:]) - val = val[:len(val)-32] - i -= 32 - delete(toDelete, id) - } - } - b.putRaw(key, val) - if len(toDelete) != 0 { - check(errors.New("missing expired file contract(s)")) - } +func (db *DBStore) encLeaf(index uint64, height int) []byte { + // For a given leaf index and height, we want to compute a key corresponding + // to the tree node at the given height within the leaf's proof path. For + // example, if height is 3, then we should return the same key for indices + // 0, 1, 2, and 3 (since all of these leaves share a parent at that height), + // and a different key for indices 4, 5, 6, and 7. + // + // This is easily achieved by masking the least significant height bits of + // index and prepending the height (to avoid collisions with lower levels). + // We can assume that the total number of elements is less than 2^32 (and + // thus the height will be less than 2^8), so the resulting key is 5 bytes. + // + // Can we do better? Yes -- we can fit it in 4 bytes, if we assume that the + // total number of elements is less than 2^31. This gives us 2^31 values for + // storing leaves, and 2^31 values for storing all the other nodes. We + // distinguish them by setting the top bit. Going up a level, we see that at + // most 2^30 values are needed, leaving 2^30 for the remaining levels; we + // distinguish these by setting the penultimate bit. Each time we ascend a + // level, we have one fewer bit to work with; but since each level requires + // half as many nodes as the previous, it balances out and we always have + // enough space. 
+ return binary.BigEndian.AppendUint32(nil, bits.RotateLeft32(uint32(index)|((1<= 0; i-- { td := diff.Transactions[i] - for _, fcd := range td.ValidFileContracts { - db.putFileContract(fcd.ID, fcd.Contract) + for _, fce := range td.ValidFileContracts { + db.putFileContract(fce) + } + for _, fcer := range td.RevisedFileContracts { + db.deleteFileContract(types.FileContractID(fcer.Parent.ID), fcer.Revision.WindowEnd) + db.putFileContract(fcer.Parent) + } + for _, sfe := range td.SpentSiafundElements { + db.putSiafundElement(sfe) + } + for _, sce := range td.SpentSiacoinElements { + db.putSiacoinElement(sce) + } + for _, fce := range td.CreatedFileContracts { + db.deleteFileContract(types.FileContractID(fce.ID), fce.WindowEnd) } - for _, fcrd := range td.RevisedFileContracts { - db.reviseFileContract(fcrd.ID, fcrd.OldContract) + for _, sfe := range td.CreatedSiafundElements { + db.deleteSiafundElement(types.SiafundOutputID(sfe.ID)) } - for _, sfod := range td.SpentSiafundOutputs { - db.putSiafundOutput(sfod.ID, sfod.Output, sfod.ClaimStart) + for _, sce := range td.CreatedSiacoinElements { + db.deleteSiacoinElement(types.SiacoinOutputID(sce.ID)) } - for _, scod := range td.SpentSiacoinOutputs { - db.putSiacoinOutput(scod.ID, scod.Output) + } + + for i := len(diff.V2Transactions) - 1; i >= 0; i-- { + td := diff.V2Transactions[i] + for _, sfe := range td.SpentSiafundElements { + db.putSiafundElement(sfe) } - db.deleteFileContracts(td.CreatedFileContracts) - for _, sfod := range td.CreatedSiafundOutputs { - db.deleteSiafundOutput(sfod.ID) + for _, sce := range td.SpentSiacoinElements { + db.putSiacoinElement(sce) } - db.deleteDelayedSiacoinOutputs(td.ImmatureSiacoinOutputs) - for _, scod := range td.CreatedSiacoinOutputs { - db.deleteSiacoinOutput(scod.ID) + for _, sfe := range td.CreatedSiafundElements { + db.deleteSiafundElement(types.SiafundOutputID(sfe.ID)) + } + for _, sce := range td.CreatedSiacoinElements { + db.deleteSiacoinElement(types.SiacoinOutputID(sce.ID)) 
} } + + // TODO: proofs!!!! } // BestIndex implements consensus.Store. @@ -510,21 +461,23 @@ func (db *DBStore) AncestorTimestamp(id types.BlockID, n uint64) time.Time { return c.Block.Timestamp } -// SiacoinOutput implements consensus.Store. -func (db *DBStore) SiacoinOutput(id types.SiacoinOutputID) (sco types.SiacoinOutput, ok bool) { - ok = db.bucket(bSiacoinOutputs).get(id[:], &sco) +// SiacoinElement implements consensus.Store. +func (db *DBStore) SiacoinElement(id types.SiacoinOutputID) (sce types.SiacoinElement, ok bool) { + ok = db.bucket(bSiacoinElements).get(id[:], &sce) + sce.MerkleProof = db.getElementProof(sce.LeafIndex) return } -// FileContract implements consensus.Store. -func (db *DBStore) FileContract(id types.FileContractID) (fc types.FileContract, ok bool) { - ok = db.bucket(bFileContracts).get(id[:], &fc) +// FileContractElement implements consensus.Store. +func (db *DBStore) FileContractElement(id types.FileContractID) (fce types.FileContractElement, ok bool) { + ok = db.bucket(bFileContractElements).get(id[:], &fce) + fce.MerkleProof = db.getElementProof(fce.LeafIndex) return } // MissedFileContracts implements consensus.Store. func (db *DBStore) MissedFileContracts(height uint64) (fcids []types.FileContractID) { - ids := db.bucket(bFileContracts).getRaw(db.encHeight(height)) + ids := db.bucket(bFileContractElements).getRaw(db.encHeight(height)) for i := 0; i < len(ids); i += 32 { fcids = append(fcids, *(*types.FileContractID)(ids[i:])) } @@ -532,27 +485,9 @@ func (db *DBStore) MissedFileContracts(height uint64) (fcids []types.FileContrac } // SiafundOutput implements consensus.Store. -func (db *DBStore) SiafundOutput(id types.SiafundOutputID) (sfo types.SiafundOutput, claimStart types.Currency, ok bool) { - var csfo claimSFO - ok = db.bucket(bSiafundOutputs).get(id[:], &csfo) - return csfo.Output, csfo.ClaimStart, ok -} - -// MaturedSiacoinOutputs implements consensus.Store. 
-func (db *DBStore) MaturedSiacoinOutputs(height uint64) (dscods []consensus.DelayedSiacoinOutputDiff) { - dscos := db.bucket(bSiacoinOutputs).getRaw(db.encHeight(height)) - d := types.NewBufDecoder(dscos) - for { - var dscod consensus.DelayedSiacoinOutputDiff - dscod.DecodeFrom(d) - if d.Err() != nil { - break - } - dscods = append(dscods, dscod) - } - if !errors.Is(d.Err(), io.EOF) { - check(d.Err()) - } +func (db *DBStore) SiafundElement(id types.SiafundOutputID) (sfe types.SiafundElement, ok bool) { + ok = db.bucket(bSiafundElements).get(id[:], &sfe) + sfe.MerkleProof = db.getElementProof(sfe.LeafIndex) return } @@ -587,7 +522,7 @@ func (db *DBStore) flush() { // ApplyDiff implements Store. func (db *DBStore) ApplyDiff(s consensus.State, diff consensus.BlockDiff, mustCommit bool) (committed bool) { db.applyState(s) - db.applyDiff(s, diff) + db.applyDiff(diff) committed = mustCommit || db.shouldFlush() if committed { db.flush() @@ -597,7 +532,7 @@ func (db *DBStore) ApplyDiff(s consensus.State, diff consensus.BlockDiff, mustCo // RevertDiff implements Store. 
func (db *DBStore) RevertDiff(s consensus.State, diff consensus.BlockDiff) { - db.revertDiff(s, diff) + db.revertDiff(diff) db.revertState(s) if db.shouldFlush() { db.flush() @@ -637,9 +572,10 @@ func NewDBStore(db DB, n *consensus.Network, genesisBlock types.Block) (_ *DBSto bVersion, bMainChain, bCheckpoints, - bFileContracts, - bSiacoinOutputs, - bSiafundOutputs, + bFileContractElements, + bSiacoinElements, + bSiafundElements, + bTree, } { if _, err := db.CreateBucket(bucket); err != nil { panic(err) @@ -653,7 +589,7 @@ func NewDBStore(db DB, n *consensus.Network, genesisBlock types.Block) (_ *DBSto diff := consensus.ApplyDiff(genesisState, dbs, genesisBlock) dbs.putCheckpoint(Checkpoint{genesisBlock, cs, &diff}) dbs.applyState(cs) - dbs.applyDiff(cs, diff) + dbs.applyDiff(diff) dbs.flush() } else if dbGenesis.ID != genesisBlock.ID() { // try to detect network so we can provide a more helpful error message diff --git a/consensus/merkle.go b/consensus/merkle.go index 32d949b4..69497a44 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -20,9 +20,6 @@ func mergeHeight(x, y uint64) int { return bits.Len64(x ^ y) } // clearBits clears the n least significant bits of x. func clearBits(x uint64, n int) uint64 { return x &^ (1< startOfNewTree && j >= 0; j-- { - leaves[j].MerkleProof = append(leaves[j].MerkleProof, oldRoot) + element(j).MerkleProof = append(element(j).MerkleProof, oldRoot) } for ; j > startOfOldTree && j >= 0; j-- { - leaves[j].MerkleProof = append(leaves[j].MerkleProof, h) + element(j).MerkleProof = append(element(j).MerkleProof, h) } // Record the left- and right-hand roots in treeGrowth, where // applicable. @@ -306,7 +332,39 @@ func (acc *ElementAccumulator) addLeaves(leaves []ElementLeaf) [64][]types.Hash2 // root is always the left-hand sibling. 
h = blake2b.SumPair(oldRoot, h) } + return } + + i := 0 + for _, tdiff := range diff.Transactions { + for _, sce := range tdiff.CreatedSiacoinElements { + sce.LeafIndex = add(i, SiacoinLeaf(sce, false).ElementHash) + i++ + } + for _, sfe := range tdiff.CreatedSiafundElements { + sfe.LeafIndex = add(i, SiafundLeaf(sfe, false).ElementHash) + i++ + } + for _, fce := range tdiff.CreatedFileContracts { + fce.LeafIndex = add(i, FileContractLeaf(fce, false).ElementHash) + i++ + } + } + for _, tdiff := range diff.V2Transactions { + for _, sce := range tdiff.CreatedSiacoinElements { + sce.LeafIndex = add(i, SiacoinLeaf(sce, false).ElementHash) + i++ + } + for _, sfe := range tdiff.CreatedSiafundElements { + sfe.LeafIndex = add(i, SiafundLeaf(sfe, false).ElementHash) + i++ + } + for _, fce := range tdiff.CreatedFileContracts { + fce.LeafIndex = add(i, V2FileContractLeaf(fce, false).ElementHash) + i++ + } + } + return treeGrowth } @@ -318,7 +376,66 @@ func splitLeaves(ls []ElementLeaf, mid uint64) (left, right []ElementLeaf) { // updateLeaves overwrites the specified leaves in the accumulator. It updates // the Merkle proofs of each leaf, and returns the leaves (grouped by tree) for // later use. 
-func (acc *ElementAccumulator) updateLeaves(leaves []ElementLeaf) [64][]ElementLeaf { +func (acc *ElementAccumulator) updateLeaves(diff *BlockDiff) [64][]ElementLeaf { + var leaves []ElementLeaf + for _, tdiff := range diff.Transactions { + for _, sce := range tdiff.SpentSiacoinElements { + leaves = append(leaves, SiacoinLeaf(sce, true)) + } + for _, sfe := range tdiff.SpentSiafundElements { + leaves = append(leaves, SiafundLeaf(sfe, true)) + } + for _, fce := range tdiff.ValidFileContracts { + leaves = append(leaves, FileContractLeaf(fce, true)) + } + for _, fcer := range tdiff.RevisedFileContracts { + leaves = append(leaves, FileContractLeaf(fcer.Parent, false)) + } + } + for _, fce := range diff.MissedFileContracts { + leaves = append(leaves, FileContractLeaf(fce, true)) + } + for _, tdiff := range diff.V2Transactions { + for _, sce := range tdiff.SpentSiacoinElements { + leaves = append(leaves, SiacoinLeaf(sce, true)) + } + for _, sfe := range tdiff.SpentSiafundElements { + leaves = append(leaves, SiafundLeaf(sfe, true)) + } + for _, fcr := range tdiff.RevisedFileContracts { + fce := fcr.Parent + fce.V2FileContract = fcr.Revision + leaves = append(leaves, V2FileContractLeaf(fce, false)) + } + for _, res := range tdiff.ResolvedFileContracts { + fce := res.Parent + switch r := res.Resolution.(type) { + case types.V2FileContractRenewal: + fce.V2FileContract = r.FinalRevision + case types.V2FileContract: // finalization + fce.V2FileContract = r + } + leaves = append(leaves, V2FileContractLeaf(fce, true)) + } + } + + // Group leaves by tree, and sort them by leaf index. 
+ var trees [64][]ElementLeaf + sort.Slice(leaves, func(i, j int) bool { + if len(leaves[i].MerkleProof) != len(leaves[j].MerkleProof) { + return len(leaves[i].MerkleProof) < len(leaves[j].MerkleProof) + } + return leaves[i].LeafIndex < leaves[j].LeafIndex + }) + for len(leaves) > 0 { + i := 0 + for i < len(leaves) && len(leaves[i].MerkleProof) == len(leaves[0].MerkleProof) { + i++ + } + trees[len(leaves[0].MerkleProof)] = leaves[:i] + leaves = leaves[i:] + } + var recompute func(i, j uint64, leaves []ElementLeaf) types.Hash256 recompute = func(i, j uint64, leaves []ElementLeaf) types.Hash256 { height := bits.TrailingZeros64(j - i) // equivalent to log2(j-i), as j-i is always a power of two @@ -347,23 +464,6 @@ func (acc *ElementAccumulator) updateLeaves(leaves []ElementLeaf) [64][]ElementL return blake2b.SumPair(leftRoot, rightRoot) } - // Group leaves by tree, and sort them by leaf index. - var trees [64][]ElementLeaf - sort.Slice(leaves, func(i, j int) bool { - if len(leaves[i].MerkleProof) != len(leaves[j].MerkleProof) { - return len(leaves[i].MerkleProof) < len(leaves[j].MerkleProof) - } - return leaves[i].LeafIndex < leaves[j].LeafIndex - }) - for len(leaves) > 0 { - i := 0 - for i < len(leaves) && len(leaves[i].MerkleProof) == len(leaves[0].MerkleProof) { - i++ - } - trees[len(leaves[0].MerkleProof)] = leaves[:i] - leaves = leaves[i:] - } - // Recompute the root of each tree with updated leaves, and fill in the // proof of each leaf. 
for height, leaves := range &trees { @@ -378,14 +478,53 @@ func (acc *ElementAccumulator) updateLeaves(leaves []ElementLeaf) [64][]ElementL end := start + 1< OutputSourceFoundation { - return fmt.Sprintf("DelayedOutputSource(%d)", d) - } - return [...]string{ - OutputSourceMiner: "miner payout", - OutputSourceValidContract: "valid contract", - OutputSourceMissedContract: "missed contract", - OutputSourceSiafundClaim: "siafund claim", - OutputSourceFoundation: "foundation subsidy", - }[d] -} - -// MarshalText implements encoding.TextMarshaler. -func (d DelayedOutputSource) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (d *DelayedOutputSource) UnmarshalText(b []byte) error { - switch string(b) { - case OutputSourceMiner.String(): - *d = OutputSourceMiner - case OutputSourceValidContract.String(): - *d = OutputSourceValidContract - case OutputSourceMissedContract.String(): - *d = OutputSourceMissedContract - case OutputSourceSiafundClaim.String(): - *d = OutputSourceSiafundClaim - case OutputSourceFoundation.String(): - *d = OutputSourceFoundation - default: - return fmt.Errorf("unrecognized DelayedOutputSource %q", b) - } - return nil -} - -// A DelayedSiacoinOutputDiff records the creation, deletion, or maturation of a -// delayed SiacoinOutput. "Delayed" means that the output is immature when -// created; it may only be spent when the "MaturityHeight" is reached. -type DelayedSiacoinOutputDiff struct { - ID types.SiacoinOutputID `json:"ID"` - Output types.SiacoinOutput `json:"output"` - Source DelayedOutputSource `json:"source"` - MaturityHeight uint64 `json:"maturityHeight"` -} - -// EncodeTo implements types.EncoderTo. -func (dscod DelayedSiacoinOutputDiff) EncodeTo(e *types.Encoder) { - dscod.ID.EncodeTo(e) - dscod.Output.EncodeTo(e) - e.WriteUint8(uint8(dscod.Source)) - e.WriteUint64(dscod.MaturityHeight) -} - -// DecodeFrom implements types.DecoderFrom. 
-func (dscod *DelayedSiacoinOutputDiff) DecodeFrom(d *types.Decoder) { - dscod.ID.DecodeFrom(d) - dscod.Output.DecodeFrom(d) - dscod.Source = DelayedOutputSource(d.ReadUint8()) - dscod.MaturityHeight = d.ReadUint64() -} - -// A SiafundOutputDiff records the creation, deletion, or spending of a -// SiafundOutput. -type SiafundOutputDiff struct { - ID types.SiafundOutputID `json:"ID"` - Output types.SiafundOutput `json:"output"` - ClaimStart types.Currency `json:"claimStart"` -} - -// EncodeTo implements types.EncoderTo. -func (sfod SiafundOutputDiff) EncodeTo(e *types.Encoder) { - sfod.ID.EncodeTo(e) - sfod.Output.EncodeTo(e) - sfod.ClaimStart.EncodeTo(e) -} - -// DecodeFrom implements types.DecoderFrom. -func (sfod *SiafundOutputDiff) DecodeFrom(d *types.Decoder) { - sfod.ID.DecodeFrom(d) - sfod.Output.DecodeFrom(d) - sfod.ClaimStart.DecodeFrom(d) -} - -// A FileContractDiff records the creation, deletion, or resolution of a -// FileContract. -type FileContractDiff struct { - ID types.FileContractID `json:"ID"` - Contract types.FileContract `json:"contract"` -} - -// EncodeTo implements types.EncoderTo. -func (fcd FileContractDiff) EncodeTo(e *types.Encoder) { - fcd.ID.EncodeTo(e) - fcd.Contract.EncodeTo(e) -} - -// DecodeFrom implements types.DecoderFrom. -func (fcd *FileContractDiff) DecodeFrom(d *types.Decoder) { - fcd.ID.DecodeFrom(d) - fcd.Contract.DecodeFrom(d) -} - -// A FileContractRevisionDiff records the revision of a FileContract. -type FileContractRevisionDiff struct { - ID types.FileContractID `json:"ID"` - OldContract types.FileContract `json:"oldContract"` - NewContract types.FileContract `json:"newContract"` -} - -// EncodeTo implements types.EncoderTo. -func (fcrd FileContractRevisionDiff) EncodeTo(e *types.Encoder) { - fcrd.ID.EncodeTo(e) - fcrd.OldContract.EncodeTo(e) - fcrd.NewContract.EncodeTo(e) -} - -// DecodeFrom implements types.DecoderFrom. 
-func (fcrd *FileContractRevisionDiff) DecodeFrom(d *types.Decoder) { - fcrd.ID.DecodeFrom(d) - fcrd.OldContract.DecodeFrom(d) - fcrd.NewContract.DecodeFrom(d) -} - // A TransactionDiff represents the changes to an ElementStore resulting from // the application of a transaction. type TransactionDiff struct { - CreatedSiacoinOutputs []SiacoinOutputDiff `json:"createdSiacoinOutputs,omitempty"` - ImmatureSiacoinOutputs []DelayedSiacoinOutputDiff `json:"immatureSiacoinOutputs,omitempty"` - CreatedSiafundOutputs []SiafundOutputDiff `json:"createdSiafundOutputs,omitempty"` - CreatedFileContracts []FileContractDiff `json:"createdFileContracts,omitempty"` - - SpentSiacoinOutputs []SiacoinOutputDiff `json:"spentSiacoinOutputs,omitempty"` - SpentSiafundOutputs []SiafundOutputDiff `json:"spentSiafundOutputs,omitempty"` - RevisedFileContracts []FileContractRevisionDiff `json:"revisedFileContracts,omitempty"` - ValidFileContracts []FileContractDiff `json:"validFileContracts,omitempty"` + CreatedSiacoinElements []types.SiacoinElement `json:"createdSiacoinElements,omitempty"` + CreatedSiafundElements []types.SiafundElement `json:"createdSiafundElements,omitempty"` + CreatedFileContracts []types.FileContractElement `json:"createdFileContracts,omitempty"` + + SpentSiacoinElements []types.SiacoinElement `json:"spentSiacoinElements,omitempty"` + SpentSiafundElements []types.SiafundElement `json:"spentSiafundElements,omitempty"` + RevisedFileContracts []types.FileContractElementRevision `json:"revisedFileContracts,omitempty"` + ValidFileContracts []types.FileContractElement `json:"validFileContracts,omitempty"` } // EncodeTo implements types.EncoderTo. 
func (td TransactionDiff) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(td.CreatedSiacoinOutputs)) - for i := range td.CreatedSiacoinOutputs { - td.CreatedSiacoinOutputs[i].EncodeTo(e) - } - e.WritePrefix(len(td.ImmatureSiacoinOutputs)) - for i := range td.ImmatureSiacoinOutputs { - td.ImmatureSiacoinOutputs[i].EncodeTo(e) + e.WritePrefix(len(td.CreatedSiacoinElements)) + for i := range td.CreatedSiacoinElements { + td.CreatedSiacoinElements[i].EncodeTo(e) } - e.WritePrefix(len(td.CreatedSiafundOutputs)) - for i := range td.CreatedSiafundOutputs { - td.CreatedSiafundOutputs[i].EncodeTo(e) + e.WritePrefix(len(td.CreatedSiafundElements)) + for i := range td.CreatedSiafundElements { + td.CreatedSiafundElements[i].EncodeTo(e) } e.WritePrefix(len(td.CreatedFileContracts)) for i := range td.CreatedFileContracts { td.CreatedFileContracts[i].EncodeTo(e) } - e.WritePrefix(len(td.SpentSiacoinOutputs)) - for i := range td.SpentSiacoinOutputs { - td.SpentSiacoinOutputs[i].EncodeTo(e) + e.WritePrefix(len(td.SpentSiacoinElements)) + for i := range td.SpentSiacoinElements { + td.SpentSiacoinElements[i].EncodeTo(e) } - e.WritePrefix(len(td.SpentSiafundOutputs)) - for i := range td.SpentSiafundOutputs { - td.SpentSiafundOutputs[i].EncodeTo(e) + e.WritePrefix(len(td.SpentSiafundElements)) + for i := range td.SpentSiafundElements { + td.SpentSiafundElements[i].EncodeTo(e) } e.WritePrefix(len(td.RevisedFileContracts)) for i := range td.RevisedFileContracts { @@ -488,35 +324,31 @@ func (td TransactionDiff) EncodeTo(e *types.Encoder) { // DecodeFrom implements types.DecoderFrom. 
func (td *TransactionDiff) DecodeFrom(d *types.Decoder) { - td.CreatedSiacoinOutputs = make([]SiacoinOutputDiff, d.ReadPrefix()) - for i := range td.CreatedSiacoinOutputs { - td.CreatedSiacoinOutputs[i].DecodeFrom(d) - } - td.ImmatureSiacoinOutputs = make([]DelayedSiacoinOutputDiff, d.ReadPrefix()) - for i := range td.ImmatureSiacoinOutputs { - td.ImmatureSiacoinOutputs[i].DecodeFrom(d) + td.CreatedSiacoinElements = make([]types.SiacoinElement, d.ReadPrefix()) + for i := range td.CreatedSiacoinElements { + td.CreatedSiacoinElements[i].DecodeFrom(d) } - td.CreatedSiafundOutputs = make([]SiafundOutputDiff, d.ReadPrefix()) - for i := range td.CreatedSiafundOutputs { - td.CreatedSiafundOutputs[i].DecodeFrom(d) + td.CreatedSiafundElements = make([]types.SiafundElement, d.ReadPrefix()) + for i := range td.CreatedSiafundElements { + td.CreatedSiafundElements[i].DecodeFrom(d) } - td.CreatedFileContracts = make([]FileContractDiff, d.ReadPrefix()) + td.CreatedFileContracts = make([]types.FileContractElement, d.ReadPrefix()) for i := range td.CreatedFileContracts { td.CreatedFileContracts[i].DecodeFrom(d) } - td.SpentSiacoinOutputs = make([]SiacoinOutputDiff, d.ReadPrefix()) - for i := range td.SpentSiacoinOutputs { - td.SpentSiacoinOutputs[i].DecodeFrom(d) + td.SpentSiacoinElements = make([]types.SiacoinElement, d.ReadPrefix()) + for i := range td.SpentSiacoinElements { + td.SpentSiacoinElements[i].DecodeFrom(d) } - td.SpentSiafundOutputs = make([]SiafundOutputDiff, d.ReadPrefix()) - for i := range td.SpentSiafundOutputs { - td.SpentSiafundOutputs[i].DecodeFrom(d) + td.SpentSiafundElements = make([]types.SiafundElement, d.ReadPrefix()) + for i := range td.SpentSiafundElements { + td.SpentSiafundElements[i].DecodeFrom(d) } - td.RevisedFileContracts = make([]FileContractRevisionDiff, d.ReadPrefix()) + td.RevisedFileContracts = make([]types.FileContractElementRevision, d.ReadPrefix()) for i := range td.RevisedFileContracts { td.RevisedFileContracts[i].DecodeFrom(d) } - 
td.ValidFileContracts = make([]FileContractDiff, d.ReadPrefix()) + td.ValidFileContracts = make([]types.FileContractElement, d.ReadPrefix()) for i := range td.ValidFileContracts { td.ValidFileContracts[i].DecodeFrom(d) } @@ -528,6 +360,13 @@ type V2TransactionDiff struct { CreatedSiacoinElements []types.SiacoinElement `json:"createdSiacoinElements,omitempty"` CreatedSiafundElements []types.SiafundElement `json:"createdSiafundElements,omitempty"` CreatedFileContracts []types.V2FileContractElement `json:"createdFileContracts,omitempty"` + + // NOTE: these fields are all easily derived from the block itself; we + // include them for convenience + SpentSiacoinElements []types.SiacoinElement `json:"spentSiacoinElements,omitempty"` + SpentSiafundElements []types.SiafundElement `json:"spentSiafundElements,omitempty"` + RevisedFileContracts []types.V2FileContractRevision `json:"revisedFileContracts,omitempty"` + ResolvedFileContracts []types.V2FileContractResolution `json:"resolvedFileContracts,omitempty"` } // EncodeTo implements types.EncoderTo. @@ -565,11 +404,12 @@ func (td *V2TransactionDiff) DecodeFrom(d *types.Decoder) { // A BlockDiff represents the changes to blockchain state resulting from the // application of a block. 
type BlockDiff struct { - Transactions []TransactionDiff `json:"transactions,omitempty"` - V2Transactions []V2TransactionDiff `json:"v2Transactions,omitempty"` - MaturedSiacoinOutputs []DelayedSiacoinOutputDiff `json:"maturedSiacoinOutputs,omitempty"` - ImmatureSiacoinOutputs []DelayedSiacoinOutputDiff `json:"immatureSiacoinOutputs,omitempty"` - MissedFileContracts []FileContractDiff `json:"missedFileContracts,omitempty"` + Transactions []TransactionDiff `json:"transactions,omitempty"` + V2Transactions []V2TransactionDiff `json:"v2Transactions,omitempty"` + CreatedSiacoinElements []types.SiacoinElement `json:"createdSiacoinElements,omitempty"` + MissedFileContracts []types.FileContractElement `json:"missedFileContracts,omitempty"` + ElementApplyUpdate ElementApplyUpdate `json:"-"` + HistoryApplyUpdate HistoryApplyUpdate `json:"-"` } // EncodeTo implements types.EncoderTo. @@ -578,13 +418,13 @@ func (bd BlockDiff) EncodeTo(e *types.Encoder) { for i := range bd.Transactions { bd.Transactions[i].EncodeTo(e) } - e.WritePrefix(len(bd.ImmatureSiacoinOutputs)) - for i := range bd.ImmatureSiacoinOutputs { - bd.ImmatureSiacoinOutputs[i].EncodeTo(e) + e.WritePrefix(len(bd.V2Transactions)) + for i := range bd.V2Transactions { + bd.V2Transactions[i].EncodeTo(e) } - e.WritePrefix(len(bd.MaturedSiacoinOutputs)) - for i := range bd.MaturedSiacoinOutputs { - bd.MaturedSiacoinOutputs[i].EncodeTo(e) + e.WritePrefix(len(bd.CreatedSiacoinElements)) + for i := range bd.CreatedSiacoinElements { + bd.CreatedSiacoinElements[i].EncodeTo(e) } e.WritePrefix(len(bd.MissedFileContracts)) for i := range bd.MissedFileContracts { @@ -598,15 +438,15 @@ func (bd *BlockDiff) DecodeFrom(d *types.Decoder) { for i := range bd.Transactions { bd.Transactions[i].DecodeFrom(d) } - bd.ImmatureSiacoinOutputs = make([]DelayedSiacoinOutputDiff, d.ReadPrefix()) - for i := range bd.ImmatureSiacoinOutputs { - bd.ImmatureSiacoinOutputs[i].DecodeFrom(d) + bd.V2Transactions = make([]V2TransactionDiff, 
d.ReadPrefix()) + for i := range bd.V2Transactions { + bd.V2Transactions[i].DecodeFrom(d) } - bd.MaturedSiacoinOutputs = make([]DelayedSiacoinOutputDiff, d.ReadPrefix()) - for i := range bd.MaturedSiacoinOutputs { - bd.MaturedSiacoinOutputs[i].DecodeFrom(d) + bd.CreatedSiacoinElements = make([]types.SiacoinElement, d.ReadPrefix()) + for i := range bd.CreatedSiacoinElements { + bd.CreatedSiacoinElements[i].DecodeFrom(d) } - bd.MissedFileContracts = make([]FileContractDiff, d.ReadPrefix()) + bd.MissedFileContracts = make([]types.FileContractElement, d.ReadPrefix()) for i := range bd.MissedFileContracts { bd.MissedFileContracts[i].DecodeFrom(d) } @@ -619,26 +459,38 @@ func (ms *MidState) ApplyTransaction(store Store, txn types.Transaction) { ms.spends[types.Hash256(sci.ParentID)] = txid } for i, sco := range txn.SiacoinOutputs { - ms.scos[txn.SiacoinOutputID(i)] = sco + scoid := txn.SiacoinOutputID(i) + ms.sces[scoid] = types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(scoid)}, + SiacoinOutput: sco, + MaturityHeight: 0, + } } for _, sfi := range txn.SiafundInputs { ms.spends[types.Hash256(sfi.ParentID)] = txid } for i, sfo := range txn.SiafundOutputs { sfoid := txn.SiafundOutputID(i) - ms.sfos[sfoid] = sfo - ms.claims[sfoid] = ms.siafundPool + ms.sfes[sfoid] = types.SiafundElement{ + StateElement: types.StateElement{ID: types.Hash256(sfoid)}, + SiafundOutput: sfo, + ClaimStart: ms.siafundPool, + } } for i, fc := range txn.FileContracts { - ms.fcs[txn.FileContractID(i)] = fc + fcid := txn.FileContractID(i) + ms.fces[fcid] = types.FileContractElement{ + StateElement: types.StateElement{ID: types.Hash256(fcid)}, + FileContract: fc, + } ms.siafundPool = ms.siafundPool.Add(ms.base.FileContractTax(fc)) } for _, fcr := range txn.FileContractRevisions { - fc := ms.mustFileContract(store, fcr.ParentID) - newContract := fcr.FileContract - newContract.Payout = fc.Payout // see types.FileContractRevision docstring - ms.fcs[fcr.ParentID] = newContract - 
ms.fcs[contractRevisionID(fcr.ParentID, fcr.RevisionNumber)] = fc // store previous revision for Diff later + fce := ms.mustFileContractElement(store, fcr.ParentID) + ms.fces[contractRevisionID(fcr.ParentID, fcr.RevisionNumber)] = fce // store previous revision for Diff later + fcr.FileContract.Payout = fce.Payout + fce.FileContract = fcr.FileContract + ms.fces[fcr.ParentID] = fce } for _, sp := range txn.StorageProofs { ms.spends[types.Hash256(sp.ParentID)] = txid @@ -652,22 +504,36 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { ms.spends[sci.Parent.ID] = txid } for i, sco := range txn.SiacoinOutputs { - ms.scos[v2SiacoinOutputID(txid, i)] = sco + scoid := v2SiacoinOutputID(txid, i) + ms.sces[scoid] = types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(scoid)}, + SiacoinOutput: sco, + MaturityHeight: 0, + } } for _, sfi := range txn.SiafundInputs { ms.spends[sfi.Parent.ID] = txid } for i, sfo := range txn.SiafundOutputs { sfoid := v2SiafundOutputID(txid, i) - ms.sfos[sfoid] = sfo - ms.claims[sfoid] = ms.siafundPool + ms.sfes[sfoid] = types.SiafundElement{ + StateElement: types.StateElement{ID: types.Hash256(sfoid)}, + SiafundOutput: sfo, + ClaimStart: ms.siafundPool, + } } for i, fc := range txn.FileContracts { - ms.v2fcs[v2FileContractID(txid, i)] = fc + fcid := v2FileContractID(txid, i) + ms.v2fces[fcid] = types.V2FileContractElement{ + StateElement: types.StateElement{ID: types.Hash256(fcid)}, + V2FileContract: fc, + } ms.siafundPool = ms.siafundPool.Add(ms.base.V2FileContractTax(fc)) } for _, fcr := range txn.FileContractRevisions { - ms.v2fcs[types.FileContractID(fcr.Parent.ID)] = fcr.Revision + fce := fcr.Parent + fce.V2FileContract = fcr.Revision + ms.v2fces[types.FileContractID(fcr.Parent.ID)] = fce } for _, res := range txn.FileContractResolutions { ms.spends[res.Parent.ID] = txid @@ -686,72 +552,59 @@ func ApplyDiff(s State, store Store, b types.Block) BlockDiff { for _, txn := range b.Transactions { var tdiff 
TransactionDiff for _, sci := range txn.SiacoinInputs { - tdiff.SpentSiacoinOutputs = append(tdiff.SpentSiacoinOutputs, SiacoinOutputDiff{ - ID: sci.ParentID, - Output: ms.mustSiacoinOutput(store, sci.ParentID), - }) + tdiff.SpentSiacoinElements = append(tdiff.SpentSiacoinElements, ms.mustSiacoinElement(store, sci.ParentID)) } for i, sco := range txn.SiacoinOutputs { scoid := txn.SiacoinOutputID(i) - tdiff.CreatedSiacoinOutputs = append(tdiff.CreatedSiacoinOutputs, SiacoinOutputDiff{ - ID: scoid, - Output: sco, + tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(scoid)}, + SiacoinOutput: sco, }) } for i, fc := range txn.FileContracts { fcid := txn.FileContractID(i) - tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, FileContractDiff{ - ID: fcid, - Contract: fc, + tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.FileContractElement{ + StateElement: types.StateElement{ID: types.Hash256(fcid)}, + FileContract: fc, }) } for _, sfi := range txn.SiafundInputs { - sfo, claimStart, claimPortion := ms.mustSiafundOutput(store, sfi.ParentID) - tdiff.SpentSiafundOutputs = append(tdiff.SpentSiafundOutputs, SiafundOutputDiff{ - ID: sfi.ParentID, - Output: sfo, - ClaimStart: claimStart, - }) - tdiff.ImmatureSiacoinOutputs = append(tdiff.ImmatureSiacoinOutputs, DelayedSiacoinOutputDiff{ - ID: sfi.ParentID.ClaimOutputID(), - Output: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, - Source: OutputSourceSiafundClaim, + sfe, claimPortion := ms.mustSiafundElement(store, sfi.ParentID) + tdiff.SpentSiafundElements = append(tdiff.SpentSiafundElements, sfe) + tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(sfi.ParentID.ClaimOutputID())}, + SiacoinOutput: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, MaturityHeight: 
s.MaturityHeight(), }) } for i, sfo := range txn.SiafundOutputs { sfoid := txn.SiafundOutputID(i) - tdiff.CreatedSiafundOutputs = append(tdiff.CreatedSiafundOutputs, SiafundOutputDiff{ - ID: sfoid, - Output: sfo, - ClaimStart: ms.siafundPool, + tdiff.CreatedSiafundElements = append(tdiff.CreatedSiafundElements, types.SiafundElement{ + StateElement: types.StateElement{ID: types.Hash256(sfoid)}, + SiafundOutput: sfo, + ClaimStart: ms.siafundPool, }) } for _, fcr := range txn.FileContractRevisions { - fc := ms.mustFileContractParentRevision(store, fcr.ParentID, fcr.RevisionNumber) - newContract := fcr.FileContract - newContract.Payout = fc.Payout // see types.FileContractRevision docstring - tdiff.RevisedFileContracts = append(tdiff.RevisedFileContracts, FileContractRevisionDiff{ - ID: fcr.ParentID, - OldContract: fc, - NewContract: newContract, + fce := ms.mustFileContractParentRevision(store, fcr.ParentID, fcr.RevisionNumber) + tdiff.RevisedFileContracts = append(tdiff.RevisedFileContracts, types.FileContractElementRevision{ + Parent: fce, + Revision: fcr.FileContract, }) } for _, sp := range txn.StorageProofs { - fc := ms.mustFileContract(store, sp.ParentID) - tdiff.ValidFileContracts = append(tdiff.ValidFileContracts, FileContractDiff{ - ID: sp.ParentID, - Contract: fc, - }) - for i, sco := range fc.ValidProofOutputs { - tdiff.ImmatureSiacoinOutputs = append(tdiff.ImmatureSiacoinOutputs, DelayedSiacoinOutputDiff{ - ID: sp.ParentID.ValidOutputID(i), - Output: sco, - Source: OutputSourceValidContract, + fce := ms.mustFileContractElement(store, sp.ParentID) + tdiff.ValidFileContracts = append(tdiff.ValidFileContracts, fce) + for i, sco := range fce.ValidProofOutputs { + scoid := sp.ParentID.ValidOutputID(i) + tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(scoid)}, + SiacoinOutput: sco, MaturityHeight: s.MaturityHeight(), }) } } + diff.Transactions = 
append(diff.Transactions, tdiff) ms.ApplyTransaction(store, txn) } @@ -760,9 +613,14 @@ func ApplyDiff(s State, store Store, b types.Block) BlockDiff { for _, txn := range b.V2.Transactions { var tdiff V2TransactionDiff txid := txn.ID() + + for _, sci := range txn.SiacoinInputs { + tdiff.SpentSiacoinElements = append(tdiff.SpentSiacoinElements, sci.Parent) + } for _, sco := range txn.SiacoinOutputs { + scoid := v2SiacoinOutputID(txid, len(tdiff.CreatedSiacoinElements)) tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(v2SiacoinOutputID(txid, len(tdiff.CreatedSiacoinElements)))}, + StateElement: types.StateElement{ID: types.Hash256(scoid)}, SiacoinOutput: sco, }) } @@ -773,24 +631,30 @@ func ApplyDiff(s State, store Store, b types.Block) BlockDiff { }) } for _, sfi := range txn.SiafundInputs { + tdiff.SpentSiafundElements = append(tdiff.SpentSiafundElements, sfi.Parent) + scoid := v2SiacoinOutputID(txid, len(tdiff.CreatedSiacoinElements)) claimPortion := ms.siafundPool.Sub(sfi.Parent.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfi.Parent.Value) tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(v2SiacoinOutputID(txid, len(tdiff.CreatedSiacoinElements)))}, + StateElement: types.StateElement{ID: types.Hash256(scoid)}, SiacoinOutput: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, MaturityHeight: s.MaturityHeight(), }) } for _, sfo := range txn.SiafundOutputs { + sfoid := v2SiafundOutputID(txid, len(tdiff.CreatedSiafundElements)) tdiff.CreatedSiafundElements = append(tdiff.CreatedSiafundElements, types.SiafundElement{ - StateElement: types.StateElement{ID: types.Hash256(v2SiafundOutputID(txid, len(tdiff.CreatedSiafundElements)))}, + StateElement: types.StateElement{ID: types.Hash256(sfoid)}, SiafundOutput: sfo, ClaimStart: ms.siafundPool, }) } + 
tdiff.RevisedFileContracts = append(tdiff.RevisedFileContracts, txn.FileContractRevisions...) + tdiff.ResolvedFileContracts = append(tdiff.ResolvedFileContracts, txn.FileContractResolutions...) for _, res := range txn.FileContractResolutions { if r, ok := res.Resolution.(types.V2FileContractRenewal); ok { + fcid := v2FileContractID(txid, len(tdiff.CreatedFileContracts)) tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.V2FileContractElement{ - StateElement: types.StateElement{ID: types.Hash256(v2FileContractID(txid, len(tdiff.CreatedFileContracts)))}, + StateElement: types.StateElement{ID: types.Hash256(fcid)}, V2FileContract: r.InitialRevision, }) } @@ -801,12 +665,11 @@ func ApplyDiff(s State, store Store, b types.Block) BlockDiff { } bid := b.ID() - diff.MaturedSiacoinOutputs = store.MaturedSiacoinOutputs(s.childHeight()) for i, sco := range b.MinerPayouts { - diff.ImmatureSiacoinOutputs = append(diff.ImmatureSiacoinOutputs, DelayedSiacoinOutputDiff{ - ID: bid.MinerOutputID(i), - Output: sco, - Source: OutputSourceMiner, + scoid := bid.MinerOutputID(i) + diff.CreatedSiacoinElements = append(diff.CreatedSiacoinElements, types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(scoid)}, + SiacoinOutput: sco, MaturityHeight: s.MaturityHeight(), }) } @@ -814,28 +677,27 @@ func ApplyDiff(s State, store Store, b types.Block) BlockDiff { if _, ok := ms.spent(types.Hash256(fcid)); ok { continue } - fc := ms.mustFileContract(store, fcid) - diff.MissedFileContracts = append(diff.MissedFileContracts, FileContractDiff{ - ID: fcid, - Contract: fc, - }) - for i, sco := range fc.MissedProofOutputs { - diff.ImmatureSiacoinOutputs = append(diff.ImmatureSiacoinOutputs, DelayedSiacoinOutputDiff{ - ID: fcid.MissedOutputID(i), - Output: sco, - Source: OutputSourceMissedContract, + fce := ms.mustFileContractElement(store, fcid) + diff.MissedFileContracts = append(diff.MissedFileContracts, fce) + for i, sco := range fce.MissedProofOutputs { + scoid 
:= fcid.MissedOutputID(i) + diff.CreatedSiacoinElements = append(diff.CreatedSiacoinElements, types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(scoid)}, + SiacoinOutput: sco, MaturityHeight: s.MaturityHeight(), }) } } if subsidy := s.FoundationSubsidy(); !subsidy.Value.IsZero() { - diff.ImmatureSiacoinOutputs = append(diff.ImmatureSiacoinOutputs, DelayedSiacoinOutputDiff{ - ID: bid.FoundationOutputID(), - Output: subsidy, - Source: OutputSourceFoundation, + scoid := bid.FoundationOutputID() + diff.CreatedSiacoinElements = append(diff.CreatedSiacoinElements, types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(scoid)}, + SiacoinOutput: subsidy, MaturityHeight: s.MaturityHeight(), }) } + diff.ElementApplyUpdate = s.Elements.ApplyBlock(&diff) // fills in leaf index + proofs for all elements + diff.HistoryApplyUpdate = s.History.ApplyBlock(types.ChainIndex{Height: s.Index.Height + 1, ID: bid}) return diff } diff --git a/consensus/update_test.go b/consensus/update_test.go index d4e2a55b..69d38e9c 100644 --- a/consensus/update_test.go +++ b/consensus/update_test.go @@ -73,12 +73,11 @@ func TestApplyBlock(t *testing.T) { MinerPayouts: []types.SiacoinOutput{{Address: types.VoidAddress, Value: cs.BlockReward()}}, } expect := consensus.BlockDiff{ - ImmatureSiacoinOutputs: []consensus.DelayedSiacoinOutputDiff{ + CreatedSiacoinElements: []types.SiacoinElement{ { - ID: b1.ID().MinerOutputID(0), - Output: b1.MinerPayouts[0], + StateElement: types.StateElement{ID: types.Hash256(b1.ID().MinerOutputID(0))}, + SiacoinOutput: b1.MinerPayouts[0], MaturityHeight: cs.MaturityHeight(), - Source: consensus.OutputSourceMiner, }, }, } @@ -119,32 +118,31 @@ func TestApplyBlock(t *testing.T) { } expect = consensus.BlockDiff{ Transactions: []consensus.TransactionDiff{{ - CreatedSiacoinOutputs: []consensus.SiacoinOutputDiff{ - {ID: txnB2.SiacoinOutputID(0), Output: txnB2.SiacoinOutputs[0]}, - {ID: txnB2.SiacoinOutputID(1), Output: 
txnB2.SiacoinOutputs[1]}, + CreatedSiacoinElements: []types.SiacoinElement{ + {StateElement: types.StateElement{ID: types.Hash256(txnB2.SiacoinOutputID(0))}, SiacoinOutput: txnB2.SiacoinOutputs[0]}, + {StateElement: types.StateElement{ID: types.Hash256(txnB2.SiacoinOutputID(1))}, SiacoinOutput: txnB2.SiacoinOutputs[1]}, + { + StateElement: types.StateElement{ID: types.Hash256(giftTxn.SiafundOutputID(0).ClaimOutputID())}, + SiacoinOutput: types.SiacoinOutput{Value: types.NewCurrency64(0), Address: txnB2.SiafundInputs[0].ClaimAddress}, + MaturityHeight: cs.MaturityHeight(), + }, }, - SpentSiacoinOutputs: []consensus.SiacoinOutputDiff{ - {ID: giftTxn.SiacoinOutputID(0), Output: giftTxn.SiacoinOutputs[0]}, + SpentSiacoinElements: []types.SiacoinElement{ + {StateElement: types.StateElement{ID: types.Hash256(giftTxn.SiacoinOutputID(0))}, SiacoinOutput: giftTxn.SiacoinOutputs[0]}, }, - CreatedSiafundOutputs: []consensus.SiafundOutputDiff{ - {ID: txnB2.SiafundOutputID(0), Output: txnB2.SiafundOutputs[0]}, - {ID: txnB2.SiafundOutputID(1), Output: txnB2.SiafundOutputs[1]}, + CreatedSiafundElements: []types.SiafundElement{ + {StateElement: types.StateElement{ID: types.Hash256(txnB2.SiafundOutputID(0))}, SiafundOutput: txnB2.SiafundOutputs[0]}, + {StateElement: types.StateElement{ID: types.Hash256(txnB2.SiafundOutputID(1))}, SiafundOutput: txnB2.SiafundOutputs[1]}, }, - SpentSiafundOutputs: []consensus.SiafundOutputDiff{ - {ID: giftTxn.SiafundOutputID(0), Output: giftTxn.SiafundOutputs[0]}, + SpentSiafundElements: []types.SiafundElement{ + {StateElement: types.StateElement{ID: types.Hash256(giftTxn.SiafundOutputID(0))}, SiafundOutput: giftTxn.SiafundOutputs[0]}, }, - ImmatureSiacoinOutputs: []consensus.DelayedSiacoinOutputDiff{{ - ID: giftTxn.SiafundOutputID(0).ClaimOutputID(), - Output: types.SiacoinOutput{Value: types.NewCurrency64(0), Address: txnB2.SiafundInputs[0].ClaimAddress}, - MaturityHeight: cs.MaturityHeight(), - Source: consensus.OutputSourceSiafundClaim, - }}, }}, 
- ImmatureSiacoinOutputs: []consensus.DelayedSiacoinOutputDiff{{ - ID: b2.ID().MinerOutputID(0), - Output: b2.MinerPayouts[0], + + CreatedSiacoinElements: []types.SiacoinElement{{ + StateElement: types.StateElement{ID: types.Hash256(b2.ID().MinerOutputID(0))}, + SiacoinOutput: b2.MinerPayouts[0], MaturityHeight: cs.MaturityHeight(), - Source: consensus.OutputSourceMiner, }}, } if diff, err := addBlock(b2); err != nil { diff --git a/consensus/validation.go b/consensus/validation.go index 5895ef90..d974222f 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -91,11 +91,10 @@ func ValidateOrphan(s State, b types.Block) error { // A MidState represents the state of the blockchain within a block. type MidState struct { base State - scos map[types.SiacoinOutputID]types.SiacoinOutput - sfos map[types.SiafundOutputID]types.SiafundOutput - claims map[types.SiafundOutputID]types.Currency - fcs map[types.FileContractID]types.FileContract - v2fcs map[types.FileContractID]types.V2FileContract + sces map[types.SiacoinOutputID]types.SiacoinElement + sfes map[types.SiafundOutputID]types.SiafundElement + fces map[types.FileContractID]types.FileContractElement + v2fces map[types.FileContractID]types.V2FileContractElement spends map[types.Hash256]types.TransactionID siafundPool types.Currency } @@ -105,54 +104,53 @@ func (ms *MidState) Index() types.ChainIndex { return ms.base.Index } -func (ms *MidState) siacoinOutput(store Store, id types.SiacoinOutputID) (types.SiacoinOutput, bool) { - sco, ok := ms.scos[id] +func (ms *MidState) siacoinElement(store Store, id types.SiacoinOutputID) (types.SiacoinElement, bool) { + sce, ok := ms.sces[id] if !ok { - sco, ok = store.SiacoinOutput(id) + sce, ok = store.SiacoinElement(id) } - return sco, ok + return sce, ok } -func (ms *MidState) siafundOutput(store Store, id types.SiafundOutputID) (types.SiafundOutput, types.Currency, types.Currency, bool) { - sfo, ok := ms.sfos[id] - claimStart := ms.claims[id] +func (ms *MidState) 
siafundElement(store Store, id types.SiafundOutputID) (types.SiafundElement, types.Currency, bool) { + sfe, ok := ms.sfes[id] if !ok { - sfo, claimStart, ok = store.SiafundOutput(id) + sfe, ok = store.SiafundElement(id) } - claimPortion := ms.siafundPool.Sub(claimStart).Div64(ms.base.SiafundCount()).Mul64(sfo.Value) - return sfo, claimStart, claimPortion, ok + claimPortion := ms.siafundPool.Sub(sfe.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfe.Value) + return sfe, claimPortion, ok } -func (ms *MidState) fileContract(store Store, id types.FileContractID) (types.FileContract, bool) { - fc, ok := ms.fcs[id] +func (ms *MidState) fileContractElement(store Store, id types.FileContractID) (types.FileContractElement, bool) { + fce, ok := ms.fces[id] if !ok { - fc, ok = store.FileContract(id) + fce, ok = store.FileContractElement(id) } - return fc, ok + return fce, ok } -func (ms *MidState) mustSiacoinOutput(store Store, id types.SiacoinOutputID) types.SiacoinOutput { - sco, ok := ms.siacoinOutput(store, id) +func (ms *MidState) mustSiacoinElement(store Store, id types.SiacoinOutputID) types.SiacoinElement { + sce, ok := ms.siacoinElement(store, id) if !ok { - panic("missing SiacoinOutput") + panic("missing SiacoinElement") } - return sco + return sce } -func (ms *MidState) mustSiafundOutput(store Store, id types.SiafundOutputID) (types.SiafundOutput, types.Currency, types.Currency) { - sfo, claimStart, claimPortion, ok := ms.siafundOutput(store, id) +func (ms *MidState) mustSiafundElement(store Store, id types.SiafundOutputID) (types.SiafundElement, types.Currency) { + sfe, claimPortion, ok := ms.siafundElement(store, id) if !ok { - panic("missing SiafundOutput") + panic("missing SiafundElement") } - return sfo, claimStart, claimPortion + return sfe, claimPortion } -func (ms *MidState) mustFileContract(store Store, id types.FileContractID) types.FileContract { - fc, ok := ms.fileContract(store, id) +func (ms *MidState) mustFileContractElement(store Store, id 
types.FileContractID) types.FileContractElement { + fce, ok := ms.fileContractElement(store, id) if !ok { - panic("missing FileContract") + panic("missing FileContractElement") } - return fc + return fce } func contractRevisionID(id types.FileContractID, revisionNumber uint64) types.FileContractID { @@ -164,14 +162,14 @@ func contractRevisionID(id types.FileContractID, revisionNumber uint64) types.Fi return types.FileContractID(h.Sum()) } -func (ms *MidState) mustFileContractParentRevision(store Store, id types.FileContractID, newRevisionNumber uint64) types.FileContract { - fc, ok := ms.fileContract(store, contractRevisionID(id, newRevisionNumber)) +func (ms *MidState) mustFileContractParentRevision(store Store, id types.FileContractID, newRevisionNumber uint64) types.FileContractElement { + fce, ok := ms.fileContractElement(store, contractRevisionID(id, newRevisionNumber)) if !ok { - if fc, ok = ms.fileContract(store, id); !ok { - panic("missing FileContract") + if fce, ok = ms.fileContractElement(store, id); !ok { + panic("missing FileContractElement") } } - return fc + return fce } func (ms *MidState) spent(id types.Hash256) (types.TransactionID, bool) { @@ -179,20 +177,19 @@ func (ms *MidState) spent(id types.Hash256) (types.TransactionID, bool) { return txid, ok } -func (ms *MidState) v2Revision(id types.FileContractID) (types.V2FileContract, bool) { - fc, ok := ms.v2fcs[id] - return fc, ok +func (ms *MidState) v2Revision(id types.FileContractID) (types.V2FileContractElement, bool) { + fce, ok := ms.v2fces[id] + return fce, ok } // NewMidState constructs a MidState initialized to the provided base state. 
func NewMidState(s State) *MidState { return &MidState{ base: s, - scos: make(map[types.SiacoinOutputID]types.SiacoinOutput), - sfos: make(map[types.SiafundOutputID]types.SiafundOutput), - claims: make(map[types.SiafundOutputID]types.Currency), - fcs: make(map[types.FileContractID]types.FileContract), - v2fcs: make(map[types.FileContractID]types.V2FileContract), + sces: make(map[types.SiacoinOutputID]types.SiacoinElement), + sfes: make(map[types.SiafundOutputID]types.SiafundElement), + fces: make(map[types.FileContractID]types.FileContractElement), + v2fces: make(map[types.FileContractID]types.V2FileContractElement), spends: make(map[types.Hash256]types.TransactionID), siafundPool: s.SiafundPool, } @@ -279,11 +276,13 @@ func validateSiacoins(ms *MidState, store Store, txn types.Transaction) error { } else if txid, ok := ms.spent(types.Hash256(sci.ParentID)); ok { return fmt.Errorf("siacoin input %v double-spends parent output (previously spent in %v)", i, txid) } - parent, ok := ms.siacoinOutput(store, sci.ParentID) + parent, ok := ms.siacoinElement(store, sci.ParentID) if !ok { return fmt.Errorf("siacoin input %v spends nonexistent siacoin output %v", i, sci.ParentID) } else if sci.UnlockConditions.UnlockHash() != parent.Address { return fmt.Errorf("siacoin input %v claims incorrect unlock conditions for siacoin output %v", i, sci.ParentID) + } else if parent.MaturityHeight > ms.base.childHeight() { + return fmt.Errorf("siacoin input %v has immature parent", i) } inputSum = inputSum.Add(parent.Value) } @@ -311,7 +310,7 @@ func validateSiafunds(ms *MidState, store Store, txn types.Transaction) error { } else if txid, ok := ms.spent(types.Hash256(sfi.ParentID)); ok { return fmt.Errorf("siafund input %v double-spends parent output (previously spent in %v)", i, txid) } - parent, _, _, ok := ms.siafundOutput(store, sfi.ParentID) + parent, _, ok := ms.siafundElement(store, sfi.ParentID) if !ok { return fmt.Errorf("siafund input %v spends nonexistent siafund output %v", 
i, sfi.ParentID) } else if sfi.UnlockConditions.UnlockHash() != parent.Address && @@ -364,7 +363,7 @@ func validateFileContracts(ms *MidState, store Store, txn types.Transaction) err } else if txid, ok := ms.spent(types.Hash256(fcr.ParentID)); ok { return fmt.Errorf("file contract revision %v conflicts with previous proof or revision (in %v)", i, txid) } - parent, ok := ms.fileContract(store, fcr.ParentID) + parent, ok := ms.fileContractElement(store, fcr.ParentID) if !ok { return fmt.Errorf("file contract revision %v revises nonexistent file contract %v", i, fcr.ParentID) } @@ -449,7 +448,7 @@ func validateFileContracts(ms *MidState, store Store, txn types.Transaction) err if txid, ok := ms.spent(types.Hash256(sp.ParentID)); ok { return fmt.Errorf("storage proof %v conflicts with previous proof (in %v)", i, txid) } - fc, ok := ms.fileContract(store, sp.ParentID) + fc, ok := ms.fileContractElement(store, sp.ParentID) if !ok { return fmt.Errorf("storage proof %v references nonexistent file contract", i) } @@ -717,7 +716,7 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { // check accumulator if sci.Parent.LeafIndex == types.EphemeralLeafIndex { - if _, ok := ms.scos[types.SiacoinOutputID(sci.Parent.ID)]; !ok { + if _, ok := ms.sces[types.SiacoinOutputID(sci.Parent.ID)]; !ok { return fmt.Errorf("siacoin input %v spends nonexistent ephemeral output %v", i, sci.Parent.ID) } } else if !ms.base.Elements.ContainsUnspentSiacoinElement(sci.Parent) { @@ -773,7 +772,7 @@ func validateV2Siafunds(ms *MidState, txn types.V2Transaction) error { // check accumulator if sci.Parent.LeafIndex == types.EphemeralLeafIndex { - if _, ok := ms.sfos[types.SiafundOutputID(sci.Parent.ID)]; !ok { + if _, ok := ms.sfes[types.SiafundOutputID(sci.Parent.ID)]; !ok { return fmt.Errorf("siafund input %v spends nonexistent ephemeral output %v", i, sci.Parent.ID) } } else if !ms.base.Elements.ContainsUnspentSiafundElement(sci.Parent) { @@ -883,8 +882,8 @@ func 
validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { for i, fcr := range txn.FileContractRevisions { cur, rev := fcr.Parent.V2FileContract, fcr.Revision - if fc, ok := ms.v2Revision(types.FileContractID(fcr.Parent.ID)); ok { - cur = fc + if fce, ok := ms.v2Revision(types.FileContractID(fcr.Parent.ID)); ok { + cur = fce.V2FileContract } if err := validateParent(fcr.Parent); err != nil { return fmt.Errorf("file contract revision %v parent (%v) %s", i, fcr.Parent.ID, err) diff --git a/types/encoding.go b/types/encoding.go index b2e86558..2ddd9edd 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -571,6 +571,12 @@ func (fce FileContractElement) EncodeTo(e *Encoder) { fce.FileContract.EncodeTo(e) } +// EncodeTo implements types.EncoderTo. +func (fcer FileContractElementRevision) EncodeTo(e *Encoder) { + fcer.Parent.EncodeTo(e) + fcer.Revision.EncodeTo(e) +} + // EncodeTo implements types.EncoderTo. func (fce V2FileContractElement) EncodeTo(e *Encoder) { fce.StateElement.EncodeTo(e) @@ -1117,6 +1123,12 @@ func (fce *FileContractElement) DecodeFrom(d *Decoder) { fce.FileContract.DecodeFrom(d) } +// DecodeFrom implements types.DecoderFrom. +func (fcer *FileContractElementRevision) DecodeFrom(d *Decoder) { + fcer.Parent.DecodeFrom(d) + fcer.Revision.DecodeFrom(d) +} + // DecodeFrom implements types.DecoderFrom. func (fce *V2FileContractElement) DecodeFrom(d *Decoder) { fce.StateElement.DecodeFrom(d) diff --git a/types/types.go b/types/types.go index 7b6fdf1d..ee0f3563 100644 --- a/types/types.go +++ b/types/types.go @@ -601,6 +601,20 @@ type FileContractElement struct { FileContract } +// A FileContractElementRevision updates the state of an existing file contract. +type FileContractElementRevision struct { + Parent FileContractElement `json:"parent"` + Revision FileContract `json:"revision"` +} + +// RevisedElement returns the post-revision FileContractElement. 
+func (fcer FileContractElementRevision) RevisedElement() FileContractElement { + fce := fcer.Parent + fce.FileContract = fcer.Revision + fce.Payout = fcer.Parent.Payout // see FileContractRevision docstring + return fce +} + // A V2FileContractElement is a storage agreement between a renter and a host. type V2FileContractElement struct { StateElement From f149e8f95c0a5870e341d3729644417ac6b45281 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 8 Aug 2023 15:29:17 -0400 Subject: [PATCH 13/53] consensus,chain: Overhaul block application This is an unfortunately-messy diff. There's a lot going on here: - The role of MidState has expanded, subsuming the responsibilities of ApplyDiff. Conceptually, applying a block now means applying each of its transactions to a MidState, then applying any block- level effects (e.g. miner payouts); subsequently, the MidState will contain the effects of the applications, which can be used as desired (i.e. passing them to subscribers, and to the element accumulator). - BlockDiffs have been replaced with ApplyUpdates. BlockDiffs were a good attempt to summarize the changes within a block, but they duplicated too much data and did not play nice with the element accumulator. The new ApplyUpdate wraps MidState - The consensus.Store interface has been removed. Now, instead of consensus calling into a Store interface, the store provides all the "supplementary" data, e.g. siacoin inputs, up-front. This simplifies the API boundaries and means consensus performs no I/O. Checkpoints now store a Supplement instead of an entire Diff, which saves space and is a more natural fit anyway. After this, no further major changes to consensus should be necessary for v2. There's still plenty of testing and bugfixing ahead, though! Also, the chain.Subscriber API has likely suffered a bit from the removal of BlockDiff, so I'll need to revisit it to ensure that its ergonomics are adequate. 
--- chain/db.go | 363 +++++++++-------- chain/manager.go | 95 +++-- chain/manager_test.go | 2 +- consensus/merkle.go | 333 +++++---------- consensus/state.go | 25 +- consensus/store.go | 125 ++++++ consensus/update.go | 763 ++++++++++++++--------------------- consensus/update_test.go | 117 +++--- consensus/validation.go | 160 ++++---- consensus/validation_test.go | 4 +- types/encoding.go | 4 - types/types.go | 1 + 12 files changed, 942 insertions(+), 1050 deletions(-) create mode 100644 consensus/store.go diff --git a/chain/db.go b/chain/db.go index 3df35762..ab101f4f 100644 --- a/chain/db.go +++ b/chain/db.go @@ -200,13 +200,12 @@ var ( // DBStore implements Store using a key-value database. type DBStore struct { - db DB - n *consensus.Network // for getCheckpoint + db DB + n *consensus.Network // for getCheckpoint + enc types.Encoder unflushed int lastFlush time.Time - - enc types.Encoder } func (db *DBStore) bucket(name []byte) *dbBucket { @@ -215,8 +214,7 @@ func (db *DBStore) bucket(name []byte) *dbBucket { func (db *DBStore) encHeight(height uint64) []byte { var buf [8]byte - binary.BigEndian.PutUint64(buf[:], height) - return buf[:] + return binary.BigEndian.AppendUint64(buf[:0], height) } func (db *DBStore) putBestIndex(index types.ChainIndex) { @@ -262,26 +260,38 @@ func (db *DBStore) encLeaf(index uint64, height int) []byte { // distinguish these by setting the penultimate bit. Each time we ascend a // level, we have one fewer bit to work with; but since each level requires // half as many nodes as the previous, it balances out and we always have - // enough space. 
- return binary.BigEndian.AppendUint32(nil, bits.RotateLeft32(uint32(index)|((1<= 0; i-- { - td := diff.Transactions[i] - for _, fce := range td.ValidFileContracts { - db.putFileContract(fce) - } - for _, fcer := range td.RevisedFileContracts { - db.deleteFileContract(types.FileContractID(fcer.Parent.ID), fcer.Revision.WindowEnd) - db.putFileContract(fcer.Parent) - } - for _, sfe := range td.SpentSiafundElements { + } else { db.putSiafundElement(sfe) } - for _, sce := range td.SpentSiacoinElements { - db.putSiacoinElement(sce) - } - for _, fce := range td.CreatedFileContracts { - db.deleteFileContract(types.FileContractID(fce.ID), fce.WindowEnd) - } - for _, sfe := range td.CreatedSiafundElements { - db.deleteSiafundElement(types.SiafundOutputID(sfe.ID)) - } - for _, sce := range td.CreatedSiacoinElements { - db.deleteSiacoinElement(types.SiacoinOutputID(sce.ID)) + db.putElementProof(sfe.StateElement) + }) + cau.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool) { + if resolved { + db.deleteFileContractElement(types.FileContractID(fce.ID)) + db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) + } else if rev != nil { + db.putFileContractElement(*rev) + if rev.WindowEnd != fce.WindowEnd { + db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) + db.putFileContractExpiration(types.FileContractID(fce.ID), rev.WindowEnd) + } + } else { + db.putFileContractElement(fce) + db.putFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) + } + db.putElementProof(fce.StateElement) + }) +} + +func (db *DBStore) revertElements(cru consensus.RevertUpdate) { + cru.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool) { + if resolved { + // contract no longer resolved; restore it + db.putFileContractElement(fce) + db.putFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) + 
db.putElementProof(fce.StateElement) + } else if rev != nil { + // contract no longer revised; restore prior revision + db.putFileContractElement(fce) + if rev.WindowEnd != fce.WindowEnd { + db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) + db.putFileContractExpiration(types.FileContractID(fce.ID), rev.WindowEnd) + } + db.putElementProof(fce.StateElement) + } else { + // contract no longer exists; delete it + db.deleteFileContractElement(types.FileContractID(fce.ID)) + db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) } - } - - for i := len(diff.V2Transactions) - 1; i >= 0; i-- { - td := diff.V2Transactions[i] - for _, sfe := range td.SpentSiafundElements { + }) + cru.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { + if sfe.LeafIndex == types.EphemeralLeafIndex { + return + } else if spent { + // output no longer spent; restore it db.putSiafundElement(sfe) - } - for _, sce := range td.SpentSiacoinElements { - db.putSiacoinElement(sce) - } - for _, sfe := range td.CreatedSiafundElements { + db.putElementProof(sfe.StateElement) + } else { + // output no longer exists; delete it db.deleteSiafundElement(types.SiafundOutputID(sfe.ID)) } - for _, sce := range td.CreatedSiacoinElements { + }) + cru.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { + if sce.LeafIndex == types.EphemeralLeafIndex { + return + } else if spent { + // output no longer spent; restore it + db.putSiacoinElement(sce) + db.putElementProof(sce.StateElement) + } else { + // output no longer exists; delete it db.deleteSiacoinElement(types.SiacoinOutputID(sce.ID)) } - } + }) - // TODO: proofs!!!! + // NOTE: Although the element tree has shrunk, we do not need to explicitly + // delete any nodes; getElementProof always stops at the correct height for + // the given tree size, so the no-longer-valid nodes are simply never + // accessed. 
(They will continue to occupy storage, but this storage will + // inevitably be overwritten by future nodes, so there is little reason to + // reclaim it immediately.) } -// BestIndex implements consensus.Store. +// BestIndex implements Store. func (db *DBStore) BestIndex(height uint64) (index types.ChainIndex, ok bool) { index.Height = height ok = db.bucket(bMainChain).get(db.encHeight(height), &index.ID) return } -// AncestorTimestamp implements consensus.Store. -func (db *DBStore) AncestorTimestamp(id types.BlockID, n uint64) time.Time { +// AncestorTimestamp implements Store. +func (db *DBStore) AncestorTimestamp(id types.BlockID, depth uint64) time.Time { c, _ := db.Checkpoint(id) - for i := uint64(1); i < n; i++ { + for i := uint64(1); i < depth; i++ { // if we're on the best path, we can jump to the n'th block directly if index, _ := db.BestIndex(c.State.Index.Height); index.ID == id { - ancestorIndex, _ := db.BestIndex(c.State.Index.Height - (n - i)) + height := c.State.Index.Height - (depth - i) + if c.State.Index.Height < (depth - i) { + height = 0 + } + ancestorIndex, _ := db.BestIndex(height) c, _ = db.Checkpoint(ancestorIndex.ID) break } @@ -461,34 +481,61 @@ func (db *DBStore) AncestorTimestamp(id types.BlockID, n uint64) time.Time { return c.Block.Timestamp } -// SiacoinElement implements consensus.Store. -func (db *DBStore) SiacoinElement(id types.SiacoinOutputID) (sce types.SiacoinElement, ok bool) { - ok = db.bucket(bSiacoinElements).get(id[:], &sce) - sce.MerkleProof = db.getElementProof(sce.LeafIndex) - return -} +// SupplementTipTransaction implements Store. +func (db *DBStore) SupplementTipTransaction(txn types.Transaction) (ts consensus.V1TransactionSupplement) { + // get tip state, for proof-trimming + index, _ := db.BestIndex(db.getHeight()) + c, _ := db.Checkpoint(index.ID) + numLeaves := c.State.Elements.NumLeaves -// FileContractElement implements consensus.Store. 
-func (db *DBStore) FileContractElement(id types.FileContractID) (fce types.FileContractElement, ok bool) { - ok = db.bucket(bFileContractElements).get(id[:], &fce) - fce.MerkleProof = db.getElementProof(fce.LeafIndex) + for _, sci := range txn.SiacoinInputs { + if sce, ok := db.getSiacoinElement(sci.ParentID, numLeaves); ok { + ts.SiacoinInputs = append(ts.SiacoinInputs, sce) + } + } + for _, sfi := range txn.SiafundInputs { + if sfe, ok := db.getSiafundElement(sfi.ParentID, numLeaves); ok { + ts.SiafundInputs = append(ts.SiafundInputs, sfe) + } + } + for _, fcr := range txn.FileContractRevisions { + if fce, ok := db.getFileContractElement(fcr.ParentID, numLeaves); ok { + ts.RevisedFileContracts = append(ts.RevisedFileContracts, fce) + } + } + for _, sp := range txn.StorageProofs { + if fce, ok := db.getFileContractElement(sp.ParentID, numLeaves); ok { + ts.ValidFileContracts = append(ts.ValidFileContracts, fce) + if windowIndex, ok := db.BestIndex(fce.WindowStart - 1); ok { + ts.StorageProofBlockIDs = append(ts.StorageProofBlockIDs, windowIndex.ID) + } + } + } return } -// MissedFileContracts implements consensus.Store. -func (db *DBStore) MissedFileContracts(height uint64) (fcids []types.FileContractID) { - ids := db.bucket(bFileContractElements).getRaw(db.encHeight(height)) +// SupplementTipBlock implements Store. 
+func (db *DBStore) SupplementTipBlock(b types.Block) (bs consensus.V1BlockSupplement) { + // get tip state, for proof-trimming + index, _ := db.BestIndex(db.getHeight()) + c, _ := db.Checkpoint(index.ID) + numLeaves := c.State.Elements.NumLeaves + + bs = consensus.V1BlockSupplement{ + Transactions: make([]consensus.V1TransactionSupplement, len(b.Transactions)), + } + for i, txn := range b.Transactions { + bs.Transactions[i] = db.SupplementTipTransaction(txn) + } + ids := db.bucket(bFileContractElements).getRaw(db.encHeight(db.getHeight() + 1)) for i := 0; i < len(ids); i += 32 { - fcids = append(fcids, *(*types.FileContractID)(ids[i:])) + fce, ok := db.getFileContractElement(*(*types.FileContractID)(ids[i:]), numLeaves) + if !ok { + panic("missing FileContractElement") + } + bs.ExpiringFileContracts = append(bs.ExpiringFileContracts, fce) } - return -} - -// SiafundOutput implements consensus.Store. -func (db *DBStore) SiafundElement(id types.SiafundOutputID) (sfe types.SiafundElement, ok bool) { - ok = db.bucket(bSiafundElements).get(id[:], &sfe) - sfe.MerkleProof = db.getElementProof(sfe.LeafIndex) - return + return bs } // AddCheckpoint implements Store. @@ -519,10 +566,10 @@ func (db *DBStore) flush() { db.lastFlush = time.Now() } -// ApplyDiff implements Store. -func (db *DBStore) ApplyDiff(s consensus.State, diff consensus.BlockDiff, mustCommit bool) (committed bool) { +// ApplyBlock implements Store. +func (db *DBStore) ApplyBlock(s consensus.State, cau consensus.ApplyUpdate, mustCommit bool) (committed bool) { db.applyState(s) - db.applyDiff(diff) + db.applyElements(cau) committed = mustCommit || db.shouldFlush() if committed { db.flush() @@ -530,9 +577,9 @@ func (db *DBStore) ApplyDiff(s consensus.State, diff consensus.BlockDiff, mustCo return } -// RevertDiff implements Store. -func (db *DBStore) RevertDiff(s consensus.State, diff consensus.BlockDiff) { - db.revertDiff(diff) +// RevertBlock implements Store. 
+func (db *DBStore) RevertBlock(s consensus.State, cru consensus.RevertUpdate) { + db.revertElements(cru) db.revertState(s) if db.shouldFlush() { db.flush() @@ -585,12 +632,10 @@ func NewDBStore(db DB, n *consensus.Network, genesisBlock types.Block) (_ *DBSto // store genesis checkpoint and apply its effects genesisState := n.GenesisState() - cs := consensus.ApplyState(genesisState, dbs, genesisBlock) - diff := consensus.ApplyDiff(genesisState, dbs, genesisBlock) - dbs.putCheckpoint(Checkpoint{genesisBlock, cs, &diff}) - dbs.applyState(cs) - dbs.applyDiff(diff) - dbs.flush() + bs := consensus.V1BlockSupplement{Transactions: make([]consensus.V1TransactionSupplement, len(genesisBlock.Transactions))} + cs, cau := consensus.ApplyBlock(genesisState, genesisBlock, bs, time.Time{}) + dbs.putCheckpoint(Checkpoint{genesisBlock, cs, &bs}) + dbs.ApplyBlock(cs, cau, true) } else if dbGenesis.ID != genesisBlock.ID() { // try to detect network so we can provide a more helpful error message _, mainnetGenesis := Mainnet() diff --git a/chain/manager.go b/chain/manager.go index 15e1f0f7..f20ba4e2 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -18,20 +18,20 @@ var ( // A Checkpoint pairs a block with its resulting chain state. type Checkpoint struct { - Block types.Block - State consensus.State - Diff *consensus.BlockDiff // nil if the block has not been validated + Block types.Block + State consensus.State + Supplement *consensus.V1BlockSupplement } // EncodeTo implements types.EncoderTo. 
func (c Checkpoint) EncodeTo(e *types.Encoder) { - e.WriteUint8(2) // block (and diff) version + e.WriteUint8(2) // block (and supplement) version types.V2Block(c.Block).EncodeTo(e) e.WriteUint8(1) // state version c.State.EncodeTo(e) - e.WriteBool(c.Diff != nil) - if c.Diff != nil { - c.Diff.EncodeTo(e) + e.WriteBool(c.Supplement != nil) + if c.Supplement != nil { + c.Supplement.EncodeTo(e) } } @@ -47,25 +47,27 @@ func (c *Checkpoint) DecodeFrom(d *types.Decoder) { } c.State.DecodeFrom(d) if d.ReadBool() { - c.Diff = new(consensus.BlockDiff) - c.Diff.DecodeFrom(d) + c.Supplement = new(consensus.V1BlockSupplement) + c.Supplement.DecodeFrom(d) } } // An ApplyUpdate reflects the changes to the blockchain resulting from the // addition of a block. type ApplyUpdate struct { + consensus.ApplyUpdate + Block types.Block State consensus.State // post-application - Diff consensus.BlockDiff } // A RevertUpdate reflects the changes to the blockchain resulting from the // removal of a block. type RevertUpdate struct { + consensus.RevertUpdate + Block types.Block State consensus.State // post-reversion, i.e. pre-application - Diff consensus.BlockDiff } // A Subscriber processes updates to the blockchain. Implementations must not @@ -79,14 +81,18 @@ type Subscriber interface { // A Store durably commits Manager-related data to storage. I/O errors must be // handled internally, e.g. by panicking or calling os.Exit. type Store interface { - consensus.Store + BestIndex(height uint64) (types.ChainIndex, bool) + AncestorTimestamp(id types.BlockID, n uint64) time.Time + SupplementTipTransaction(txn types.Transaction) consensus.V1TransactionSupplement + SupplementTipBlock(b types.Block) consensus.V1BlockSupplement AddCheckpoint(c Checkpoint) Checkpoint(id types.BlockID) (Checkpoint, bool) - // Except when mustCommit is set, ApplyDiff and RevertDiff are free to + + // Except when mustCommit is set, ApplyBlock and RevertBlock are free to // commit whenever they see fit. 
- ApplyDiff(s consensus.State, diff consensus.BlockDiff, mustCommit bool) (committed bool) - RevertDiff(s consensus.State, diff consensus.BlockDiff) + ApplyBlock(s consensus.State, cau consensus.ApplyUpdate, mustCommit bool) (committed bool) + RevertBlock(s consensus.State, cru consensus.RevertUpdate) } // A Manager tracks multiple blockchains and identifies the best valid @@ -220,7 +226,7 @@ func (m *Manager) AddBlocks(blocks []types.Block) error { } else if err := consensus.ValidateOrphan(cs, b); err != nil { return fmt.Errorf("block %v is invalid: %w", types.ChainIndex{Height: cs.Index.Height + 1, ID: b.ID()}, err) } - cs = consensus.ApplyState(cs, m.store, b) + cs = consensus.ApplyWork(cs, b, m.store.AncestorTimestamp(b.ParentID, cs.AncestorDepth())) m.store.AddCheckpoint(Checkpoint{b, cs, nil}) } @@ -245,9 +251,10 @@ func (m *Manager) revertTip() error { if !ok { return fmt.Errorf("missing checkpoint for block %v", c.Block.ParentID) } - m.store.RevertDiff(pc.State, *c.Diff) + cru := consensus.RevertBlock(pc.State, c.Block, *c.Supplement) + m.store.RevertBlock(pc.State, cru) - update := RevertUpdate{c.Block, pc.State, *c.Diff} + update := RevertUpdate{cru, c.Block, pc.State} for _, s := range m.subscribers { if err := s.ProcessChainRevertUpdate(&update); err != nil { return fmt.Errorf("subscriber %T: %w", s, err) @@ -260,33 +267,38 @@ func (m *Manager) revertTip() error { // applyTip adds a block to the current tip. 
func (m *Manager) applyTip(index types.ChainIndex) error { + var cau consensus.ApplyUpdate c, ok := m.store.Checkpoint(index.ID) if !ok { return fmt.Errorf("missing checkpoint for index %v", index) } else if c.Block.ParentID != m.tipState.Index.ID { panic("applyTip called with non-attaching block") - } - if c.Diff == nil { - if err := consensus.ValidateBlock(m.tipState, m.store, c.Block); err != nil { + } else if c.Supplement == nil { + bs := m.store.SupplementTipBlock(c.Block) + if err := consensus.ValidateBlock(m.tipState, c.Block, bs); err != nil { return fmt.Errorf("block %v is invalid: %w", index, err) } - diff := consensus.ApplyDiff(m.tipState, m.store, c.Block) - c.Diff = &diff + c.Supplement = &bs + targetTimestamp := m.store.AncestorTimestamp(c.Block.ParentID, m.tipState.AncestorDepth()) + c.State, cau = consensus.ApplyBlock(m.tipState, c.Block, bs, targetTimestamp) m.store.AddCheckpoint(c) + } else { + targetTimestamp := m.store.AncestorTimestamp(c.Block.ParentID, m.tipState.AncestorDepth()) + _, cau = consensus.ApplyBlock(m.tipState, c.Block, *c.Supplement, targetTimestamp) } // force the store to commit if we're at the tip (or close to it), or at // least every 2 seconds; this ensures that the amount of uncommitted data // never grows too large forceCommit := time.Since(c.Block.Timestamp) < c.State.BlockInterval()*2 || time.Since(m.lastCommit) > 2*time.Second - committed := m.store.ApplyDiff(c.State, *c.Diff, forceCommit) + committed := m.store.ApplyBlock(c.State, cau, forceCommit) if committed { m.lastCommit = time.Now() } - update := ApplyUpdate{c.Block, c.State, *c.Diff} + update := &ApplyUpdate{cau, c.Block, c.State} for _, s := range m.subscribers { - if err := s.ProcessChainApplyUpdate(&update, committed); err != nil { + if err := s.ProcessChainApplyUpdate(update, committed); err != nil { return fmt.Errorf("subscriber %T: %w", s, err) } } @@ -390,14 +402,15 @@ func (m *Manager) AddSubscriber(s Subscriber, tip types.ChainIndex) error { c, ok := 
m.store.Checkpoint(index.ID) if !ok { return fmt.Errorf("missing revert checkpoint %v", index) - } else if c.Diff == nil { - panic("missing diff for reverted block") + } else if c.Supplement == nil { + panic("missing supplement for reverted block") } pc, ok := m.store.Checkpoint(c.Block.ParentID) if !ok { return fmt.Errorf("missing revert parent checkpoint %v", c.Block.ParentID) } - if err := s.ProcessChainRevertUpdate(&RevertUpdate{c.Block, pc.State, *c.Diff}); err != nil { + cru := consensus.RevertBlock(pc.State, c.Block, *c.Supplement) + if err := s.ProcessChainRevertUpdate(&RevertUpdate{cru, c.Block, pc.State}); err != nil { return fmt.Errorf("couldn't process revert update: %w", err) } } @@ -405,12 +418,17 @@ func (m *Manager) AddSubscriber(s Subscriber, tip types.ChainIndex) error { c, ok := m.store.Checkpoint(index.ID) if !ok { return fmt.Errorf("missing apply checkpoint %v", index) - } else if c.Diff == nil { - panic("missing diff for applied block") + } else if c.Supplement == nil { + panic("missing supplement for applied block") + } + pc, ok := m.store.Checkpoint(c.Block.ParentID) + if !ok { + return fmt.Errorf("missing apply parent checkpoint %v", c.Block.ParentID) } + _, cau := consensus.ApplyBlock(pc.State, c.Block, *c.Supplement, m.store.AncestorTimestamp(c.Block.ParentID, pc.State.AncestorDepth())) // TODO: commit every minute for large len(apply)? 
shouldCommit := index == m.tipState.Index - if err := s.ProcessChainApplyUpdate(&ApplyUpdate{c.Block, c.State, *c.Diff}, shouldCommit); err != nil { + if err := s.ProcessChainApplyUpdate(&ApplyUpdate{cau, c.Block, c.State}, shouldCommit); err != nil { return fmt.Errorf("couldn't process apply update: %w", err) } } @@ -478,8 +496,9 @@ func (m *Manager) revalidatePool() { m.txpool.txns = m.txpool.txns[:0] m.txpool.ms = consensus.NewMidState(m.tipState) for _, txn := range txns { - if consensus.ValidateTransaction(m.txpool.ms, m.store, txn) == nil { - m.txpool.ms.ApplyTransaction(m.store, txn) + ts := m.store.SupplementTipTransaction(txn) + if consensus.ValidateTransaction(m.txpool.ms, txn, ts) == nil { + m.txpool.ms.ApplyTransaction(txn, ts) m.txpool.indices[txn.ID()] = len(m.txpool.txns) m.txpool.txns = append(m.txpool.txns, txn) m.txpool.weight += m.tipState.TransactionWeight(txn) @@ -690,10 +709,11 @@ func (m *Manager) AddPoolTransactions(txns []types.Transaction) error { // validate as a standalone set ms := consensus.NewMidState(m.tipState) for _, txn := range txns { - if err := consensus.ValidateTransaction(ms, m.store, txn); err != nil { + ts := m.store.SupplementTipTransaction(txn) + if err := consensus.ValidateTransaction(ms, txn, ts); err != nil { return fmt.Errorf("transaction %v is invalid: %v", txn.ID(), err) } - ms.ApplyTransaction(m.store, txn) + ms.ApplyTransaction(txn, ts) } for _, txn := range txns { @@ -701,7 +721,8 @@ func (m *Manager) AddPoolTransactions(txns []types.Transaction) error { if _, ok := m.txpool.indices[txid]; ok { continue // skip transactions already in pool } - m.txpool.ms.ApplyTransaction(m.store, txn) + ts := m.store.SupplementTipTransaction(txn) + m.txpool.ms.ApplyTransaction(txn, ts) m.txpool.indices[txid] = len(m.txpool.txns) m.txpool.txns = append(m.txpool.txns, txn) m.txpool.weight += m.tipState.TransactionWeight(txn) diff --git a/chain/manager_test.go b/chain/manager_test.go index 1d22a021..28903fc5 100644 --- 
a/chain/manager_test.go +++ b/chain/manager_test.go @@ -60,7 +60,7 @@ func TestManager(t *testing.T) { }}, } findBlockNonce(cs, &b) - cs = consensus.ApplyState(cs, store, b) + cs, _ = consensus.ApplyBlock(cs, b, store.SupplementTipBlock(b), store.AncestorTimestamp(b.ParentID, cs.AncestorDepth())) blocks = append(blocks, b) } return diff --git a/consensus/merkle.go b/consensus/merkle.go index 69497a44..e32f3098 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -51,7 +51,7 @@ func storageProofRoot(leafHash types.Hash256, leafIndex uint64, filesize uint64, // An ElementLeaf represents a leaf in the ElementAccumulator Merkle tree. type ElementLeaf struct { - types.StateElement + *types.StateElement ElementHash types.Hash256 Spent bool } @@ -74,7 +74,7 @@ func (l ElementLeaf) ProofRoot() types.Hash256 { } // SiacoinLeaf returns the ElementLeaf for a SiacoinElement. -func SiacoinLeaf(e types.SiacoinElement, spent bool) ElementLeaf { +func SiacoinLeaf(e *types.SiacoinElement, spent bool) ElementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() @@ -83,14 +83,14 @@ func SiacoinLeaf(e types.SiacoinElement, spent bool) ElementLeaf { e.SiacoinOutput.EncodeTo(h.E) h.E.WriteUint64(e.MaturityHeight) return ElementLeaf{ - StateElement: e.StateElement, + StateElement: &e.StateElement, ElementHash: h.Sum(), Spent: spent, } } // SiafundLeaf returns the ElementLeaf for a SiafundElement. -func SiafundLeaf(e types.SiafundElement, spent bool) ElementLeaf { +func SiafundLeaf(e *types.SiafundElement, spent bool) ElementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() @@ -99,14 +99,14 @@ func SiafundLeaf(e types.SiafundElement, spent bool) ElementLeaf { e.SiafundOutput.EncodeTo(h.E) e.ClaimStart.EncodeTo(h.E) return ElementLeaf{ - StateElement: e.StateElement, + StateElement: &e.StateElement, ElementHash: h.Sum(), Spent: spent, } } // FileContractLeaf returns the ElementLeaf for a FileContractElement. 
-func FileContractLeaf(e types.FileContractElement, spent bool) ElementLeaf { +func FileContractLeaf(e *types.FileContractElement, spent bool) ElementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() @@ -114,14 +114,14 @@ func FileContractLeaf(e types.FileContractElement, spent bool) ElementLeaf { e.ID.EncodeTo(h.E) e.FileContract.EncodeTo(h.E) return ElementLeaf{ - StateElement: e.StateElement, + StateElement: &e.StateElement, ElementHash: h.Sum(), Spent: spent, } } // V2FileContractLeaf returns the ElementLeaf for a V2FileContractElement. -func V2FileContractLeaf(e types.V2FileContractElement, spent bool) ElementLeaf { +func V2FileContractLeaf(e *types.V2FileContractElement, spent bool) ElementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() @@ -129,25 +129,25 @@ func V2FileContractLeaf(e types.V2FileContractElement, spent bool) ElementLeaf { e.ID.EncodeTo(h.E) e.V2FileContract.EncodeTo(h.E) return ElementLeaf{ - StateElement: e.StateElement, + StateElement: &e.StateElement, ElementHash: h.Sum(), Spent: spent, } } type accumulator struct { - trees [64]types.Hash256 - numLeaves uint64 + Trees [64]types.Hash256 + NumLeaves uint64 } func (acc *accumulator) hasTreeAtHeight(height int) bool { - return acc.numLeaves&(1< startOfNewTree && j >= 0; j-- { - element(j).MerkleProof = append(element(j).MerkleProof, oldRoot) + leaves[j].MerkleProof = append(leaves[j].MerkleProof, oldRoot) } for ; j > startOfOldTree && j >= 0; j-- { - element(j).MerkleProof = append(element(j).MerkleProof, h) + leaves[j].MerkleProof = append(leaves[j].MerkleProof, h) } // Record the left- and right-hand roots in treeGrowth, where // applicable. - curTreeIndex := (acc.numLeaves + 1) - 1<= mid }) - return ls[:split], ls[split:] -} - -// updateLeaves overwrites the specified leaves in the accumulator. It updates -// the Merkle proofs of each leaf, and returns the leaves (grouped by tree) for -// later use. 
-func (acc *ElementAccumulator) updateLeaves(diff *BlockDiff) [64][]ElementLeaf { - var leaves []ElementLeaf - for _, tdiff := range diff.Transactions { - for _, sce := range tdiff.SpentSiacoinElements { - leaves = append(leaves, SiacoinLeaf(sce, true)) - } - for _, sfe := range tdiff.SpentSiafundElements { - leaves = append(leaves, SiafundLeaf(sfe, true)) - } - for _, fce := range tdiff.ValidFileContracts { - leaves = append(leaves, FileContractLeaf(fce, true)) - } - for _, fcer := range tdiff.RevisedFileContracts { - leaves = append(leaves, FileContractLeaf(fcer.Parent, false)) - } - } - for _, fce := range diff.MissedFileContracts { - leaves = append(leaves, FileContractLeaf(fce, true)) - } - for _, tdiff := range diff.V2Transactions { - for _, sce := range tdiff.SpentSiacoinElements { - leaves = append(leaves, SiacoinLeaf(sce, true)) - } - for _, sfe := range tdiff.SpentSiafundElements { - leaves = append(leaves, SiafundLeaf(sfe, true)) - } - for _, fcr := range tdiff.RevisedFileContracts { - fce := fcr.Parent - fce.V2FileContract = fcr.Revision - leaves = append(leaves, V2FileContractLeaf(fce, false)) - } - for _, res := range tdiff.ResolvedFileContracts { - fce := res.Parent - switch r := res.Resolution.(type) { - case types.V2FileContractRenewal: - fce.V2FileContract = r.FinalRevision - case types.V2FileContract: // finalization - fce.V2FileContract = r - } - leaves = append(leaves, V2FileContractLeaf(fce, true)) - } - } - - // Group leaves by tree, and sort them by leaf index. 
- var trees [64][]ElementLeaf - sort.Slice(leaves, func(i, j int) bool { - if len(leaves[i].MerkleProof) != len(leaves[j].MerkleProof) { - return len(leaves[i].MerkleProof) < len(leaves[j].MerkleProof) - } - return leaves[i].LeafIndex < leaves[j].LeafIndex - }) - for len(leaves) > 0 { - i := 0 - for i < len(leaves) && len(leaves[i].MerkleProof) == len(leaves[0].MerkleProof) { - i++ - } - trees[len(leaves[0].MerkleProof)] = leaves[:i] - leaves = leaves[i:] +// updateLeaves updates the Merkle proofs of each leaf to reflect the changes in +// all other leaves, and returns the leaves (grouped by tree) for later use. +func updateLeaves(leaves []ElementLeaf) [64][]ElementLeaf { + splitLeaves := func(ls []ElementLeaf, mid uint64) (left, right []ElementLeaf) { + split := sort.Search(len(ls), func(i int) bool { return ls[i].LeafIndex >= mid }) + return ls[:split], ls[split:] } var recompute func(i, j uint64, leaves []ElementLeaf) types.Hash256 @@ -449,92 +326,76 @@ func (acc *ElementAccumulator) updateLeaves(diff *BlockDiff) [64][]ElementLeaf { leftRoot = right[0].MerkleProof[height-1] } else { leftRoot = recompute(i, mid, left) - for i := range right { - right[i].MerkleProof[height-1] = leftRoot + for _, e := range right { + e.MerkleProof[height-1] = leftRoot } } if len(right) == 0 { rightRoot = left[0].MerkleProof[height-1] } else { rightRoot = recompute(mid, j, right) - for i := range left { - left[i].MerkleProof[height-1] = rightRoot + for _, e := range left { + e.MerkleProof[height-1] = rightRoot } } return blake2b.SumPair(leftRoot, rightRoot) } - // Recompute the root of each tree with updated leaves, and fill in the - // proof of each leaf. + // Group leaves by tree, and sort them by leaf index. 
+ var trees [64][]ElementLeaf + sort.Slice(leaves, func(i, j int) bool { + if len(leaves[i].MerkleProof) != len(leaves[j].MerkleProof) { + return len(leaves[i].MerkleProof) < len(leaves[j].MerkleProof) + } + return leaves[i].LeafIndex < leaves[j].LeafIndex + }) + for len(leaves) > 0 { + i := 0 + for i < len(leaves) && len(leaves[i].MerkleProof) == len(leaves[0].MerkleProof) { + i++ + } + trees[len(leaves[0].MerkleProof)] = leaves[:i] + leaves = leaves[i:] + } + + // Update the proofs within each tree by recursively recomputing the total + // root. for height, leaves := range &trees { if len(leaves) == 0 { continue } // Determine the range of leaf indices that comprise this tree. We can - // compute this efficiently by zeroing the least-significant bits of - // numLeaves. (Zeroing these bits is equivalent to subtracting the - // number of leaves in all trees smaller than this one.) - start := clearBits(acc.numLeaves, height+1) + // compute this efficiently by zeroing the least-significant bits of the + // leaf index. + start := clearBits(leaves[0].LeafIndex, height) end := start + 1< 0 { + acc.Trees[height] = es[0].ProofRoot() + } + } + eau.treeGrowth = acc.addLeaves(added) + for _, e := range updated { + e.MerkleProof = append(e.MerkleProof, eau.treeGrowth[len(e.MerkleProof)]...) + } return eau } -// RevertBlock produces an update from the supplied leaves. The accumulator is -// not modified. +// RevertBlock modifies the proofs of supplied elements such that they validate +// under acc, which must be the accumulator prior to the application of those +// elements. The accumulator itself is not modified. 
func (acc *ElementAccumulator) RevertBlock(updated []ElementLeaf) (eru ElementRevertUpdate) { - eru.numLeaves = acc.numLeaves - for _, l := range updated { - eru.updated[len(l.MerkleProof)] = append(eru.updated[len(l.MerkleProof)], l) - } + eru.updated = updateLeaves(updated) + eru.numLeaves = acc.NumLeaves return } @@ -623,7 +484,7 @@ type HistoryAccumulator struct { // Contains returns true if the accumulator contains the given index. func (acc *HistoryAccumulator) Contains(index types.ChainIndex, proof []types.Hash256) bool { - return acc.hasTreeAtHeight(len(proof)) && acc.trees[len(proof)] == historyProofRoot(index, proof) + return acc.hasTreeAtHeight(len(proof)) && acc.Trees[len(proof)] == historyProofRoot(index, proof) } // ApplyBlock integrates a ChainIndex into the accumulator, producing a @@ -632,12 +493,12 @@ func (acc *HistoryAccumulator) ApplyBlock(index types.ChainIndex) (hau HistoryAp h := historyLeafHash(index) i := 0 for ; acc.hasTreeAtHeight(i); i++ { - hau.proof = append(hau.proof, acc.trees[i]) + hau.proof = append(hau.proof, acc.Trees[i]) hau.growth = append(hau.growth, h) - h = blake2b.SumPair(acc.trees[i], h) + h = blake2b.SumPair(acc.Trees[i], h) } - acc.trees[i] = h - acc.numLeaves++ + acc.Trees[i] = h + acc.NumLeaves++ return } diff --git a/consensus/state.go b/consensus/state.go index 21b9bead..a5264915 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -19,21 +19,6 @@ import ( // implementation whose constructor returns a concrete type. var hasherPool = &sync.Pool{New: func() interface{} { return types.NewHasher() }} -// A Store stores blocks, siacoin outputs, siafund outputs, and file contracts. -// -// Store methods do not return errors. If a store encounters an error, it should -// save the error and thereafter return empty values from all methods. It is the -// caller's responsibility to check this error, and, if non-nil, discard any -// results computed. 
-type Store interface { - BestIndex(height uint64) (types.ChainIndex, bool) - AncestorTimestamp(id types.BlockID, n uint64) time.Time - SiacoinElement(id types.SiacoinOutputID) (types.SiacoinElement, bool) - SiafundElement(id types.SiafundOutputID) (types.SiafundElement, bool) - FileContractElement(id types.FileContractID) (types.FileContractElement, bool) - MissedFileContracts(height uint64) []types.FileContractID -} - // A Network specifies the fixed parameters of a Sia blockchain. type Network struct { Name string `json:"name"` @@ -199,6 +184,12 @@ func (s State) SiafundCount() uint64 { return 10000 } +// AncestorDepth is the depth used to determine the target timestamp in the +// pre-Oak difficulty adjustment algorithm. +func (s State) AncestorDepth() uint64 { + return 1000 +} + // FoundationSubsidy returns the Foundation subsidy output for the child block. // If no subsidy is due, the returned output has a value of zero. func (s State) FoundationSubsidy() (sco types.SiacoinOutput) { @@ -279,7 +270,7 @@ func (s State) V2FileContractTax(fc types.V2FileContract) types.Currency { // StorageProofLeafIndex returns the leaf index used when computing or // validating a storage proof. 
-func (s State) StorageProofLeafIndex(filesize uint64, windowStart types.ChainIndex, fcid types.FileContractID) uint64 { +func (s State) StorageProofLeafIndex(filesize uint64, windowID types.BlockID, fcid types.FileContractID) uint64 { const leafSize = uint64(len(types.StorageProof{}.Leaf)) numLeaves := filesize / leafSize if filesize%leafSize != 0 { @@ -288,7 +279,7 @@ func (s State) StorageProofLeafIndex(filesize uint64, windowStart types.ChainInd if numLeaves <= 0 { return 0 } - seed := types.HashBytes(append(windowStart.ID[:], fcid[:]...)) + seed := types.HashBytes(append(windowID[:], fcid[:]...)) var r uint64 for i := 0; i < len(seed); i += 8 { _, r = bits.Div64(r, binary.BigEndian.Uint64(seed[i:]), numLeaves) diff --git a/consensus/store.go b/consensus/store.go new file mode 100644 index 00000000..68f9edc3 --- /dev/null +++ b/consensus/store.go @@ -0,0 +1,125 @@ +package consensus + +import ( + "go.sia.tech/core/types" +) + +// A V1TransactionSupplement contains elements that are associated with a v1 +// transaction, but not included in the transaction. For example, v1 +// transactions reference the ID of each SiacoinOutput they spend, but do not +// contain the output itself. Consequently, in order to validate the +// transaction, those outputs must be loaded from a Store. Collecting these +// elements into an explicit struct allows us to preserve them even after the +// Store has been mutated. +type V1TransactionSupplement struct { + SiacoinInputs []types.SiacoinElement + SiafundInputs []types.SiafundElement + RevisedFileContracts []types.FileContractElement + ValidFileContracts []types.FileContractElement + StorageProofBlockIDs []types.BlockID +} + +// EncodeTo implements types.EncoderTo. 
+func (ts V1TransactionSupplement) EncodeTo(e *types.Encoder) { + e.WritePrefix(len(ts.SiacoinInputs)) + for i := range ts.SiacoinInputs { + ts.SiacoinInputs[i].EncodeTo(e) + } + e.WritePrefix(len(ts.SiafundInputs)) + for i := range ts.SiafundInputs { + ts.SiafundInputs[i].EncodeTo(e) + } + e.WritePrefix(len(ts.RevisedFileContracts)) + for i := range ts.RevisedFileContracts { + ts.RevisedFileContracts[i].EncodeTo(e) + } + e.WritePrefix(len(ts.ValidFileContracts)) + for i := range ts.ValidFileContracts { + ts.ValidFileContracts[i].EncodeTo(e) + } +} + +// DecodeFrom implements types.DecoderFrom. +func (ts *V1TransactionSupplement) DecodeFrom(d *types.Decoder) { + ts.SiacoinInputs = make([]types.SiacoinElement, d.ReadPrefix()) + for i := range ts.SiacoinInputs { + ts.SiacoinInputs[i].DecodeFrom(d) + } + ts.SiafundInputs = make([]types.SiafundElement, d.ReadPrefix()) + for i := range ts.SiafundInputs { + ts.SiafundInputs[i].DecodeFrom(d) + } + ts.RevisedFileContracts = make([]types.FileContractElement, d.ReadPrefix()) + for i := range ts.RevisedFileContracts { + ts.RevisedFileContracts[i].DecodeFrom(d) + } + ts.ValidFileContracts = make([]types.FileContractElement, d.ReadPrefix()) + for i := range ts.ValidFileContracts { + ts.ValidFileContracts[i].DecodeFrom(d) + } +} + +func (ts V1TransactionSupplement) siacoinElement(id types.SiacoinOutputID) (sce types.SiacoinElement, ok bool) { + for _, sce := range ts.SiacoinInputs { + if types.SiacoinOutputID(sce.ID) == id { + return sce, true + } + } + return +} + +func (ts V1TransactionSupplement) siafundElement(id types.SiafundOutputID) (sce types.SiafundElement, ok bool) { + for _, sfe := range ts.SiafundInputs { + if types.SiafundOutputID(sfe.ID) == id { + return sfe, true + } + } + return +} + +func (ts V1TransactionSupplement) fileContractElement(id types.FileContractID) (sce types.FileContractElement, ok bool) { + for _, fce := range ts.RevisedFileContracts { + if types.FileContractID(fce.ID) == id { + return fce, true + 
} + } + for _, fce := range ts.ValidFileContracts { + if types.FileContractID(fce.ID) == id { + return fce, true + } + } + return +} + +// A V1BlockSupplement contains elements that are associated with a v1 block, +// but not included in the block. This includes supplements for each v1 +// transaction, as well as any file contracts that expired at the block's +// height. +type V1BlockSupplement struct { + Transactions []V1TransactionSupplement + ExpiringFileContracts []types.FileContractElement +} + +// EncodeTo implements types.EncoderTo. +func (bs V1BlockSupplement) EncodeTo(e *types.Encoder) { + e.WritePrefix(len(bs.Transactions)) + for i := range bs.Transactions { + bs.Transactions[i].EncodeTo(e) + } + e.WritePrefix(len(bs.ExpiringFileContracts)) + for i := range bs.ExpiringFileContracts { + bs.ExpiringFileContracts[i].EncodeTo(e) + } +} + +// DecodeFrom implements types.DecoderFrom. +func (bs *V1BlockSupplement) DecodeFrom(d *types.Decoder) { + bs.Transactions = make([]V1TransactionSupplement, d.ReadPrefix()) + for i := range bs.Transactions { + bs.Transactions[i].DecodeFrom(d) + } + bs.ExpiringFileContracts = make([]types.FileContractElement, d.ReadPrefix()) + for i := range bs.ExpiringFileContracts { + bs.ExpiringFileContracts[i].DecodeFrom(d) + } +} diff --git a/consensus/update.go b/consensus/update.go index 26b694eb..276b9d40 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -55,7 +55,7 @@ func updateOakTarget(s State) types.BlockID { return addTarget(mulTargetFrac(s.OakTarget, 1000, 995), s.ChildTarget) } -func adjustTarget(s State, blockTimestamp time.Time, store Store) types.BlockID { +func adjustTarget(s State, blockTimestamp time.Time, targetTimestamp time.Time) types.BlockID { blockInterval := int64(s.BlockInterval() / time.Second) // pre-Oak algorithm @@ -68,7 +68,6 @@ func adjustTarget(s State, blockTimestamp time.Time, store Store) types.BlockID if windowSize > s.childHeight() { ancestorDepth = s.childHeight() } - targetTimestamp := 
store.AncestorTimestamp(s.Index.ID, ancestorDepth) elapsed := int64(blockTimestamp.Sub(targetTimestamp) / time.Second) expected := blockInterval * int64(ancestorDepth) // clamp @@ -152,552 +151,404 @@ func adjustTarget(s State, blockTimestamp time.Time, store Store) types.BlockID return newTarget } -// ApplyState applies b to s, returning the resulting state. -func ApplyState(s State, store Store, b types.Block) State { +// ApplyWork applies the work of b to s, returning the resulting state. Only the +// PoW-related fields are updated. +func ApplyWork(s State, b types.Block, targetTimestamp time.Time) State { if s.Index.Height > 0 && s.Index.ID != b.ParentID { panic("consensus: cannot apply non-child block") } - siafundPool := s.SiafundPool - for _, txn := range b.Transactions { - for _, fc := range txn.FileContracts { - siafundPool = siafundPool.Add(s.FileContractTax(fc)) - } - } - if b.V2 != nil { - for _, txn := range b.V2.Transactions { - for _, fc := range txn.FileContracts { - siafundPool = siafundPool.Add(s.V2FileContractTax(fc)) - } - } - } - - // update state - newFoundationPrimaryAddress := s.FoundationPrimaryAddress - newFoundationFailsafeAddress := s.FoundationFailsafeAddress - if s.Index.Height >= s.Network.HardforkFoundation.Height { - outer: - for _, txn := range b.Transactions { - for _, arb := range txn.ArbitraryData { - if bytes.HasPrefix(arb, types.SpecifierFoundation[:]) { - var update types.FoundationAddressUpdate - update.DecodeFrom(types.NewBufDecoder(arb[len(types.SpecifierFoundation):])) - newFoundationPrimaryAddress = update.NewPrimary - newFoundationFailsafeAddress = update.NewFailsafe - break outer // Foundation addresses can only be updated once per block - } - } - } - } - if b.V2 != nil { - for _, txn := range b.V2.Transactions { - if txn.NewFoundationAddress != nil { - newFoundationPrimaryAddress = *txn.NewFoundationAddress - newFoundationFailsafeAddress = *txn.NewFoundationAddress - } - } - } - if b.ParentID == (types.BlockID{}) { // 
special handling for genesis block - return State{ - Network: s.Network, - - Index: types.ChainIndex{Height: 0, ID: b.ID()}, - PrevTimestamps: [11]time.Time{0: b.Timestamp}, - Depth: s.Depth, - ChildTarget: s.ChildTarget, - SiafundPool: siafundPool, - - OakTime: updateOakTime(s, b.Timestamp, b.Timestamp), - OakTarget: updateOakTarget(s), - FoundationPrimaryAddress: newFoundationPrimaryAddress, - FoundationFailsafeAddress: newFoundationFailsafeAddress, - - History: s.History, - Elements: s.Elements, - } - } - - prevTimestamps := s.PrevTimestamps - copy(prevTimestamps[1:], s.PrevTimestamps[:]) - prevTimestamps[0] = b.Timestamp - return State{ - Network: s.Network, - - Index: types.ChainIndex{Height: s.Index.Height + 1, ID: b.ID()}, - PrevTimestamps: prevTimestamps, - Depth: addTarget(s.Depth, s.ChildTarget), - ChildTarget: adjustTarget(s, b.Timestamp, store), - SiafundPool: siafundPool, - - OakTime: updateOakTime(s, b.Timestamp, s.PrevTimestamps[0]), - OakTarget: updateOakTarget(s), - FoundationPrimaryAddress: newFoundationPrimaryAddress, - FoundationFailsafeAddress: newFoundationFailsafeAddress, - - History: s.History, - Elements: s.Elements, + s.OakTime = updateOakTime(s, b.Timestamp, b.Timestamp) + s.OakTarget = updateOakTarget(s) + s.Index = types.ChainIndex{Height: 0, ID: b.ID()} + } else { + s.Depth = addTarget(s.Depth, s.ChildTarget) + s.ChildTarget = adjustTarget(s, b.Timestamp, targetTimestamp) + s.OakTime = updateOakTime(s, b.Timestamp, s.PrevTimestamps[0]) + s.OakTarget = updateOakTarget(s) + s.Index = types.ChainIndex{Height: s.Index.Height + 1, ID: b.ID()} } -} + copy(s.PrevTimestamps[1:], s.PrevTimestamps[:]) + s.PrevTimestamps[0] = b.Timestamp + return s -// v2SiacoinOutputID returns the ID of the i'th siacoin output created by the -// transaction. 
-func v2SiacoinOutputID(txid types.TransactionID, i int) types.SiacoinOutputID { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - types.SpecifierSiacoinOutput.EncodeTo(h.E) - txid.EncodeTo(h.E) - h.E.WriteUint64(uint64(i)) - return types.SiacoinOutputID(h.Sum()) } -// v2SiafundOutputID returns the ID of the i'th siafund output created by the -// transaction. -func v2SiafundOutputID(txid types.TransactionID, i int) types.SiafundOutputID { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - types.SpecifierSiafundOutput.EncodeTo(h.E) - txid.EncodeTo(h.E) - h.E.WriteUint64(uint64(i)) - return types.SiafundOutputID(h.Sum()) +func (ms *MidState) addedLeaf(id types.Hash256) *ElementLeaf { + for i := range ms.added { + if ms.added[i].ID == id { + return &ms.added[i] + } + } + return nil } -// v2FileContractID returns the ID of the i'th file contract created by the -// transaction. -func v2FileContractID(txid types.TransactionID, i int) types.FileContractID { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - types.SpecifierFileContract.EncodeTo(h.E) - txid.EncodeTo(h.E) - h.E.WriteUint64(uint64(i)) - return types.FileContractID(h.Sum()) +func (ms *MidState) addSiacoinElement(sce types.SiacoinElement) { + ms.sces = append(ms.sces, sce) + ms.added = append(ms.added, SiacoinLeaf(&ms.sces[len(ms.sces)-1], false)) + ms.ephemeral[ms.sces[len(ms.sces)-1].ID] = len(ms.sces) - 1 } -// A TransactionDiff represents the changes to an ElementStore resulting from -// the application of a transaction. 
-type TransactionDiff struct { - CreatedSiacoinElements []types.SiacoinElement `json:"createdSiacoinElements,omitempty"` - CreatedSiafundElements []types.SiafundElement `json:"createdSiafundElements,omitempty"` - CreatedFileContracts []types.FileContractElement `json:"createdFileContracts,omitempty"` - - SpentSiacoinElements []types.SiacoinElement `json:"spentSiacoinElements,omitempty"` - SpentSiafundElements []types.SiafundElement `json:"spentSiafundElements,omitempty"` - RevisedFileContracts []types.FileContractElementRevision `json:"revisedFileContracts,omitempty"` - ValidFileContracts []types.FileContractElement `json:"validFileContracts,omitempty"` +func (ms *MidState) spendSiacoinElement(sce types.SiacoinElement, txid types.TransactionID) { + ms.spends[sce.ID] = txid + if _, ok := ms.ephemeral[sce.ID]; ok { + ms.addedLeaf(sce.ID).Spent = true + } else { + sce.MerkleProof = append([]types.Hash256(nil), sce.MerkleProof...) + ms.sces = append(ms.sces, sce) + ms.updated = append(ms.updated, SiacoinLeaf(&ms.sces[len(ms.sces)-1], true)) + } } -// EncodeTo implements types.EncoderTo. 
-func (td TransactionDiff) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(td.CreatedSiacoinElements)) - for i := range td.CreatedSiacoinElements { - td.CreatedSiacoinElements[i].EncodeTo(e) - } - e.WritePrefix(len(td.CreatedSiafundElements)) - for i := range td.CreatedSiafundElements { - td.CreatedSiafundElements[i].EncodeTo(e) - } - e.WritePrefix(len(td.CreatedFileContracts)) - for i := range td.CreatedFileContracts { - td.CreatedFileContracts[i].EncodeTo(e) - } - e.WritePrefix(len(td.SpentSiacoinElements)) - for i := range td.SpentSiacoinElements { - td.SpentSiacoinElements[i].EncodeTo(e) - } - e.WritePrefix(len(td.SpentSiafundElements)) - for i := range td.SpentSiafundElements { - td.SpentSiafundElements[i].EncodeTo(e) - } - e.WritePrefix(len(td.RevisedFileContracts)) - for i := range td.RevisedFileContracts { - td.RevisedFileContracts[i].EncodeTo(e) - } - e.WritePrefix(len(td.ValidFileContracts)) - for i := range td.ValidFileContracts { - td.ValidFileContracts[i].EncodeTo(e) - } +func (ms *MidState) addSiafundElement(sfe types.SiafundElement) { + ms.sfes = append(ms.sfes, sfe) + ms.added = append(ms.added, SiafundLeaf(&ms.sfes[len(ms.sfes)-1], false)) + ms.ephemeral[ms.sfes[len(ms.sfes)-1].ID] = len(ms.sfes) - 1 } -// DecodeFrom implements types.DecoderFrom. 
-func (td *TransactionDiff) DecodeFrom(d *types.Decoder) { - td.CreatedSiacoinElements = make([]types.SiacoinElement, d.ReadPrefix()) - for i := range td.CreatedSiacoinElements { - td.CreatedSiacoinElements[i].DecodeFrom(d) - } - td.CreatedSiafundElements = make([]types.SiafundElement, d.ReadPrefix()) - for i := range td.CreatedSiafundElements { - td.CreatedSiafundElements[i].DecodeFrom(d) - } - td.CreatedFileContracts = make([]types.FileContractElement, d.ReadPrefix()) - for i := range td.CreatedFileContracts { - td.CreatedFileContracts[i].DecodeFrom(d) - } - td.SpentSiacoinElements = make([]types.SiacoinElement, d.ReadPrefix()) - for i := range td.SpentSiacoinElements { - td.SpentSiacoinElements[i].DecodeFrom(d) - } - td.SpentSiafundElements = make([]types.SiafundElement, d.ReadPrefix()) - for i := range td.SpentSiafundElements { - td.SpentSiafundElements[i].DecodeFrom(d) - } - td.RevisedFileContracts = make([]types.FileContractElementRevision, d.ReadPrefix()) - for i := range td.RevisedFileContracts { - td.RevisedFileContracts[i].DecodeFrom(d) - } - td.ValidFileContracts = make([]types.FileContractElement, d.ReadPrefix()) - for i := range td.ValidFileContracts { - td.ValidFileContracts[i].DecodeFrom(d) +func (ms *MidState) spendSiafundElement(sfe types.SiafundElement, txid types.TransactionID) { + ms.spends[sfe.ID] = txid + if _, ok := ms.ephemeral[sfe.ID]; ok { + ms.addedLeaf(sfe.ID).Spent = true + } else { + sfe.MerkleProof = append([]types.Hash256(nil), sfe.MerkleProof...) + ms.sfes = append(ms.sfes, sfe) + ms.updated = append(ms.updated, SiafundLeaf(&ms.sfes[len(ms.sfes)-1], true)) } } -// A V2TransactionDiff contains the elements added to the state accumulator by a -// v2 transaction. 
-type V2TransactionDiff struct { - CreatedSiacoinElements []types.SiacoinElement `json:"createdSiacoinElements,omitempty"` - CreatedSiafundElements []types.SiafundElement `json:"createdSiafundElements,omitempty"` - CreatedFileContracts []types.V2FileContractElement `json:"createdFileContracts,omitempty"` - - // NOTE: these fields are all easily derived from the block itself; we - // include them for convenience - SpentSiacoinElements []types.SiacoinElement `json:"spentSiacoinElements,omitempty"` - SpentSiafundElements []types.SiafundElement `json:"spentSiafundElements,omitempty"` - RevisedFileContracts []types.V2FileContractRevision `json:"revisedFileContracts,omitempty"` - ResolvedFileContracts []types.V2FileContractResolution `json:"resolvedFileContracts,omitempty"` +func (ms *MidState) addFileContractElement(fce types.FileContractElement) { + ms.fces = append(ms.fces, fce) + ms.added = append(ms.added, FileContractLeaf(&ms.fces[len(ms.fces)-1], false)) + ms.ephemeral[ms.fces[len(ms.fces)-1].ID] = len(ms.fces) - 1 + ms.siafundPool = ms.siafundPool.Add(ms.base.FileContractTax(fce.FileContract)) } -// EncodeTo implements types.EncoderTo. 
-func (td V2TransactionDiff) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(td.CreatedSiacoinElements)) - for i := range td.CreatedSiacoinElements { - td.CreatedSiacoinElements[i].EncodeTo(e) - } - e.WritePrefix(len(td.CreatedSiafundElements)) - for i := range td.CreatedSiafundElements { - td.CreatedSiafundElements[i].EncodeTo(e) - } - e.WritePrefix(len(td.CreatedFileContracts)) - for i := range td.CreatedFileContracts { - td.CreatedFileContracts[i].EncodeTo(e) +func (ms *MidState) reviseFileContractElement(fce types.FileContractElement, rev types.FileContract) { + rev.Payout = fce.FileContract.Payout + if i, ok := ms.ephemeral[fce.ID]; ok { + ms.fces[i].FileContract = rev + *ms.addedLeaf(fce.ID) = FileContractLeaf(&ms.fces[i], false) + } else { + if r, ok := ms.revs[fce.ID]; ok { + r.FileContract = rev + for i := range ms.updated { + if ms.updated[i].ID == fce.ID { + ms.updated[i] = FileContractLeaf(r, false) + break + } + } + } else { + // store the original + fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) + ms.fces = append(ms.fces, fce) + // store the revision + fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) + fce.FileContract = rev + ms.revs[fce.ID] = &fce + ms.updated = append(ms.updated, FileContractLeaf(&fce, false)) + } } } -// DecodeFrom implements types.DecoderFrom. 
-func (td *V2TransactionDiff) DecodeFrom(d *types.Decoder) { - td.CreatedSiacoinElements = make([]types.SiacoinElement, d.ReadPrefix()) - for i := range td.CreatedSiacoinElements { - td.CreatedSiacoinElements[i].DecodeFrom(d) - } - td.CreatedSiafundElements = make([]types.SiafundElement, d.ReadPrefix()) - for i := range td.CreatedSiafundElements { - td.CreatedSiafundElements[i].DecodeFrom(d) - } - td.CreatedFileContracts = make([]types.V2FileContractElement, d.ReadPrefix()) - for i := range td.CreatedFileContracts { - td.CreatedFileContracts[i].DecodeFrom(d) - } +func (ms *MidState) resolveFileContractElement(fce types.FileContractElement, txid types.TransactionID) { + ms.spends[fce.ID] = txid + fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) + ms.fces = append(ms.fces, fce) + ms.updated = append(ms.updated, FileContractLeaf(&ms.fces[len(ms.fces)-1], true)) } -// A BlockDiff represents the changes to blockchain state resulting from the -// application of a block. -type BlockDiff struct { - Transactions []TransactionDiff `json:"transactions,omitempty"` - V2Transactions []V2TransactionDiff `json:"v2Transactions,omitempty"` - CreatedSiacoinElements []types.SiacoinElement `json:"createdSiacoinElements,omitempty"` - MissedFileContracts []types.FileContractElement `json:"missedFileContracts,omitempty"` - ElementApplyUpdate ElementApplyUpdate `json:"-"` - HistoryApplyUpdate HistoryApplyUpdate `json:"-"` +func (ms *MidState) addV2FileContractElement(fce types.V2FileContractElement) { + ms.v2fces = append(ms.v2fces, fce) + ms.added = append(ms.added, V2FileContractLeaf(&ms.v2fces[len(ms.v2fces)-1], false)) + ms.ephemeral[ms.v2fces[len(ms.v2fces)-1].ID] = len(ms.v2fces) - 1 + ms.siafundPool = ms.siafundPool.Add(ms.base.V2FileContractTax(fce.V2FileContract)) } -// EncodeTo implements types.EncoderTo. 
-func (bd BlockDiff) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(bd.Transactions)) - for i := range bd.Transactions { - bd.Transactions[i].EncodeTo(e) - } - e.WritePrefix(len(bd.V2Transactions)) - for i := range bd.V2Transactions { - bd.V2Transactions[i].EncodeTo(e) - } - e.WritePrefix(len(bd.CreatedSiacoinElements)) - for i := range bd.CreatedSiacoinElements { - bd.CreatedSiacoinElements[i].EncodeTo(e) - } - e.WritePrefix(len(bd.MissedFileContracts)) - for i := range bd.MissedFileContracts { - bd.MissedFileContracts[i].EncodeTo(e) - } +func (ms *MidState) reviseV2FileContractElement(fce types.V2FileContractElement, rev types.V2FileContract) { + fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) + ms.v2fces = append(ms.v2fces, fce) + ms.updated = append(ms.updated, FileContractLeaf(&ms.fces[len(ms.fces)-1], false)) } -// DecodeFrom implements types.DecoderFrom. -func (bd *BlockDiff) DecodeFrom(d *types.Decoder) { - bd.Transactions = make([]TransactionDiff, d.ReadPrefix()) - for i := range bd.Transactions { - bd.Transactions[i].DecodeFrom(d) - } - bd.V2Transactions = make([]V2TransactionDiff, d.ReadPrefix()) - for i := range bd.V2Transactions { - bd.V2Transactions[i].DecodeFrom(d) - } - bd.CreatedSiacoinElements = make([]types.SiacoinElement, d.ReadPrefix()) - for i := range bd.CreatedSiacoinElements { - bd.CreatedSiacoinElements[i].DecodeFrom(d) - } - bd.MissedFileContracts = make([]types.FileContractElement, d.ReadPrefix()) - for i := range bd.MissedFileContracts { - bd.MissedFileContracts[i].DecodeFrom(d) - } +func (ms *MidState) resolveV2FileContractElement(fce types.V2FileContractElement, txid types.TransactionID) { + ms.spends[fce.ID] = txid + fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) + ms.v2fces = append(ms.v2fces, fce) + ms.updated = append(ms.updated, V2FileContractLeaf(&ms.v2fces[len(ms.v2fces)-1], true)) } // ApplyTransaction applies a transaction to the MidState. 
-func (ms *MidState) ApplyTransaction(store Store, txn types.Transaction) { +func (ms *MidState) ApplyTransaction(txn types.Transaction, ts V1TransactionSupplement) { txid := txn.ID() for _, sci := range txn.SiacoinInputs { - ms.spends[types.Hash256(sci.ParentID)] = txid + ms.spendSiacoinElement(ms.mustSiacoinElement(ts, sci.ParentID), txid) } for i, sco := range txn.SiacoinOutputs { - scoid := txn.SiacoinOutputID(i) - ms.sces[scoid] = types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(scoid)}, - SiacoinOutput: sco, - MaturityHeight: 0, - } + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(txn.SiacoinOutputID(i))}, + SiacoinOutput: sco, + }) } for _, sfi := range txn.SiafundInputs { - ms.spends[types.Hash256(sfi.ParentID)] = txid + sfe := ms.mustSiafundElement(ts, sfi.ParentID) + claimPortion := ms.siafundPool.Sub(sfe.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfe.Value) + ms.spendSiafundElement(sfe, txid) + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(sfi.ParentID.ClaimOutputID())}, + SiacoinOutput: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, + MaturityHeight: ms.base.MaturityHeight(), + }) } for i, sfo := range txn.SiafundOutputs { - sfoid := txn.SiafundOutputID(i) - ms.sfes[sfoid] = types.SiafundElement{ - StateElement: types.StateElement{ID: types.Hash256(sfoid)}, + ms.addSiafundElement(types.SiafundElement{ + StateElement: types.StateElement{ID: types.Hash256(txn.SiafundOutputID(i))}, SiafundOutput: sfo, ClaimStart: ms.siafundPool, - } + }) } for i, fc := range txn.FileContracts { - fcid := txn.FileContractID(i) - ms.fces[fcid] = types.FileContractElement{ - StateElement: types.StateElement{ID: types.Hash256(fcid)}, + ms.addFileContractElement(types.FileContractElement{ + StateElement: types.StateElement{ID: types.Hash256(txn.FileContractID(i))}, FileContract: fc, - } - ms.siafundPool = 
ms.siafundPool.Add(ms.base.FileContractTax(fc)) + }) } for _, fcr := range txn.FileContractRevisions { - fce := ms.mustFileContractElement(store, fcr.ParentID) - ms.fces[contractRevisionID(fcr.ParentID, fcr.RevisionNumber)] = fce // store previous revision for Diff later - fcr.FileContract.Payout = fce.Payout - fce.FileContract = fcr.FileContract - ms.fces[fcr.ParentID] = fce + ms.reviseFileContractElement(ms.mustFileContractElement(ts, fcr.ParentID), fcr.FileContract) } for _, sp := range txn.StorageProofs { - ms.spends[types.Hash256(sp.ParentID)] = txid + fce := ms.mustFileContractElement(ts, sp.ParentID) + ms.resolveFileContractElement(fce, txid) + for i, sco := range fce.ValidProofOutputs { + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(sp.ParentID.ValidOutputID(i))}, + SiacoinOutput: sco, + MaturityHeight: ms.base.MaturityHeight(), + }) + } + } + if ms.base.Index.Height >= ms.base.Network.HardforkFoundation.Height { + for _, arb := range txn.ArbitraryData { + if bytes.HasPrefix(arb, types.SpecifierFoundation[:]) { + var update types.FoundationAddressUpdate + update.DecodeFrom(types.NewBufDecoder(arb[len(types.SpecifierFoundation):])) + ms.foundationPrimary = update.NewPrimary + ms.foundationFailsafe = update.NewFailsafe + } + } } } // ApplyV2Transaction applies a v2 transaction to the MidState. 
func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { txid := txn.ID() + var elems uint64 + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + nextElement := func() types.StateElement { + h.Reset() + types.SpecifierElementID.EncodeTo(h.E) + txid.EncodeTo(h.E) + h.E.WriteUint64(elems) + elems++ + return types.StateElement{ID: h.Sum()} + } + for _, sci := range txn.SiacoinInputs { - ms.spends[sci.Parent.ID] = txid + ms.spendSiacoinElement(sci.Parent, txid) } - for i, sco := range txn.SiacoinOutputs { - scoid := v2SiacoinOutputID(txid, i) - ms.sces[scoid] = types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(scoid)}, - SiacoinOutput: sco, - MaturityHeight: 0, - } + for _, sco := range txn.SiacoinOutputs { + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: nextElement(), + SiacoinOutput: sco, + }) } for _, sfi := range txn.SiafundInputs { - ms.spends[sfi.Parent.ID] = txid + ms.spendSiafundElement(sfi.Parent, txid) + claimPortion := ms.siafundPool.Sub(sfi.Parent.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfi.Parent.Value) + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: nextElement(), + SiacoinOutput: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, + MaturityHeight: ms.base.MaturityHeight(), + }) } - for i, sfo := range txn.SiafundOutputs { - sfoid := v2SiafundOutputID(txid, i) - ms.sfes[sfoid] = types.SiafundElement{ - StateElement: types.StateElement{ID: types.Hash256(sfoid)}, + for _, sfo := range txn.SiafundOutputs { + ms.addSiafundElement(types.SiafundElement{ + StateElement: nextElement(), SiafundOutput: sfo, ClaimStart: ms.siafundPool, - } + }) } - for i, fc := range txn.FileContracts { - fcid := v2FileContractID(txid, i) - ms.v2fces[fcid] = types.V2FileContractElement{ - StateElement: types.StateElement{ID: types.Hash256(fcid)}, + for _, fc := range txn.FileContracts { + ms.addV2FileContractElement(types.V2FileContractElement{ + StateElement: nextElement(), 
V2FileContract: fc, - } - ms.siafundPool = ms.siafundPool.Add(ms.base.V2FileContractTax(fc)) + }) } for _, fcr := range txn.FileContractRevisions { - fce := fcr.Parent - fce.V2FileContract = fcr.Revision - ms.v2fces[types.FileContractID(fcr.Parent.ID)] = fce - } - for _, res := range txn.FileContractResolutions { - ms.spends[res.Parent.ID] = txid - } -} - -// ApplyDiff applies b to s, returning the resulting effects. -func ApplyDiff(s State, store Store, b types.Block) BlockDiff { - if s.Index.Height > 0 && s.Index.ID != b.ParentID { - panic("consensus: cannot apply non-child block") + ms.reviseV2FileContractElement(fcr.Parent, fcr.Revision) } + for _, fcr := range txn.FileContractResolutions { + ms.resolveV2FileContractElement(fcr.Parent, txid) - ms := NewMidState(s) - - var diff BlockDiff - for _, txn := range b.Transactions { - var tdiff TransactionDiff - for _, sci := range txn.SiacoinInputs { - tdiff.SpentSiacoinElements = append(tdiff.SpentSiacoinElements, ms.mustSiacoinElement(store, sci.ParentID)) - } - for i, sco := range txn.SiacoinOutputs { - scoid := txn.SiacoinOutputID(i) - tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(scoid)}, - SiacoinOutput: sco, - }) - } - for i, fc := range txn.FileContracts { - fcid := txn.FileContractID(i) - tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.FileContractElement{ - StateElement: types.StateElement{ID: types.Hash256(fcid)}, - FileContract: fc, - }) - } - for _, sfi := range txn.SiafundInputs { - sfe, claimPortion := ms.mustSiafundElement(store, sfi.ParentID) - tdiff.SpentSiafundElements = append(tdiff.SpentSiafundElements, sfe) - tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(sfi.ParentID.ClaimOutputID())}, - SiacoinOutput: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, - 
MaturityHeight: s.MaturityHeight(), - }) - } - for i, sfo := range txn.SiafundOutputs { - sfoid := txn.SiafundOutputID(i) - tdiff.CreatedSiafundElements = append(tdiff.CreatedSiafundElements, types.SiafundElement{ - StateElement: types.StateElement{ID: types.Hash256(sfoid)}, - SiafundOutput: sfo, - ClaimStart: ms.siafundPool, - }) - } - for _, fcr := range txn.FileContractRevisions { - fce := ms.mustFileContractParentRevision(store, fcr.ParentID, fcr.RevisionNumber) - tdiff.RevisedFileContracts = append(tdiff.RevisedFileContracts, types.FileContractElementRevision{ - Parent: fce, - Revision: fcr.FileContract, + fce := fcr.Parent + var renter, host types.SiacoinOutput + switch r := fcr.Resolution.(type) { + case types.V2FileContractRenewal: + renter, host = r.FinalRevision.RenterOutput, r.FinalRevision.HostOutput + renter.Value = renter.Value.Sub(r.RenterRollover) + host.Value = host.Value.Sub(r.HostRollover) + ms.addV2FileContractElement(types.V2FileContractElement{ + StateElement: nextElement(), + V2FileContract: r.InitialRevision, }) + case types.V2StorageProof: + renter, host = fce.RenterOutput, fce.HostOutput + case types.V2FileContract: // finalization + renter, host = r.RenterOutput, r.HostOutput + case types.V2FileContractExpiration: + renter, host = fce.RenterOutput, fce.MissedHostOutput() } - for _, sp := range txn.StorageProofs { - fce := ms.mustFileContractElement(store, sp.ParentID) - tdiff.ValidFileContracts = append(tdiff.ValidFileContracts, fce) - for i, sco := range fce.ValidProofOutputs { - scoid := sp.ParentID.ValidOutputID(i) - tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(scoid)}, - SiacoinOutput: sco, - MaturityHeight: s.MaturityHeight(), - }) - } - } - - diff.Transactions = append(diff.Transactions, tdiff) - ms.ApplyTransaction(store, txn) + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: nextElement(), + SiacoinOutput: renter, + 
MaturityHeight: ms.base.MaturityHeight(), + }) + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: nextElement(), + SiacoinOutput: host, + MaturityHeight: ms.base.MaturityHeight(), + }) + } + if txn.NewFoundationAddress != nil { + ms.foundationPrimary = *txn.NewFoundationAddress + ms.foundationFailsafe = *txn.NewFoundationAddress } +} +func (ms *MidState) ApplyBlock(b types.Block, bs V1BlockSupplement) { + for i, txn := range b.Transactions { + ms.ApplyTransaction(txn, bs.Transactions[i]) + } if b.V2 != nil { for _, txn := range b.V2.Transactions { - var tdiff V2TransactionDiff - txid := txn.ID() - - for _, sci := range txn.SiacoinInputs { - tdiff.SpentSiacoinElements = append(tdiff.SpentSiacoinElements, sci.Parent) - } - for _, sco := range txn.SiacoinOutputs { - scoid := v2SiacoinOutputID(txid, len(tdiff.CreatedSiacoinElements)) - tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(scoid)}, - SiacoinOutput: sco, - }) - } - for _, fc := range txn.FileContracts { - tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.V2FileContractElement{ - StateElement: types.StateElement{ID: types.Hash256(v2FileContractID(txid, len(tdiff.CreatedFileContracts)))}, - V2FileContract: fc, - }) - } - for _, sfi := range txn.SiafundInputs { - tdiff.SpentSiafundElements = append(tdiff.SpentSiafundElements, sfi.Parent) - scoid := v2SiacoinOutputID(txid, len(tdiff.CreatedSiacoinElements)) - claimPortion := ms.siafundPool.Sub(sfi.Parent.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfi.Parent.Value) - tdiff.CreatedSiacoinElements = append(tdiff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(scoid)}, - SiacoinOutput: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, - MaturityHeight: s.MaturityHeight(), - }) - } - for _, sfo := range txn.SiafundOutputs { - sfoid := v2SiafundOutputID(txid, 
len(tdiff.CreatedSiafundElements)) - tdiff.CreatedSiafundElements = append(tdiff.CreatedSiafundElements, types.SiafundElement{ - StateElement: types.StateElement{ID: types.Hash256(sfoid)}, - SiafundOutput: sfo, - ClaimStart: ms.siafundPool, - }) - } - tdiff.RevisedFileContracts = append(tdiff.RevisedFileContracts, txn.FileContractRevisions...) - tdiff.ResolvedFileContracts = append(tdiff.ResolvedFileContracts, txn.FileContractResolutions...) - for _, res := range txn.FileContractResolutions { - if r, ok := res.Resolution.(types.V2FileContractRenewal); ok { - fcid := v2FileContractID(txid, len(tdiff.CreatedFileContracts)) - tdiff.CreatedFileContracts = append(tdiff.CreatedFileContracts, types.V2FileContractElement{ - StateElement: types.StateElement{ID: types.Hash256(fcid)}, - V2FileContract: r.InitialRevision, - }) - } - } - diff.V2Transactions = append(diff.V2Transactions, tdiff) ms.ApplyV2Transaction(txn) } } - bid := b.ID() for i, sco := range b.MinerPayouts { - scoid := bid.MinerOutputID(i) - diff.CreatedSiacoinElements = append(diff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(scoid)}, + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(bid.MinerOutputID(i))}, SiacoinOutput: sco, - MaturityHeight: s.MaturityHeight(), + MaturityHeight: ms.base.MaturityHeight(), + }) + } + if subsidy := ms.base.FoundationSubsidy(); !subsidy.Value.IsZero() { + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(bid.FoundationOutputID())}, + SiacoinOutput: subsidy, + MaturityHeight: ms.base.MaturityHeight(), }) } - for _, fcid := range store.MissedFileContracts(s.childHeight()) { - if _, ok := ms.spent(types.Hash256(fcid)); ok { + for _, fce := range bs.ExpiringFileContracts { + if ms.isSpent(fce.ID) { continue } - fce := ms.mustFileContractElement(store, fcid) - diff.MissedFileContracts = append(diff.MissedFileContracts, fce) + 
ms.resolveFileContractElement(fce, types.TransactionID(bid)) for i, sco := range fce.MissedProofOutputs { - scoid := fcid.MissedOutputID(i) - diff.CreatedSiacoinElements = append(diff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(scoid)}, + ms.addSiacoinElement(types.SiacoinElement{ + StateElement: types.StateElement{ID: types.Hash256(types.FileContractID(fce.ID).MissedOutputID(i))}, SiacoinOutput: sco, - MaturityHeight: s.MaturityHeight(), + MaturityHeight: ms.base.MaturityHeight(), }) } } - if subsidy := s.FoundationSubsidy(); !subsidy.Value.IsZero() { - scoid := bid.FoundationOutputID() - diff.CreatedSiacoinElements = append(diff.CreatedSiacoinElements, types.SiacoinElement{ - StateElement: types.StateElement{ID: types.Hash256(scoid)}, - SiacoinOutput: subsidy, - MaturityHeight: s.MaturityHeight(), - }) +} + +type ApplyUpdate struct { + ElementApplyUpdate + HistoryApplyUpdate + ms *MidState +} + +func (au ApplyUpdate) ForEachSiacoinElement(fn func(sce types.SiacoinElement, spent bool)) { + for _, sce := range au.ms.sces { + fn(sce, au.ms.isSpent(sce.ID)) + } +} + +func (au ApplyUpdate) ForEachSiafundElement(fn func(sfe types.SiafundElement, spent bool)) { + for _, sfe := range au.ms.sfes { + fn(sfe, au.ms.isSpent(sfe.ID)) + } +} + +func (au ApplyUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool)) { + for _, fce := range au.ms.fces { + fn(fce, au.ms.revision(fce.ID), au.ms.isSpent(fce.ID)) + } +} + +func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp time.Time) (State, ApplyUpdate) { + if s.Index.Height > 0 && s.Index.ID != b.ParentID { + panic("consensus: cannot apply non-child block") + } + + ms := NewMidState(s) + ms.ApplyBlock(b, bs) + s.SiafundPool = ms.siafundPool + s.FoundationPrimaryAddress = ms.foundationPrimary + s.FoundationFailsafeAddress = ms.foundationFailsafe + eau := s.Elements.ApplyBlock(ms.updated, 
ms.added) + hau := s.History.ApplyBlock(s.Index) + s = ApplyWork(s, b, targetTimestamp) + return s, ApplyUpdate{eau, hau, ms} +} + +type RevertUpdate struct { + ElementRevertUpdate + HistoryRevertUpdate + ms *MidState +} + +func (ru RevertUpdate) ForEachSiacoinElement(fn func(sce types.SiacoinElement, spent bool)) { + for i := range ru.ms.sces { + sce := ru.ms.sces[len(ru.ms.sces)-i-1] + fn(sce, ru.ms.isSpent(sce.ID)) + } +} + +func (ru RevertUpdate) ForEachSiafundElement(fn func(sfe types.SiafundElement, spent bool)) { + for i := range ru.ms.sfes { + sfe := ru.ms.sfes[len(ru.ms.sfes)-i-1] + fn(sfe, ru.ms.isSpent(sfe.ID)) + } +} + +func (ru RevertUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool)) { + for i := range ru.ms.fces { + fce := ru.ms.fces[len(ru.ms.fces)-i-1] + fn(fce, ru.ms.revision(fce.ID), ru.ms.isSpent(fce.ID)) + } +} + +func RevertBlock(s State, b types.Block, bs V1BlockSupplement) RevertUpdate { + if s.Index.ID != b.ParentID { + panic("consensus: cannot revert non-child block") + } + ms := NewMidState(s) + ms.ApplyBlock(b, bs) + // invert spends + // + // TODO: this might be horribly inadequate + for i := range ms.updated { + _, spent := ms.spends[ms.updated[i].ID] + ms.updated[i].Spent = !spent } - diff.ElementApplyUpdate = s.Elements.ApplyBlock(&diff) // fills in leaf index + proofs for all elements - diff.HistoryApplyUpdate = s.History.ApplyBlock(types.ChainIndex{Height: s.Index.Height + 1, ID: bid}) - return diff + eru := s.Elements.RevertBlock(ms.updated) + hru := s.History.RevertBlock(s.Index) + return RevertUpdate{eru, hru, ms} } diff --git a/consensus/update_test.go b/consensus/update_test.go index 69d38e9c..3c286e0e 100644 --- a/consensus/update_test.go +++ b/consensus/update_test.go @@ -57,14 +57,51 @@ func TestApplyBlock(t *testing.T) { appendSig(types.Hash256(txn.FileContractRevisions[i].ParentID)) } } - addBlock := func(b types.Block) (diff consensus.BlockDiff, err 
error) { - if err = consensus.ValidateBlock(cs, dbStore, b); err != nil { + addBlock := func(b types.Block) (au consensus.ApplyUpdate, err error) { + bs := dbStore.SupplementTipBlock(b) + if err = consensus.ValidateBlock(cs, b, bs); err != nil { return } - diff = consensus.ApplyDiff(cs, dbStore, b) - cs = consensus.ApplyState(cs, dbStore, b) + cs, au = consensus.ApplyBlock(cs, b, bs, dbStore.AncestorTimestamp(b.ParentID, cs.AncestorDepth())) return } + checkUpdateElements := func(au consensus.ApplyUpdate, addedSCEs, spentSCEs []types.SiacoinElement, addedSFEs, spentSFEs []types.SiafundElement) { + au.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { + sces := &addedSCEs + if spent { + sces = &spentSCEs + } + if len(*sces) == 0 { + t.Fatal("unexpected spent siacoin element") + } + sce.StateElement = types.StateElement{} + if !reflect.DeepEqual(sce, (*sces)[0]) { + js1, _ := json.MarshalIndent(sce, "", " ") + js2, _ := json.MarshalIndent((*sces)[0], "", " ") + t.Fatalf("siacoin element doesn't match:\n%s\nvs\n%s\n", js1, js2) + } + *sces = (*sces)[1:] + }) + au.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { + sfes := &addedSFEs + if spent { + sfes = &spentSFEs + } + if len(*sfes) == 0 { + t.Fatal("unexpected spent siafund element") + } + sfe.StateElement = types.StateElement{} + if !reflect.DeepEqual(sfe, (*sfes)[0]) { + js1, _ := json.MarshalIndent(sfe, "", " ") + js2, _ := json.MarshalIndent((*sfes)[0], "", " ") + t.Fatalf("siafund element doesn't match:\n%s\nvs\n%s\n", js1, js2) + } + *sfes = (*sfes)[1:] + }) + if len(addedSCEs)+len(spentSCEs)+len(addedSFEs)+len(spentSFEs) > 0 { + t.Fatal("extraneous elements") + } + } // block with nothing except block reward b1 := types.Block{ @@ -72,21 +109,16 @@ func TestApplyBlock(t *testing.T) { Timestamp: types.CurrentTimestamp(), MinerPayouts: []types.SiacoinOutput{{Address: types.VoidAddress, Value: cs.BlockReward()}}, } - expect := consensus.BlockDiff{ - CreatedSiacoinElements: 
[]types.SiacoinElement{ - { - StateElement: types.StateElement{ID: types.Hash256(b1.ID().MinerOutputID(0))}, - SiacoinOutput: b1.MinerPayouts[0], - MaturityHeight: cs.MaturityHeight(), - }, - }, + addedSCEs := []types.SiacoinElement{ + {SiacoinOutput: b1.MinerPayouts[0], MaturityHeight: cs.MaturityHeight()}, } - if diff, err := addBlock(b1); err != nil { + spentSCEs := []types.SiacoinElement{} + addedSFEs := []types.SiafundElement{} + spentSFEs := []types.SiafundElement{} + if au, err := addBlock(b1); err != nil { t.Fatal(err) - } else if !reflect.DeepEqual(diff, expect) { - js1, _ := json.MarshalIndent(diff, "", " ") - js2, _ := json.MarshalIndent(expect, "", " ") - t.Fatalf("diff doesn't match:\n%s\nvs\n%s\n", js1, js2) + } else { + checkUpdateElements(au, addedSCEs, spentSCEs, addedSFEs, spentSFEs) } // block that spends part of the gift transaction @@ -116,40 +148,25 @@ func TestApplyBlock(t *testing.T) { MinerPayouts: []types.SiacoinOutput{{Address: types.VoidAddress, Value: cs.BlockReward()}}, Transactions: []types.Transaction{txnB2}, } - expect = consensus.BlockDiff{ - Transactions: []consensus.TransactionDiff{{ - CreatedSiacoinElements: []types.SiacoinElement{ - {StateElement: types.StateElement{ID: types.Hash256(txnB2.SiacoinOutputID(0))}, SiacoinOutput: txnB2.SiacoinOutputs[0]}, - {StateElement: types.StateElement{ID: types.Hash256(txnB2.SiacoinOutputID(1))}, SiacoinOutput: txnB2.SiacoinOutputs[1]}, - { - StateElement: types.StateElement{ID: types.Hash256(giftTxn.SiafundOutputID(0).ClaimOutputID())}, - SiacoinOutput: types.SiacoinOutput{Value: types.NewCurrency64(0), Address: txnB2.SiafundInputs[0].ClaimAddress}, - MaturityHeight: cs.MaturityHeight(), - }, - }, - SpentSiacoinElements: []types.SiacoinElement{ - {StateElement: types.StateElement{ID: types.Hash256(giftTxn.SiacoinOutputID(0))}, SiacoinOutput: giftTxn.SiacoinOutputs[0]}, - }, - CreatedSiafundElements: []types.SiafundElement{ - {StateElement: types.StateElement{ID: 
types.Hash256(txnB2.SiafundOutputID(0))}, SiafundOutput: txnB2.SiafundOutputs[0]}, - {StateElement: types.StateElement{ID: types.Hash256(txnB2.SiafundOutputID(1))}, SiafundOutput: txnB2.SiafundOutputs[1]}, - }, - SpentSiafundElements: []types.SiafundElement{ - {StateElement: types.StateElement{ID: types.Hash256(giftTxn.SiafundOutputID(0))}, SiafundOutput: giftTxn.SiafundOutputs[0]}, - }, - }}, - - CreatedSiacoinElements: []types.SiacoinElement{{ - StateElement: types.StateElement{ID: types.Hash256(b2.ID().MinerOutputID(0))}, - SiacoinOutput: b2.MinerPayouts[0], - MaturityHeight: cs.MaturityHeight(), - }}, + addedSCEs = []types.SiacoinElement{ + {SiacoinOutput: txnB2.SiacoinOutputs[0]}, + {SiacoinOutput: txnB2.SiacoinOutputs[1]}, + {SiacoinOutput: types.SiacoinOutput{Value: types.ZeroCurrency, Address: txnB2.SiafundInputs[0].ClaimAddress}, MaturityHeight: cs.MaturityHeight()}, + {SiacoinOutput: b2.MinerPayouts[0], MaturityHeight: cs.MaturityHeight()}, + } + spentSCEs = []types.SiacoinElement{ + {SiacoinOutput: giftTxn.SiacoinOutputs[0]}, + } + addedSFEs = []types.SiafundElement{ + {SiafundOutput: txnB2.SiafundOutputs[0]}, + {SiafundOutput: txnB2.SiafundOutputs[1]}, + } + spentSFEs = []types.SiafundElement{ + {SiafundOutput: giftTxn.SiafundOutputs[0]}, } - if diff, err := addBlock(b2); err != nil { + if au, err := addBlock(b2); err != nil { t.Fatal(err) - } else if !reflect.DeepEqual(diff, expect) { - js1, _ := json.MarshalIndent(diff, "", " ") - js2, _ := json.MarshalIndent(expect, "", " ") - t.Fatalf("diff doesn't match:\n%s\nvs\n%s\n", js1, js2) + } else { + checkUpdateElements(au, addedSCEs, spentSCEs, addedSFEs, spentSFEs) } } diff --git a/consensus/validation.go b/consensus/validation.go index d974222f..4d3aae1b 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -70,7 +70,7 @@ func validateMinerPayouts(s State, b types.Block) error { // ValidateOrphan validates b in the context of s. 
func ValidateOrphan(s State, b types.Block) error { // TODO: calculate size more efficiently - if uint64(types.EncodedLen(b)) > s.MaxBlockWeight() { + if uint64(types.EncodedLen(types.V1Block(b))) > s.MaxBlockWeight() { return errors.New("block exceeds maximum weight") } else if err := validateMinerPayouts(s, b); err != nil { return err @@ -81,7 +81,7 @@ func ValidateOrphan(s State, b types.Block) error { if b.V2 != nil { if b.V2.Height != s.Index.Height+1 { return errors.New("block height does not increment parent height") - } else if s.Commitment(b.MinerPayouts[0].Address, b.Transactions, b.V2.Transactions) != b.V2.Commitment { + } else if b.V2.Commitment != s.Commitment(b.MinerPayouts[0].Address, b.Transactions, b.V2.Transactions) { return errors.New("commitment hash does not match header") } } @@ -90,13 +90,22 @@ func ValidateOrphan(s State, b types.Block) error { // A MidState represents the state of the blockchain within a block. type MidState struct { - base State - sces map[types.SiacoinOutputID]types.SiacoinElement - sfes map[types.SiafundOutputID]types.SiafundElement - fces map[types.FileContractID]types.FileContractElement - v2fces map[types.FileContractID]types.V2FileContractElement - spends map[types.Hash256]types.TransactionID - siafundPool types.Currency + base State + ephemeral map[types.Hash256]int // indices into element slices + spends map[types.Hash256]types.TransactionID + revs map[types.Hash256]*types.FileContractElement + siafundPool types.Currency + foundationPrimary types.Address + foundationFailsafe types.Address + + // elements that have been updated or added by the block + sces []types.SiacoinElement + sfes []types.SiafundElement + fces []types.FileContractElement + v2fces []types.V2FileContractElement + // these alias the above + updated []ElementLeaf + added []ElementLeaf } // Index returns the index of the MidState's base state. 
@@ -104,72 +113,53 @@ func (ms *MidState) Index() types.ChainIndex { return ms.base.Index } -func (ms *MidState) siacoinElement(store Store, id types.SiacoinOutputID) (types.SiacoinElement, bool) { - sce, ok := ms.sces[id] - if !ok { - sce, ok = store.SiacoinElement(id) +func (ms *MidState) siacoinElement(ts V1TransactionSupplement, id types.SiacoinOutputID) (types.SiacoinElement, bool) { + if i, ok := ms.ephemeral[types.Hash256(id)]; ok { + return ms.sces[i], true } - return sce, ok + return ts.siacoinElement(id) } -func (ms *MidState) siafundElement(store Store, id types.SiafundOutputID) (types.SiafundElement, types.Currency, bool) { - sfe, ok := ms.sfes[id] - if !ok { - sfe, ok = store.SiafundElement(id) +func (ms *MidState) siafundElement(ts V1TransactionSupplement, id types.SiafundOutputID) (types.SiafundElement, bool) { + if i, ok := ms.ephemeral[types.Hash256(id)]; ok { + return ms.sfes[i], true } - claimPortion := ms.siafundPool.Sub(sfe.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfe.Value) - return sfe, claimPortion, ok + return ts.siafundElement(id) } -func (ms *MidState) fileContractElement(store Store, id types.FileContractID) (types.FileContractElement, bool) { - fce, ok := ms.fces[id] - if !ok { - fce, ok = store.FileContractElement(id) +func (ms *MidState) fileContractElement(ts V1TransactionSupplement, id types.FileContractID) (types.FileContractElement, bool) { + if i, ok := ms.ephemeral[types.Hash256(id)]; ok { + return ms.fces[i], true } - return fce, ok + return ts.fileContractElement(id) } -func (ms *MidState) mustSiacoinElement(store Store, id types.SiacoinOutputID) types.SiacoinElement { - sce, ok := ms.siacoinElement(store, id) +func (ms *MidState) mustSiacoinElement(ts V1TransactionSupplement, id types.SiacoinOutputID) types.SiacoinElement { + sce, ok := ms.siacoinElement(ts, id) if !ok { panic("missing SiacoinElement") } return sce } -func (ms *MidState) mustSiafundElement(store Store, id types.SiafundOutputID) (types.SiafundElement, 
types.Currency) { - sfe, claimPortion, ok := ms.siafundElement(store, id) +func (ms *MidState) mustSiafundElement(ts V1TransactionSupplement, id types.SiafundOutputID) types.SiafundElement { + sfe, ok := ms.siafundElement(ts, id) if !ok { panic("missing SiafundElement") } - return sfe, claimPortion + return sfe } -func (ms *MidState) mustFileContractElement(store Store, id types.FileContractID) types.FileContractElement { - fce, ok := ms.fileContractElement(store, id) +func (ms *MidState) mustFileContractElement(ts V1TransactionSupplement, id types.FileContractID) types.FileContractElement { + fce, ok := ms.fileContractElement(ts, id) if !ok { panic("missing FileContractElement") } return fce } -func contractRevisionID(id types.FileContractID, revisionNumber uint64) types.FileContractID { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - id.EncodeTo(h.E) - h.E.WriteUint64(revisionNumber) - return types.FileContractID(h.Sum()) -} - -func (ms *MidState) mustFileContractParentRevision(store Store, id types.FileContractID, newRevisionNumber uint64) types.FileContractElement { - fce, ok := ms.fileContractElement(store, contractRevisionID(id, newRevisionNumber)) - if !ok { - if fce, ok = ms.fileContractElement(store, id); !ok { - panic("missing FileContractElement") - } - } - return fce +func (ms *MidState) revision(id types.Hash256) *types.FileContractElement { + return ms.revs[id] } func (ms *MidState) spent(id types.Hash256) (types.TransactionID, bool) { @@ -177,21 +167,21 @@ func (ms *MidState) spent(id types.Hash256) (types.TransactionID, bool) { return txid, ok } -func (ms *MidState) v2Revision(id types.FileContractID) (types.V2FileContractElement, bool) { - fce, ok := ms.v2fces[id] - return fce, ok +func (ms *MidState) isSpent(id types.Hash256) bool { + _, ok := ms.spends[id] + return ok } // NewMidState constructs a MidState initialized to the provided base state. 
func NewMidState(s State) *MidState { return &MidState{ - base: s, - sces: make(map[types.SiacoinOutputID]types.SiacoinElement), - sfes: make(map[types.SiafundOutputID]types.SiafundElement), - fces: make(map[types.FileContractID]types.FileContractElement), - v2fces: make(map[types.FileContractID]types.V2FileContractElement), - spends: make(map[types.Hash256]types.TransactionID), - siafundPool: s.SiafundPool, + base: s, + ephemeral: make(map[types.Hash256]int), + spends: make(map[types.Hash256]types.TransactionID), + revs: make(map[types.Hash256]*types.FileContractElement), + siafundPool: s.SiafundPool, + foundationPrimary: s.FoundationPrimaryAddress, + foundationFailsafe: s.FoundationFailsafeAddress, } } @@ -268,7 +258,7 @@ func validateMinimumValues(ms *MidState, txn types.Transaction) error { return nil } -func validateSiacoins(ms *MidState, store Store, txn types.Transaction) error { +func validateSiacoins(ms *MidState, txn types.Transaction, ts V1TransactionSupplement) error { var inputSum types.Currency for i, sci := range txn.SiacoinInputs { if sci.UnlockConditions.Timelock > ms.base.childHeight() { @@ -276,7 +266,7 @@ func validateSiacoins(ms *MidState, store Store, txn types.Transaction) error { } else if txid, ok := ms.spent(types.Hash256(sci.ParentID)); ok { return fmt.Errorf("siacoin input %v double-spends parent output (previously spent in %v)", i, txid) } - parent, ok := ms.siacoinElement(store, sci.ParentID) + parent, ok := ms.siacoinElement(ts, sci.ParentID) if !ok { return fmt.Errorf("siacoin input %v spends nonexistent siacoin output %v", i, sci.ParentID) } else if sci.UnlockConditions.UnlockHash() != parent.Address { @@ -302,7 +292,7 @@ func validateSiacoins(ms *MidState, store Store, txn types.Transaction) error { return nil } -func validateSiafunds(ms *MidState, store Store, txn types.Transaction) error { +func validateSiafunds(ms *MidState, txn types.Transaction, ts V1TransactionSupplement) error { var inputSum uint64 for i, sfi := range 
txn.SiafundInputs { if sfi.UnlockConditions.Timelock > ms.base.childHeight() { @@ -310,7 +300,7 @@ func validateSiafunds(ms *MidState, store Store, txn types.Transaction) error { } else if txid, ok := ms.spent(types.Hash256(sfi.ParentID)); ok { return fmt.Errorf("siafund input %v double-spends parent output (previously spent in %v)", i, txid) } - parent, _, ok := ms.siafundElement(store, sfi.ParentID) + parent, ok := ms.siafundElement(ts, sfi.ParentID) if !ok { return fmt.Errorf("siafund input %v spends nonexistent siafund output %v", i, sfi.ParentID) } else if sfi.UnlockConditions.UnlockHash() != parent.Address && @@ -332,7 +322,7 @@ func validateSiafunds(ms *MidState, store Store, txn types.Transaction) error { return nil } -func validateFileContracts(ms *MidState, store Store, txn types.Transaction) error { +func validateFileContracts(ms *MidState, txn types.Transaction, ts V1TransactionSupplement) error { for i, fc := range txn.FileContracts { if fc.WindowStart < ms.base.childHeight() { return fmt.Errorf("file contract %v has window that starts in the past", i) @@ -363,7 +353,7 @@ func validateFileContracts(ms *MidState, store Store, txn types.Transaction) err } else if txid, ok := ms.spent(types.Hash256(fcr.ParentID)); ok { return fmt.Errorf("file contract revision %v conflicts with previous proof or revision (in %v)", i, txid) } - parent, ok := ms.fileContractElement(store, fcr.ParentID) + parent, ok := ms.fileContractElement(ts, fcr.ParentID) if !ok { return fmt.Errorf("file contract revision %v revises nonexistent file contract %v", i, fcr.ParentID) } @@ -448,15 +438,12 @@ func validateFileContracts(ms *MidState, store Store, txn types.Transaction) err if txid, ok := ms.spent(types.Hash256(sp.ParentID)); ok { return fmt.Errorf("storage proof %v conflicts with previous proof (in %v)", i, txid) } - fc, ok := ms.fileContractElement(store, sp.ParentID) + fc, ok := ms.fileContractElement(ts, sp.ParentID) if !ok { return fmt.Errorf("storage proof %v references 
nonexistent file contract", i) } - windowStart, ok := store.BestIndex(fc.WindowStart - 1) - if !ok { - return fmt.Errorf("missing index for contract window start %v", fc.WindowStart) - } - leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, windowStart, sp.ParentID) + windowID := ts.StorageProofBlockIDs[i] + leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, windowID, sp.ParentID) leaf := storageProofLeaf(leafIndex, fc.Filesize, sp.Leaf) if leaf == nil { continue @@ -468,7 +455,7 @@ func validateFileContracts(ms *MidState, store Store, txn types.Transaction) err return nil } -func validateArbitraryData(ms *MidState, store Store, txn types.Transaction) error { +func validateArbitraryData(ms *MidState, txn types.Transaction) error { if ms.base.childHeight() < ms.base.Network.HardforkFoundation.Height { return nil } @@ -575,18 +562,18 @@ func validateSignatures(ms *MidState, txn types.Transaction) error { } // ValidateTransaction validates txn within the context of ms and store. -func ValidateTransaction(ms *MidState, store Store, txn types.Transaction) error { +func ValidateTransaction(ms *MidState, txn types.Transaction, ts V1TransactionSupplement) error { if err := validateCurrencyOverflow(ms, txn); err != nil { return err } else if err := validateMinimumValues(ms, txn); err != nil { return err - } else if err := validateSiacoins(ms, store, txn); err != nil { + } else if err := validateSiacoins(ms, txn, ts); err != nil { return err - } else if err := validateSiafunds(ms, store, txn); err != nil { + } else if err := validateSiafunds(ms, txn, ts); err != nil { return err - } else if err := validateFileContracts(ms, store, txn); err != nil { + } else if err := validateFileContracts(ms, txn, ts); err != nil { return err - } else if err := validateArbitraryData(ms, store, txn); err != nil { + } else if err := validateArbitraryData(ms, txn); err != nil { return err } else if err := validateSignatures(ms, txn); err != nil { return err @@ -716,7 +703,7 @@ func 
validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { // check accumulator if sci.Parent.LeafIndex == types.EphemeralLeafIndex { - if _, ok := ms.sces[types.SiacoinOutputID(sci.Parent.ID)]; !ok { + if _, ok := ms.ephemeral[sci.Parent.ID]; !ok { return fmt.Errorf("siacoin input %v spends nonexistent ephemeral output %v", i, sci.Parent.ID) } } else if !ms.base.Elements.ContainsUnspentSiacoinElement(sci.Parent) { @@ -772,7 +759,7 @@ func validateV2Siafunds(ms *MidState, txn types.V2Transaction) error { // check accumulator if sci.Parent.LeafIndex == types.EphemeralLeafIndex { - if _, ok := ms.sfes[types.SiafundOutputID(sci.Parent.ID)]; !ok { + if _, ok := ms.ephemeral[sci.Parent.ID]; !ok { return fmt.Errorf("siafund input %v spends nonexistent ephemeral output %v", i, sci.Parent.ID) } } else if !ms.base.Elements.ContainsUnspentSiafundElement(sci.Parent) { @@ -882,9 +869,6 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { for i, fcr := range txn.FileContractRevisions { cur, rev := fcr.Parent.V2FileContract, fcr.Revision - if fce, ok := ms.v2Revision(types.FileContractID(fcr.Parent.ID)); ok { - cur = fce.V2FileContract - } if err := validateParent(fcr.Parent); err != nil { return fmt.Errorf("file contract revision %v parent (%v) %s", i, fcr.Parent.ID, err) } else if cur.ProofHeight < ms.base.childHeight() { @@ -948,7 +932,7 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { } else if ms.base.History.Contains(sp.ProofStart, sp.HistoryProof) { return fmt.Errorf("file contract storage proof %v has invalid history proof", i) } - leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, sp.ProofStart, types.FileContractID(fcr.Parent.ID)) + leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, sp.ProofStart.ID, types.FileContractID(fcr.Parent.ID)) if storageProofRoot(ms.base.StorageProofLeafHash(sp.Leaf[:]), leafIndex, fc.Filesize, sp.Proof) != fc.FileMerkleRoot { return fmt.Errorf("file contract storage 
proof %v has root that does not match contract Merkle root", i) } @@ -1004,12 +988,12 @@ func ValidateV2Transaction(ms *MidState, txn types.V2Transaction) error { return nil } -// ValidateBlock validates b in the context of s and store. +// ValidateBlock validates b in the context of s. // // This function does not check whether the header's timestamp is too far in the // future. That check should be performed at the time the block is received, // e.g. in p2p networking code; see MaxFutureTimestamp. -func ValidateBlock(s State, store Store, b types.Block) error { +func ValidateBlock(s State, b types.Block, bs V1BlockSupplement) error { if err := ValidateOrphan(s, b); err != nil { return err } @@ -1018,11 +1002,11 @@ func ValidateBlock(s State, store Store, b types.Block) error { if s.childHeight() >= ms.base.Network.HardforkV2.RequireHeight { return errors.New("v1 transactions are not allowed after v2 hardfork is complete") } - for _, txn := range b.Transactions { - if err := ValidateTransaction(ms, store, txn); err != nil { + for i, txn := range b.Transactions { + if err := ValidateTransaction(ms, txn, bs.Transactions[i]); err != nil { return err } - ms.ApplyTransaction(store, txn) + ms.ApplyTransaction(txn, bs.Transactions[i]) } } if b.V2 != nil { diff --git a/consensus/validation_test.go b/consensus/validation_test.go index deaa063f..604821d6 100644 --- a/consensus/validation_test.go +++ b/consensus/validation_test.go @@ -136,7 +136,7 @@ func TestValidateBlock(t *testing.T) { validBlock := deepCopyBlock(b) signTxn(&validBlock.Transactions[0]) findBlockNonce(cs, &validBlock) - if err := consensus.ValidateBlock(cs, dbStore, validBlock); err != nil { + if err := consensus.ValidateBlock(cs, validBlock, dbStore.SupplementTipBlock(validBlock)); err != nil { t.Fatal(err) } @@ -414,7 +414,7 @@ func TestValidateBlock(t *testing.T) { signTxn(&corruptBlock.Transactions[0]) findBlockNonce(cs, &corruptBlock) - if err := consensus.ValidateBlock(cs, dbStore, corruptBlock); err 
== nil { + if err := consensus.ValidateBlock(cs, corruptBlock, dbStore.SupplementTipBlock(corruptBlock)); err == nil { t.Fatalf("accepted block with %v", test.desc) } } diff --git a/types/encoding.go b/types/encoding.go index 2ddd9edd..7f757015 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -1293,10 +1293,6 @@ func (b *V1Block) DecodeFrom(d *Decoder) { for i := range b.Transactions { b.Transactions[i].DecodeFrom(d) } - if d.ReadBool() { - b.V2 = new(V2BlockData) - b.V2.DecodeFrom(d) - } } // DecodeFrom implements types.DecoderFrom. diff --git a/types/types.go b/types/types.go index ee0f3563..6d7c0845 100644 --- a/types/types.go +++ b/types/types.go @@ -47,6 +47,7 @@ var ( SpecifierStorageProof = NewSpecifier("storage proof") SpecifierFoundation = NewSpecifier("foundation") SpecifierEntropy = NewSpecifier("entropy") + SpecifierElementID = NewSpecifier("element id") ) // A Hash256 is a generic 256-bit cryptographic hash. From 71cb1efe61c34763157184f654452c7c99f5ed47 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sun, 13 Aug 2023 23:23:08 -0400 Subject: [PATCH 14/53] consensus Add v2 replay prefix --- consensus/state.go | 14 +++++ consensus/store.go | 125 --------------------------------------------- 2 files changed, 14 insertions(+), 125 deletions(-) delete mode 100644 consensus/store.go diff --git a/consensus/state.go b/consensus/state.go index a5264915..ee9a06db 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -302,6 +302,8 @@ func (s State) StorageProofLeafHash(leaf []byte) types.Hash256 { // after each hardfork to prevent replay attacks. func (s State) replayPrefix() []byte { switch { + case s.Index.Height >= s.Network.HardforkV2.AllowHeight: + return []byte{2} case s.Index.Height >= s.Network.HardforkFoundation.Height: return []byte{1} case s.Index.Height >= s.Network.HardforkASIC.Height: @@ -311,6 +313,13 @@ func (s State) replayPrefix() []byte { } } +// v2ReplayPrefix returns the replay protection prefix at the current height. 
+// These prefixes are included in various hashes; a new prefix is used after +// each hardfork to prevent replay attacks. +func (s State) v2ReplayPrefix() uint8 { + return 2 +} + // WholeSigHash computes the hash of transaction data covered by the // WholeTransaction flag. func (s State) WholeSigHash(txn types.Transaction, parentID types.Hash256, pubkeyIndex uint64, timelock uint64, coveredSigs []uint64) types.Hash256 { @@ -438,6 +447,7 @@ func (s State) Commitment(minerAddr types.Address, txns []types.Transaction, v2t // concatenate the hashes and the miner address h.Reset() h.E.WriteString("sia/commitment|") + h.E.WriteUint8(s.v2ReplayPrefix()) stateHash.EncodeTo(h.E) minerAddr.EncodeTo(h.E) txnsHash.EncodeTo(h.E) @@ -452,6 +462,7 @@ func (s State) InputSigHash(txn types.V2Transaction) types.Hash256 { defer hasherPool.Put(h) h.Reset() h.E.WriteString("sia/id/transaction|") + h.E.WriteUint8(s.v2ReplayPrefix()) h.E.WritePrefix(len(txn.SiacoinInputs)) for _, in := range txn.SiacoinInputs { in.Parent.ID.EncodeTo(h.E) @@ -500,6 +511,7 @@ func (s State) ContractSigHash(fc types.V2FileContract) types.Hash256 { defer hasherPool.Put(h) h.Reset() h.E.WriteString("sia/sig/filecontract|") + h.E.WriteUint8(s.v2ReplayPrefix()) h.E.WriteUint64(fc.Filesize) fc.FileMerkleRoot.EncodeTo(h.E) h.E.WriteUint64(fc.ProofHeight) @@ -519,6 +531,7 @@ func (s State) RenewalSigHash(fcr types.V2FileContractRenewal) types.Hash256 { defer hasherPool.Put(h) h.Reset() h.E.WriteString("sia/sig/filecontractrenewal|") + h.E.WriteUint8(s.v2ReplayPrefix()) fcr.FinalRevision.EncodeTo(h.E) fcr.InitialRevision.EncodeTo(h.E) fcr.RenterRollover.EncodeTo(h.E) @@ -532,6 +545,7 @@ func (s State) AttestationSigHash(a types.Attestation) types.Hash256 { defer hasherPool.Put(h) h.Reset() h.E.WriteString("sia/sig/attestation|") + h.E.WriteUint8(s.v2ReplayPrefix()) a.PublicKey.EncodeTo(h.E) h.E.WriteString(a.Key) h.E.WriteBytes(a.Value) diff --git a/consensus/store.go b/consensus/store.go deleted file mode 100644 
index 68f9edc3..00000000 --- a/consensus/store.go +++ /dev/null @@ -1,125 +0,0 @@ -package consensus - -import ( - "go.sia.tech/core/types" -) - -// A V1TransactionSupplement contains elements that are associated with a v1 -// transaction, but not included in the transaction. For example, v1 -// transactions reference the ID of each SiacoinOutput they spend, but do not -// contain the output itself. Consequently, in order to validate the -// transaction, those outputs must be loaded from a Store. Collecting these -// elements into an explicit struct allows us to preserve them even after the -// Store has been mutated. -type V1TransactionSupplement struct { - SiacoinInputs []types.SiacoinElement - SiafundInputs []types.SiafundElement - RevisedFileContracts []types.FileContractElement - ValidFileContracts []types.FileContractElement - StorageProofBlockIDs []types.BlockID -} - -// EncodeTo implements types.EncoderTo. -func (ts V1TransactionSupplement) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(ts.SiacoinInputs)) - for i := range ts.SiacoinInputs { - ts.SiacoinInputs[i].EncodeTo(e) - } - e.WritePrefix(len(ts.SiafundInputs)) - for i := range ts.SiafundInputs { - ts.SiafundInputs[i].EncodeTo(e) - } - e.WritePrefix(len(ts.RevisedFileContracts)) - for i := range ts.RevisedFileContracts { - ts.RevisedFileContracts[i].EncodeTo(e) - } - e.WritePrefix(len(ts.ValidFileContracts)) - for i := range ts.ValidFileContracts { - ts.ValidFileContracts[i].EncodeTo(e) - } -} - -// DecodeFrom implements types.DecoderFrom. 
-func (ts *V1TransactionSupplement) DecodeFrom(d *types.Decoder) { - ts.SiacoinInputs = make([]types.SiacoinElement, d.ReadPrefix()) - for i := range ts.SiacoinInputs { - ts.SiacoinInputs[i].DecodeFrom(d) - } - ts.SiafundInputs = make([]types.SiafundElement, d.ReadPrefix()) - for i := range ts.SiafundInputs { - ts.SiafundInputs[i].DecodeFrom(d) - } - ts.RevisedFileContracts = make([]types.FileContractElement, d.ReadPrefix()) - for i := range ts.RevisedFileContracts { - ts.RevisedFileContracts[i].DecodeFrom(d) - } - ts.ValidFileContracts = make([]types.FileContractElement, d.ReadPrefix()) - for i := range ts.ValidFileContracts { - ts.ValidFileContracts[i].DecodeFrom(d) - } -} - -func (ts V1TransactionSupplement) siacoinElement(id types.SiacoinOutputID) (sce types.SiacoinElement, ok bool) { - for _, sce := range ts.SiacoinInputs { - if types.SiacoinOutputID(sce.ID) == id { - return sce, true - } - } - return -} - -func (ts V1TransactionSupplement) siafundElement(id types.SiafundOutputID) (sce types.SiafundElement, ok bool) { - for _, sfe := range ts.SiafundInputs { - if types.SiafundOutputID(sfe.ID) == id { - return sfe, true - } - } - return -} - -func (ts V1TransactionSupplement) fileContractElement(id types.FileContractID) (sce types.FileContractElement, ok bool) { - for _, fce := range ts.RevisedFileContracts { - if types.FileContractID(fce.ID) == id { - return fce, true - } - } - for _, fce := range ts.ValidFileContracts { - if types.FileContractID(fce.ID) == id { - return fce, true - } - } - return -} - -// A V1BlockSupplement contains elements that are associated with a v1 block, -// but not included in the block. This includes supplements for each v1 -// transaction, as well as any file contracts that expired at the block's -// height. -type V1BlockSupplement struct { - Transactions []V1TransactionSupplement - ExpiringFileContracts []types.FileContractElement -} - -// EncodeTo implements types.EncoderTo. 
-func (bs V1BlockSupplement) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(bs.Transactions)) - for i := range bs.Transactions { - bs.Transactions[i].EncodeTo(e) - } - e.WritePrefix(len(bs.ExpiringFileContracts)) - for i := range bs.ExpiringFileContracts { - bs.ExpiringFileContracts[i].EncodeTo(e) - } -} - -// DecodeFrom implements types.DecoderFrom. -func (bs *V1BlockSupplement) DecodeFrom(d *types.Decoder) { - bs.Transactions = make([]V1TransactionSupplement, d.ReadPrefix()) - for i := range bs.Transactions { - bs.Transactions[i].DecodeFrom(d) - } - bs.ExpiringFileContracts = make([]types.FileContractElement, d.ReadPrefix()) - for i := range bs.ExpiringFileContracts { - bs.ExpiringFileContracts[i].DecodeFrom(d) - } -} From 7b3153fbb51c0dd7583e65f4dc0dc4d819663212 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sun, 13 Aug 2023 23:31:26 -0400 Subject: [PATCH 15/53] consensus: Don't include history proof in InputSigHash --- consensus/state.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/consensus/state.go b/consensus/state.go index ee9a06db..8812bbc0 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -491,6 +491,11 @@ func (s State) InputSigHash(txn types.V2Transaction) types.Hash256 { h.E.WritePrefix(len(txn.FileContractResolutions)) for _, fcr := range txn.FileContractResolutions { fcr.Parent.ID.EncodeTo(h.E) + // normalize history proof + if sp, ok := fcr.Resolution.(types.V2StorageProof); ok { + sp.HistoryProof = nil + fcr.Resolution = sp + } fcr.Resolution.(types.EncoderTo).EncodeTo(h.E) } for _, a := range txn.Attestations { From 543cd319c49b081b63842d396236a7c9804c794b Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sun, 13 Aug 2023 23:25:15 -0400 Subject: [PATCH 16/53] chain: Support v2 txns in txpool --- chain/db.go | 2 +- chain/manager.go | 234 ++++++++++++++++++++++++++++++++++++---- consensus/state.go | 141 +++++++++++++++++++++++- consensus/validation.go | 2 +- types/types.go | 9 ++ 5 files changed, 361 insertions(+), 27 
deletions(-) diff --git a/chain/db.go b/chain/db.go index ab101f4f..45fd224e 100644 --- a/chain/db.go +++ b/chain/db.go @@ -505,8 +505,8 @@ func (db *DBStore) SupplementTipTransaction(txn types.Transaction) (ts consensus } for _, sp := range txn.StorageProofs { if fce, ok := db.getFileContractElement(sp.ParentID, numLeaves); ok { - ts.ValidFileContracts = append(ts.ValidFileContracts, fce) if windowIndex, ok := db.BestIndex(fce.WindowStart - 1); ok { + ts.ValidFileContracts = append(ts.ValidFileContracts, fce) ts.StorageProofBlockIDs = append(ts.StorageProofBlockIDs, windowIndex.ID) } } diff --git a/chain/manager.go b/chain/manager.go index f20ba4e2..44c542c9 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -104,13 +104,15 @@ type Manager struct { lastCommit time.Time txpool struct { - txns []types.Transaction - indices map[types.TransactionID]int - ms *consensus.MidState - weight uint64 - medianFee *types.Currency - parentMap map[types.Hash256]int - lastReverted []types.Transaction + txns []types.Transaction + v2txns []types.V2Transaction + indices map[types.TransactionID]int + ms *consensus.MidState + weight uint64 + medianFee *types.Currency + parentMap map[types.Hash256]int + lastReverted []types.Transaction + lastRevertedV2 []types.V2Transaction } mu sync.Mutex @@ -261,6 +263,7 @@ func (m *Manager) revertTip() error { } } + m.revertPoolUpdate(cru) m.tipState = pc.State return nil } @@ -303,6 +306,7 @@ func (m *Manager) applyTip(index types.ChainIndex) error { } } + m.applyPoolUpdate(cau) m.tipState = c.State return nil } @@ -381,6 +385,9 @@ func (m *Manager) reorgTo(index types.ChainIndex) error { if len(revert) > 0 { c, _ := m.store.Checkpoint(revert[0].ID) m.txpool.lastReverted = c.Block.Transactions + if c.Block.V2 != nil { + m.txpool.lastRevertedV2 = c.Block.V2.Transactions + } } return nil @@ -461,21 +468,32 @@ func (m *Manager) revalidatePool() { if m.txpool.weight >= txpoolMaxWeight { // sort txns fee without modifying the actual pool slice type 
feeTxn struct { - index int - fees types.Currency + index int + fees types.Currency + weight uint64 + v2 bool } - txnFees := make([]feeTxn, len(m.txpool.txns)) + txnFees := make([]feeTxn, 0, len(m.txpool.txns)+len(m.txpool.v2txns)) for i, txn := range m.txpool.txns { - txnFees[i].index = i - for _, fee := range txn.MinerFees { - txnFees[i].fees = txnFees[i].fees.Add(fee) - } + txnFees = append(txnFees, feeTxn{ + index: i, + fees: txn.TotalFees(), + weight: m.tipState.TransactionWeight(txn), + }) + } + for i, txn := range m.txpool.v2txns { + txnFees = append(txnFees, feeTxn{ + index: i, + fees: txn.MinerFee, + weight: m.tipState.V2TransactionWeight(txn), + v2: true, + }) } sort.Slice(txnFees, func(i, j int) bool { - return txnFees[i].fees.Cmp(txnFees[j].fees) < 0 + return txnFees[i].fees.Div64(txnFees[i].weight).Cmp(txnFees[j].fees.Div64(txnFees[j].weight)) < 0 }) for m.txpool.weight >= (txpoolMaxWeight*3)/4 { - m.txpool.weight -= m.tipState.TransactionWeight(m.txpool.txns[txnFees[0].index]) + m.txpool.weight -= txnFees[0].weight txnFees = txnFees[1:] } sort.Slice(txnFees, func(i, j int) bool { @@ -492,9 +510,9 @@ func (m *Manager) revalidatePool() { for txid := range m.txpool.indices { delete(m.txpool.indices, txid) } + m.txpool.ms = consensus.NewMidState(m.tipState) txns := append(m.txpool.txns, m.txpool.lastReverted...) m.txpool.txns = m.txpool.txns[:0] - m.txpool.ms = consensus.NewMidState(m.tipState) for _, txn := range txns { ts := m.store.SupplementTipTransaction(txn) if consensus.ValidateTransaction(m.txpool.ms, txn, ts) == nil { @@ -504,6 +522,16 @@ func (m *Manager) revalidatePool() { m.txpool.weight += m.tipState.TransactionWeight(txn) } } + v2txns := append(m.txpool.v2txns, m.txpool.lastRevertedV2...) 
+ m.txpool.v2txns = m.txpool.v2txns[:0] + for _, txn := range v2txns { + if consensus.ValidateV2Transaction(m.txpool.ms, txn) == nil { + m.txpool.ms.ApplyV2Transaction(txn) + m.txpool.indices[txn.ID()] = len(m.txpool.v2txns) + m.txpool.v2txns = append(m.txpool.v2txns, txn) + m.txpool.weight += m.tipState.V2TransactionWeight(txn) + } + } } func (m *Manager) computeMedianFee() types.Currency { @@ -518,11 +546,12 @@ func (m *Manager) computeMedianFee() types.Currency { } var fees []weightedFee for _, txn := range b.Transactions { - var fee types.Currency - for _, mf := range txn.MinerFees { - fee = fee.Add(mf) + fees = append(fees, weightedFee{cs.TransactionWeight(txn), txn.TotalFees()}) + } + if b.V2 != nil { + for _, txn := range b.V2.Transactions { + fees = append(fees, weightedFee{cs.V2TransactionWeight(txn), txn.MinerFee}) } - fees = append(fees, weightedFee{cs.TransactionWeight(txn), fee}) } // account for the remaining space in the block, for which no fees were paid remaining := cs.MaxBlockWeight() @@ -585,6 +614,132 @@ func (m *Manager) computeParentMap() map[types.Hash256]int { return m.txpool.parentMap } +func (m *Manager) applyPoolUpdate(cau consensus.ApplyUpdate) { + // replace ephemeral elements, if necessary + var newElements map[types.Hash256]types.StateElement + replaceEphemeral := func(e *types.StateElement) { + if e.LeafIndex != types.EphemeralLeafIndex { + return + } else if newElements == nil { + newElements := make(map[types.Hash256]types.StateElement) + cau.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { + if !spent { + newElements[sce.ID] = sce.StateElement + } + }) + cau.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { + if !spent { + newElements[sfe.ID] = sfe.StateElement + } + }) + cau.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, spent bool) { + if !spent { + newElements[fce.ID] = fce.StateElement + } + }) + } + *e = newElements[e.ID] + } + for _, txn := 
range m.txpool.v2txns { + for i := range txn.SiacoinInputs { + replaceEphemeral(&txn.SiacoinInputs[i].Parent.StateElement) + } + for i := range txn.SiafundInputs { + replaceEphemeral(&txn.SiafundInputs[i].Parent.StateElement) + } + for i := range txn.FileContractRevisions { + replaceEphemeral(&txn.FileContractRevisions[i].Parent.StateElement) + } + for i := range txn.FileContractResolutions { + replaceEphemeral(&txn.FileContractResolutions[i].Parent.StateElement) + } + } + + // update proofs + for _, txn := range m.txpool.v2txns { + for i := range txn.SiacoinInputs { + cau.UpdateElementProof(&txn.SiacoinInputs[i].Parent.StateElement) + } + for i := range txn.SiafundInputs { + cau.UpdateElementProof(&txn.SiafundInputs[i].Parent.StateElement) + } + for i := range txn.FileContractRevisions { + cau.UpdateElementProof(&txn.FileContractRevisions[i].Parent.StateElement) + } + for i := range txn.FileContractResolutions { + cau.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) + if sp, ok := txn.FileContractResolutions[i].Resolution.(types.V2StorageProof); ok { + cau.UpdateHistoryProof(&sp) + txn.FileContractResolutions[i].Resolution = sp + } + } + } +} + +func (m *Manager) revertPoolUpdate(cru consensus.RevertUpdate) { + // restore ephemeral elements, if necessary + var uncreated map[types.Hash256]bool + replaceEphemeral := func(e *types.StateElement) { + if e.LeafIndex != types.EphemeralLeafIndex { + return + } else if uncreated == nil { + uncreated := make(map[types.Hash256]types.StateElement) + cru.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { + if !spent { + uncreated[sce.ID] = sce.StateElement + } + }) + cru.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { + if !spent { + uncreated[sfe.ID] = sfe.StateElement + } + }) + cru.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, spent bool) { + if !spent { + uncreated[fce.ID] = fce.StateElement + } + }) + } + if 
uncreated[e.ID] { + *e = types.StateElement{ID: e.ID, LeafIndex: types.EphemeralLeafIndex} + } + } + for _, txn := range m.txpool.v2txns { + for i := range txn.SiacoinInputs { + replaceEphemeral(&txn.SiacoinInputs[i].Parent.StateElement) + } + for i := range txn.SiafundInputs { + replaceEphemeral(&txn.SiafundInputs[i].Parent.StateElement) + } + for i := range txn.FileContractRevisions { + replaceEphemeral(&txn.FileContractRevisions[i].Parent.StateElement) + } + for i := range txn.FileContractResolutions { + replaceEphemeral(&txn.FileContractResolutions[i].Parent.StateElement) + } + } + + // update proofs + for _, txn := range m.txpool.v2txns { + for i := range txn.SiacoinInputs { + cru.UpdateElementProof(&txn.SiacoinInputs[i].Parent.StateElement) + } + for i := range txn.SiafundInputs { + cru.UpdateElementProof(&txn.SiafundInputs[i].Parent.StateElement) + } + for i := range txn.FileContractRevisions { + cru.UpdateElementProof(&txn.FileContractRevisions[i].Parent.StateElement) + } + for i := range txn.FileContractResolutions { + cru.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) + if sp, ok := txn.FileContractResolutions[i].Resolution.(types.V2StorageProof); ok { + cru.UpdateHistoryProof(&sp) + txn.FileContractResolutions[i].Resolution = sp + } + } + } +} + // PoolTransaction returns the transaction with the specified ID, if it is // currently in the pool. 
func (m *Manager) PoolTransaction(id types.TransactionID) (types.Transaction, bool) { @@ -721,8 +876,7 @@ func (m *Manager) AddPoolTransactions(txns []types.Transaction) error { if _, ok := m.txpool.indices[txid]; ok { continue // skip transactions already in pool } - ts := m.store.SupplementTipTransaction(txn) - m.txpool.ms.ApplyTransaction(txn, ts) + m.txpool.ms.ApplyTransaction(txn, m.store.SupplementTipTransaction(txn)) m.txpool.indices[txid] = len(m.txpool.txns) m.txpool.txns = append(m.txpool.txns, txn) m.txpool.weight += m.tipState.TransactionWeight(txn) @@ -730,6 +884,40 @@ func (m *Manager) AddPoolTransactions(txns []types.Transaction) error { return nil } +// AddV2PoolTransactions validates a transaction set and adds it to the txpool. +// If any transaction references an element (SiacoinOutput, SiafundOutput, or +// FileContract) not present in the blockchain, that element must be created by +// a previous transaction in the set. +// +// If any transaction in the set is invalid, the entire set is rejected and none +// of the transactions are added to the pool. +func (m *Manager) AddV2PoolTransactions(txns []types.V2Transaction) error { + m.mu.Lock() + defer m.mu.Unlock() + m.revalidatePool() + + // validate as a standalone set + ms := consensus.NewMidState(m.tipState) + for _, txn := range txns { + if err := consensus.ValidateV2Transaction(ms, txn); err != nil { + return fmt.Errorf("transaction %v is invalid: %v", txn.ID(), err) + } + ms.ApplyV2Transaction(txn) + } + + for _, txn := range txns { + txid := txn.ID() + if _, ok := m.txpool.indices[txid]; ok { + continue // skip transactions already in pool + } + m.txpool.ms.ApplyV2Transaction(txn) + m.txpool.indices[txid] = len(m.txpool.v2txns) + m.txpool.v2txns = append(m.txpool.v2txns, txn) + m.txpool.weight += m.tipState.V2TransactionWeight(txn) + } + return nil +} + // NewManager returns a Manager initialized with the provided Store and State. 
func NewManager(store Store, cs consensus.State) *Manager { m := &Manager{ diff --git a/consensus/state.go b/consensus/state.go index 8812bbc0..c0fbc208 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -227,12 +227,20 @@ func (s State) TransactionWeight(txn types.Transaction) uint64 { return uint64(types.EncodedLen(txn)) } +// V2TransactionWeight computes the weight of a txn. +func (s State) V2TransactionWeight(txn types.V2Transaction) uint64 { + return uint64(types.EncodedLen(txn)) // TODO +} + // BlockWeight computes the combined weight of a block's txns. -func (s State) BlockWeight(txns []types.Transaction) uint64 { +func (s State) BlockWeight(txns []types.Transaction, v2txns []types.V2Transaction) uint64 { var weight uint64 for _, txn := range txns { weight += s.TransactionWeight(txn) } + for _, txn := range v2txns { + weight += s.V2TransactionWeight(txn) + } return weight } @@ -276,7 +284,7 @@ func (s State) StorageProofLeafIndex(filesize uint64, windowID types.BlockID, fc if filesize%leafSize != 0 { numLeaves++ } - if numLeaves <= 0 { + if numLeaves == 0 { return 0 } seed := types.HashBytes(append(windowID[:], fcid[:]...)) @@ -556,3 +564,132 @@ func (s State) AttestationSigHash(a types.Attestation) types.Hash256 { h.E.WriteBytes(a.Value) return h.Sum() } + +// A V1TransactionSupplement contains elements that are associated with a v1 +// transaction, but not included in the transaction. For example, v1 +// transactions reference the ID of each SiacoinOutput they spend, but do not +// contain the output itself. Consequently, in order to validate the +// transaction, those outputs must be loaded from a Store. Collecting these +// elements into an explicit struct allows us to preserve them even after the +// Store has been mutated. 
+type V1TransactionSupplement struct { + SiacoinInputs []types.SiacoinElement + SiafundInputs []types.SiafundElement + RevisedFileContracts []types.FileContractElement + ValidFileContracts []types.FileContractElement + StorageProofBlockIDs []types.BlockID // must match ValidFileContracts +} + +// EncodeTo implements types.EncoderTo. +func (ts V1TransactionSupplement) EncodeTo(e *types.Encoder) { + e.WritePrefix(len(ts.SiacoinInputs)) + for i := range ts.SiacoinInputs { + ts.SiacoinInputs[i].EncodeTo(e) + } + e.WritePrefix(len(ts.SiafundInputs)) + for i := range ts.SiafundInputs { + ts.SiafundInputs[i].EncodeTo(e) + } + e.WritePrefix(len(ts.RevisedFileContracts)) + for i := range ts.RevisedFileContracts { + ts.RevisedFileContracts[i].EncodeTo(e) + } + e.WritePrefix(len(ts.ValidFileContracts)) + for i := range ts.ValidFileContracts { + ts.ValidFileContracts[i].EncodeTo(e) + } +} + +// DecodeFrom implements types.DecoderFrom. +func (ts *V1TransactionSupplement) DecodeFrom(d *types.Decoder) { + ts.SiacoinInputs = make([]types.SiacoinElement, d.ReadPrefix()) + for i := range ts.SiacoinInputs { + ts.SiacoinInputs[i].DecodeFrom(d) + } + ts.SiafundInputs = make([]types.SiafundElement, d.ReadPrefix()) + for i := range ts.SiafundInputs { + ts.SiafundInputs[i].DecodeFrom(d) + } + ts.RevisedFileContracts = make([]types.FileContractElement, d.ReadPrefix()) + for i := range ts.RevisedFileContracts { + ts.RevisedFileContracts[i].DecodeFrom(d) + } + ts.ValidFileContracts = make([]types.FileContractElement, d.ReadPrefix()) + for i := range ts.ValidFileContracts { + ts.ValidFileContracts[i].DecodeFrom(d) + } +} + +func (ts V1TransactionSupplement) siacoinElement(id types.SiacoinOutputID) (sce types.SiacoinElement, ok bool) { + for _, sce := range ts.SiacoinInputs { + if types.SiacoinOutputID(sce.ID) == id { + return sce, true + } + } + return +} + +func (ts V1TransactionSupplement) siafundElement(id types.SiafundOutputID) (sce types.SiafundElement, ok bool) { + for _, sfe := range 
ts.SiafundInputs { + if types.SiafundOutputID(sfe.ID) == id { + return sfe, true + } + } + return +} + +func (ts V1TransactionSupplement) fileContractElement(id types.FileContractID) (sce types.FileContractElement, ok bool) { + for _, fce := range ts.RevisedFileContracts { + if types.FileContractID(fce.ID) == id { + return fce, true + } + } + for _, fce := range ts.ValidFileContracts { + if types.FileContractID(fce.ID) == id { + return fce, true + } + } + return +} + +func (ts V1TransactionSupplement) storageProofWindowID(id types.FileContractID) types.BlockID { + for i, fce := range ts.ValidFileContracts { + if types.FileContractID(fce.ID) == id { + return ts.StorageProofBlockIDs[i] + } + } + panic("missing contract for storage proof window ID") // developer error +} + +// A V1BlockSupplement contains elements that are associated with a v1 block, +// but not included in the block. This includes supplements for each v1 +// transaction, as well as any file contracts that expired at the block's +// height. +type V1BlockSupplement struct { + Transactions []V1TransactionSupplement + ExpiringFileContracts []types.FileContractElement +} + +// EncodeTo implements types.EncoderTo. +func (bs V1BlockSupplement) EncodeTo(e *types.Encoder) { + e.WritePrefix(len(bs.Transactions)) + for i := range bs.Transactions { + bs.Transactions[i].EncodeTo(e) + } + e.WritePrefix(len(bs.ExpiringFileContracts)) + for i := range bs.ExpiringFileContracts { + bs.ExpiringFileContracts[i].EncodeTo(e) + } +} + +// DecodeFrom implements types.DecoderFrom. 
+func (bs *V1BlockSupplement) DecodeFrom(d *types.Decoder) { + bs.Transactions = make([]V1TransactionSupplement, d.ReadPrefix()) + for i := range bs.Transactions { + bs.Transactions[i].DecodeFrom(d) + } + bs.ExpiringFileContracts = make([]types.FileContractElement, d.ReadPrefix()) + for i := range bs.ExpiringFileContracts { + bs.ExpiringFileContracts[i].DecodeFrom(d) + } +} diff --git a/consensus/validation.go b/consensus/validation.go index 4d3aae1b..974e01e0 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -442,7 +442,7 @@ func validateFileContracts(ms *MidState, txn types.Transaction, ts V1Transaction if !ok { return fmt.Errorf("storage proof %v references nonexistent file contract", i) } - windowID := ts.StorageProofBlockIDs[i] + windowID := ts.storageProofWindowID(sp.ParentID) leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, windowID, sp.ParentID) leaf := storageProofLeaf(leafIndex, fc.Filesize, sp.Leaf) if leaf == nil { diff --git a/types/types.go b/types/types.go index 6d7c0845..11f83e58 100644 --- a/types/types.go +++ b/types/types.go @@ -440,6 +440,15 @@ func (txn *Transaction) FileContractID(i int) FileContractID { return FileContractID(h.Sum()) } +// TotalFees returns the sum of the transaction's miner fees. +func (txn *Transaction) TotalFees() Currency { + var sum Currency + for _, fee := range txn.MinerFees { + sum = sum.Add(fee) + } + return sum +} + // A V2FileContract is a storage agreement between a renter and a host. 
It // consists of a bidirectional payment channel that resolves as either "valid" // or "missed" depending on whether a valid StorageProof is submitted for the From bb74aa08b8c634f438ddf7d20959f1ce916b68d3 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Mon, 14 Aug 2023 22:30:32 -0400 Subject: [PATCH 17/53] all: Satisfy golint --- consensus/update.go | 13 +++++++++++++ types/types.go | 6 ++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/consensus/update.go b/consensus/update.go index 276b9d40..4cb36279 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -428,6 +428,7 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { } } +// ApplyBlock applies a block to the MidState. func (ms *MidState) ApplyBlock(b types.Block, bs V1BlockSupplement) { for i, txn := range b.Transactions { ms.ApplyTransaction(txn, bs.Transactions[i]) @@ -467,30 +468,36 @@ func (ms *MidState) ApplyBlock(b types.Block, bs V1BlockSupplement) { } } +// An ApplyUpdate represents the effects of applying a block to a state. type ApplyUpdate struct { ElementApplyUpdate HistoryApplyUpdate ms *MidState } +// ForEachSiacoinElement calls fn on each siacoin element related to au. func (au ApplyUpdate) ForEachSiacoinElement(fn func(sce types.SiacoinElement, spent bool)) { for _, sce := range au.ms.sces { fn(sce, au.ms.isSpent(sce.ID)) } } +// ForEachSiafundElement calls fn on each siafund element related to au. func (au ApplyUpdate) ForEachSiafundElement(fn func(sfe types.SiafundElement, spent bool)) { for _, sfe := range au.ms.sfes { fn(sfe, au.ms.isSpent(sfe.ID)) } } +// ForEachFileContractElement calls fn on each file contract element related to +// au. If the contract was revised, rev is non-nil. 
func (au ApplyUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool)) { for _, fce := range au.ms.fces { fn(fce, au.ms.revision(fce.ID), au.ms.isSpent(fce.ID)) } } +// ApplyBlock applies b to s, producing a new state and a set of effects. func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp time.Time) (State, ApplyUpdate) { if s.Index.Height > 0 && s.Index.ID != b.ParentID { panic("consensus: cannot apply non-child block") @@ -507,12 +514,14 @@ func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp ti return s, ApplyUpdate{eau, hau, ms} } +// A RevertUpdate represents the effects of reverting to a prior state. type RevertUpdate struct { ElementRevertUpdate HistoryRevertUpdate ms *MidState } +// ForEachSiacoinElement calls fn on each siacoin element related to ru. func (ru RevertUpdate) ForEachSiacoinElement(fn func(sce types.SiacoinElement, spent bool)) { for i := range ru.ms.sces { sce := ru.ms.sces[len(ru.ms.sces)-i-1] @@ -520,6 +529,7 @@ func (ru RevertUpdate) ForEachSiacoinElement(fn func(sce types.SiacoinElement, s } } +// ForEachSiafundElement calls fn on each siafund element related to ru. func (ru RevertUpdate) ForEachSiafundElement(fn func(sfe types.SiafundElement, spent bool)) { for i := range ru.ms.sfes { sfe := ru.ms.sfes[len(ru.ms.sfes)-i-1] @@ -527,6 +537,8 @@ func (ru RevertUpdate) ForEachSiafundElement(fn func(sfe types.SiafundElement, s } } +// ForEachFileContractElement calls fn on each file contract element related to +// ru. If the contract was revised, rev is non-nil. 
func (ru RevertUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool)) { for i := range ru.ms.fces { fce := ru.ms.fces[len(ru.ms.fces)-i-1] @@ -534,6 +546,7 @@ func (ru RevertUpdate) ForEachFileContractElement(fn func(fce types.FileContract } } +// RevertBlock reverts b, producing the effects undone by the block. func RevertBlock(s State, b types.Block, bs V1BlockSupplement) RevertUpdate { if s.Index.ID != b.ParentID { panic("consensus: cannot revert non-child block") diff --git a/types/types.go b/types/types.go index 11f83e58..e3a924b4 100644 --- a/types/types.go +++ b/types/types.go @@ -720,7 +720,8 @@ func (txn *V2Transaction) ID() TransactionID { return TransactionID(h.Sum()) } -// V2SiacoinOutputID returns the ID of the siacoin output at index i. +// EphemeralSiacoinOutput returns a SiacoinElement for the siacoin output at +// index i. func (txn *V2Transaction) EphemeralSiacoinOutput(i int) SiacoinElement { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) @@ -737,7 +738,8 @@ func (txn *V2Transaction) EphemeralSiacoinOutput(i int) SiacoinElement { } } -// V2SiafundOutputID returns the ID of the siafund output at index i. +// EphemeralSiafundOutput returns a SiafundElement for the siafund output at +// index i. 
func (txn *V2Transaction) EphemeralSiafundOutput(i int) SiafundElement { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) From 643b1656dbbce9b270ac728ab38a8bb51a06772a Mon Sep 17 00:00:00 2001 From: lukechampine Date: Mon, 14 Aug 2023 22:33:50 -0400 Subject: [PATCH 18/53] consensus: Merge HistoryAccumulator into ElementAccumulator --- chain/manager.go | 4 +- consensus/merkle.go | 137 +++++++++------------------------------- consensus/state.go | 7 +- consensus/update.go | 21 ++++-- consensus/validation.go | 5 +- types/encoding.go | 20 +++--- types/types.go | 12 +++- 7 files changed, 72 insertions(+), 134 deletions(-) diff --git a/chain/manager.go b/chain/manager.go index 44c542c9..17e0b063 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -669,7 +669,7 @@ func (m *Manager) applyPoolUpdate(cau consensus.ApplyUpdate) { for i := range txn.FileContractResolutions { cau.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) if sp, ok := txn.FileContractResolutions[i].Resolution.(types.V2StorageProof); ok { - cau.UpdateHistoryProof(&sp) + cau.UpdateElementProof(&sp.ProofStart.StateElement) txn.FileContractResolutions[i].Resolution = sp } } @@ -733,7 +733,7 @@ func (m *Manager) revertPoolUpdate(cru consensus.RevertUpdate) { for i := range txn.FileContractResolutions { cru.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) if sp, ok := txn.FileContractResolutions[i].Resolution.(types.V2StorageProof); ok { - cru.UpdateHistoryProof(&sp) + cru.UpdateElementProof(&sp.ProofStart.StateElement) txn.FileContractResolutions[i].Resolution = sp } } diff --git a/consensus/merkle.go b/consensus/merkle.go index e32f3098..784fdd30 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -135,17 +135,30 @@ func V2FileContractLeaf(e *types.V2FileContractElement, spent bool) ElementLeaf } } -type accumulator struct { - Trees [64]types.Hash256 - NumLeaves uint64 +// ChainIndexLeaf returns the ElementLeaf for a ChainIndexElement. 
+func ChainIndexLeaf(e *types.ChainIndexElement) ElementLeaf { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + h.E.WriteString("sia/leaf/chainindex|") + e.StateElement.ID.EncodeTo(h.E) + e.ChainIndex.EncodeTo(h.E) + return ElementLeaf{ + StateElement: &e.StateElement, + ElementHash: h.Sum(), + Spent: false, + } } -func (acc *accumulator) hasTreeAtHeight(height int) bool { - return acc.NumLeaves&(1< len(*proof) { - *proof = append(*proof, hau.growth[len(*proof)]) - *proof = append(*proof, hau.proof[len(*proof):]...) - } -} - -// UpdateHistoryProof updates the supplied storage proof to incorporate changes -// made to the chain history. The proof must be up-to-date; if it is not, -// UpdateHistoryProof may panic. -func (hau *HistoryApplyUpdate) UpdateHistoryProof(sp *types.V2StorageProof) { - hau.UpdateProof(&sp.HistoryProof) -} - -// A HistoryRevertUpdate reflects the changes to a HistoryAccumulator resulting -// from the removal of a block. -type HistoryRevertUpdate struct { - index types.ChainIndex -} - -// UpdateProof updates the supplied history proof to incorporate the changes -// made to the chain history. The proof must be up-to-date; if it is not, -// UpdateHistoryProof may panic. -func (hru *HistoryRevertUpdate) UpdateProof(height uint64, proof *[]types.Hash256) { - if mh := mergeHeight(hru.index.Height, height); mh <= len(*proof) { - *proof = (*proof)[:mh-1] - } -} - -// UpdateHistoryProof updates the supplied storage proof to incorporate the -// changes made to the chain history. The proof must be up-to-date; if it is -// not, UpdateHistoryProof may panic. 
-func (hru *HistoryRevertUpdate) UpdateHistoryProof(sp *types.V2StorageProof) { - hru.UpdateProof(sp.ProofStart.Height, &sp.HistoryProof) -} diff --git a/consensus/state.go b/consensus/state.go index c0fbc208..64308e3a 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -95,7 +95,6 @@ type State struct { FoundationFailsafeAddress types.Address `json:"foundationFailsafeAddress"` Elements ElementAccumulator `json:"elements"` - History HistoryAccumulator `json:"history"` } // EncodeTo implements types.EncoderTo. @@ -113,7 +112,6 @@ func (s State) EncodeTo(e *types.Encoder) { s.FoundationPrimaryAddress.EncodeTo(e) s.FoundationFailsafeAddress.EncodeTo(e) s.Elements.EncodeTo(e) - s.History.EncodeTo(e) } // DecodeFrom implements types.DecoderFrom. @@ -131,7 +129,6 @@ func (s *State) DecodeFrom(d *types.Decoder) { s.FoundationPrimaryAddress.DecodeFrom(d) s.FoundationFailsafeAddress.DecodeFrom(d) s.Elements.DecodeFrom(d) - s.History.DecodeFrom(d) } func (s State) childHeight() uint64 { return s.Index.Height + 1 } @@ -499,9 +496,9 @@ func (s State) InputSigHash(txn types.V2Transaction) types.Hash256 { h.E.WritePrefix(len(txn.FileContractResolutions)) for _, fcr := range txn.FileContractResolutions { fcr.Parent.ID.EncodeTo(h.E) - // normalize history proof + // normalize proof if sp, ok := fcr.Resolution.(types.V2StorageProof); ok { - sp.HistoryProof = nil + sp.ProofStart.MerkleProof = nil fcr.Resolution = sp } fcr.Resolution.(types.EncoderTo).EncodeTo(h.E) diff --git a/consensus/update.go b/consensus/update.go index 4cb36279..e781827f 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -466,12 +466,17 @@ func (ms *MidState) ApplyBlock(b types.Block, bs V1BlockSupplement) { }) } } + + ms.cie = types.ChainIndexElement{ + StateElement: types.StateElement{ID: types.Hash256(bid)}, + ChainIndex: types.ChainIndex{Height: ms.base.childHeight(), ID: bid}, + } + ms.added = append(ms.added, ChainIndexLeaf(&ms.cie)) } // An ApplyUpdate represents the effects of 
applying a block to a state. type ApplyUpdate struct { ElementApplyUpdate - HistoryApplyUpdate ms *MidState } @@ -497,6 +502,13 @@ func (au ApplyUpdate) ForEachFileContractElement(fn func(fce types.FileContractE } } +// ChainIndexElement returns the chain index element for the applied block. +func (au ApplyUpdate) ChainIndexElement() types.ChainIndexElement { + cie := au.ms.cie + cie.MerkleProof = append([]types.Hash256(nil), cie.MerkleProof...) + return cie +} + // ApplyBlock applies b to s, producing a new state and a set of effects. func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp time.Time) (State, ApplyUpdate) { if s.Index.Height > 0 && s.Index.ID != b.ParentID { @@ -509,15 +521,13 @@ func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp ti s.FoundationPrimaryAddress = ms.foundationPrimary s.FoundationFailsafeAddress = ms.foundationFailsafe eau := s.Elements.ApplyBlock(ms.updated, ms.added) - hau := s.History.ApplyBlock(s.Index) s = ApplyWork(s, b, targetTimestamp) - return s, ApplyUpdate{eau, hau, ms} + return s, ApplyUpdate{eau, ms} } // A RevertUpdate represents the effects of reverting to a prior state. 
type RevertUpdate struct { ElementRevertUpdate - HistoryRevertUpdate ms *MidState } @@ -562,6 +572,5 @@ func RevertBlock(s State, b types.Block, bs V1BlockSupplement) RevertUpdate { } eru := s.Elements.RevertBlock(ms.updated) - hru := s.History.RevertBlock(s.Index) - return RevertUpdate{eru, hru, ms} + return RevertUpdate{eru, ms} } diff --git a/consensus/validation.go b/consensus/validation.go index 974e01e0..5b0681c8 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -103,6 +103,7 @@ type MidState struct { sfes []types.SiafundElement fces []types.FileContractElement v2fces []types.V2FileContractElement + cie types.ChainIndexElement // these alias the above updated []ElementLeaf added []ElementLeaf @@ -929,10 +930,10 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { } else if sp.ProofStart.Height != fc.ProofHeight { // see note on this field in types.StorageProof return fmt.Errorf("file contract storage proof %v has ProofStart (%v) that does not match contract ProofStart (%v)", i, sp.ProofStart.Height, fc.ProofHeight) - } else if ms.base.History.Contains(sp.ProofStart, sp.HistoryProof) { + } else if ms.base.Elements.ContainsBlock(sp.ProofStart) { return fmt.Errorf("file contract storage proof %v has invalid history proof", i) } - leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, sp.ProofStart.ID, types.FileContractID(fcr.Parent.ID)) + leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, sp.ProofStart.ChainIndex.ID, types.FileContractID(fcr.Parent.ID)) if storageProofRoot(ms.base.StorageProofLeafHash(sp.Leaf[:]), leafIndex, fc.Filesize, sp.Proof) != fc.FileMerkleRoot { return fmt.Errorf("file contract storage proof %v has root that does not match contract Merkle root", i) } diff --git a/types/encoding.go b/types/encoding.go index 7f757015..d22499d5 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -523,6 +523,12 @@ func (in V2SiacoinInput) EncodeTo(e *Encoder) { } } +// EncodeTo implements 
types.EncoderTo. +func (cie ChainIndexElement) EncodeTo(e *Encoder) { + cie.StateElement.EncodeTo(e) + cie.ChainIndex.EncodeTo(e) +} + // EncodeTo implements types.EncoderTo. func (sce SiacoinElement) EncodeTo(e *Encoder) { sce.StateElement.EncodeTo(e) @@ -602,10 +608,6 @@ func (ren V2FileContractRenewal) EncodeTo(e *Encoder) { // EncodeTo implements types.EncoderTo. func (sp V2StorageProof) EncodeTo(e *Encoder) { sp.ProofStart.EncodeTo(e) - e.WritePrefix(len(sp.HistoryProof)) - for _, p := range sp.HistoryProof { - p.EncodeTo(e) - } e.Write(sp.Leaf[:]) e.WritePrefix(len(sp.Proof)) for _, p := range sp.Proof { @@ -1075,6 +1077,12 @@ func (in *V2SiacoinInput) DecodeFrom(d *Decoder) { } } +// DecodeFrom implements types.DecoderFrom. +func (cie *ChainIndexElement) DecodeFrom(d *Decoder) { + cie.StateElement.DecodeFrom(d) + cie.ChainIndex.DecodeFrom(d) +} + // DecodeFrom implements types.DecoderFrom. func (sce *SiacoinElement) DecodeFrom(d *Decoder) { sce.StateElement.DecodeFrom(d) @@ -1154,10 +1162,6 @@ func (ren *V2FileContractRenewal) DecodeFrom(d *Decoder) { // DecodeFrom implements types.DecoderFrom. func (sp *V2StorageProof) DecodeFrom(d *Decoder) { sp.ProofStart.DecodeFrom(d) - sp.HistoryProof = make([]Hash256, d.ReadPrefix()) - for i := range sp.HistoryProof { - sp.HistoryProof[i].DecodeFrom(d) - } d.Read(sp.Leaf[:]) sp.Proof = make([]Hash256, d.ReadPrefix()) for i := range sp.Proof { diff --git a/types/types.go b/types/types.go index e3a924b4..f329bddf 100644 --- a/types/types.go +++ b/types/types.go @@ -569,8 +569,7 @@ type V2StorageProof struct { // Consequently, ProofStart.Height MUST match the ProofStart field of the // contract's final revision; otherwise, the prover could use any // ProofStart, giving them control over the leaf index. - ProofStart ChainIndex - HistoryProof []Hash256 + ProofStart ChainIndexElement // The leaf is always 64 bytes, extended with zeros if necessary. 
Leaf [64]byte @@ -589,6 +588,12 @@ type StateElement struct { MerkleProof []Hash256 `json:"merkleProof"` } +// A ChainIndexElement is a record of a block in the chain. +type ChainIndexElement struct { + StateElement + ChainIndex +} + // A SiacoinElement is a volume of siacoins that is created and spent as an // atomic unit. type SiacoinElement struct { @@ -780,11 +785,12 @@ func (txn *V2Transaction) DeepCopy() V2Transaction { for i := range c.FileContractResolutions { c.FileContractResolutions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractResolutions[i].Parent.MerkleProof...) if sp, ok := c.FileContractResolutions[i].Resolution.(V2StorageProof); ok { - sp.HistoryProof = append([]Hash256(nil), sp.HistoryProof...) + sp.ProofStart.MerkleProof = append([]Hash256(nil), sp.ProofStart.MerkleProof...) sp.Proof = append([]Hash256(nil), sp.Proof...) c.FileContractResolutions[i].Resolution = sp } } + c.Attestations = append([]Attestation(nil), c.Attestations...) for i := range c.Attestations { c.Attestations[i].Value = append([]byte(nil), c.Attestations[i].Value...) 
} From 96e57e45662e03ea7f7a1d4fd46e30a9aa509357 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Mon, 14 Aug 2023 22:41:31 -0400 Subject: [PATCH 19/53] consensus: Unexport most accumulator functions --- chain/manager.go | 2 +- consensus/merkle.go | 92 ++++++++++++++++++++--------------------- consensus/update.go | 36 ++++++++-------- consensus/validation.go | 9 ++-- 4 files changed, 69 insertions(+), 70 deletions(-) diff --git a/chain/manager.go b/chain/manager.go index 17e0b063..7c487811 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -228,7 +228,7 @@ func (m *Manager) AddBlocks(blocks []types.Block) error { } else if err := consensus.ValidateOrphan(cs, b); err != nil { return fmt.Errorf("block %v is invalid: %w", types.ChainIndex{Height: cs.Index.Height + 1, ID: b.ID()}, err) } - cs = consensus.ApplyWork(cs, b, m.store.AncestorTimestamp(b.ParentID, cs.AncestorDepth())) + cs = consensus.ApplyOrphan(cs, b, m.store.AncestorTimestamp(b.ParentID, cs.AncestorDepth())) m.store.AddCheckpoint(Checkpoint{b, cs, nil}) } diff --git a/consensus/merkle.go b/consensus/merkle.go index 784fdd30..39863d3e 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -49,15 +49,15 @@ func storageProofRoot(leafHash types.Hash256, leafIndex uint64, filesize uint64, return root } -// An ElementLeaf represents a leaf in the ElementAccumulator Merkle tree. -type ElementLeaf struct { +// An elementLeaf represents a leaf in the ElementAccumulator Merkle tree. +type elementLeaf struct { *types.StateElement ElementHash types.Hash256 Spent bool } -// Hash returns the leaf's hash, for direct use in the Merkle tree. -func (l ElementLeaf) Hash() types.Hash256 { +// hash returns the leaf's hash, for direct use in the Merkle tree. 
+func (l elementLeaf) hash() types.Hash256 { buf := make([]byte, 1+32+8+1) buf[0] = leafHashPrefix copy(buf[1:], l.ElementHash[:]) @@ -68,13 +68,13 @@ func (l ElementLeaf) Hash() types.Hash256 { return types.HashBytes(buf) } -// ProofRoot returns the root obtained from the leaf and its proof.. -func (l ElementLeaf) ProofRoot() types.Hash256 { - return proofRoot(l.Hash(), l.LeafIndex, l.MerkleProof) +// proofRoot returns the root obtained from the leaf and its proof.. +func (l elementLeaf) proofRoot() types.Hash256 { + return proofRoot(l.hash(), l.LeafIndex, l.MerkleProof) } -// SiacoinLeaf returns the ElementLeaf for a SiacoinElement. -func SiacoinLeaf(e *types.SiacoinElement, spent bool) ElementLeaf { +// siacoinLeaf returns the elementLeaf for a SiacoinElement. +func siacoinLeaf(e *types.SiacoinElement, spent bool) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() @@ -82,15 +82,15 @@ func SiacoinLeaf(e *types.SiacoinElement, spent bool) ElementLeaf { e.ID.EncodeTo(h.E) e.SiacoinOutput.EncodeTo(h.E) h.E.WriteUint64(e.MaturityHeight) - return ElementLeaf{ + return elementLeaf{ StateElement: &e.StateElement, ElementHash: h.Sum(), Spent: spent, } } -// SiafundLeaf returns the ElementLeaf for a SiafundElement. -func SiafundLeaf(e *types.SiafundElement, spent bool) ElementLeaf { +// siafundLeaf returns the elementLeaf for a SiafundElement. +func siafundLeaf(e *types.SiafundElement, spent bool) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() @@ -98,52 +98,52 @@ func SiafundLeaf(e *types.SiafundElement, spent bool) ElementLeaf { e.ID.EncodeTo(h.E) e.SiafundOutput.EncodeTo(h.E) e.ClaimStart.EncodeTo(h.E) - return ElementLeaf{ + return elementLeaf{ StateElement: &e.StateElement, ElementHash: h.Sum(), Spent: spent, } } -// FileContractLeaf returns the ElementLeaf for a FileContractElement. 
-func FileContractLeaf(e *types.FileContractElement, spent bool) ElementLeaf { +// fileContractLeaf returns the elementLeaf for a FileContractElement. +func fileContractLeaf(e *types.FileContractElement, spent bool) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() h.E.WriteString("sia/leaf/filecontract|") e.ID.EncodeTo(h.E) e.FileContract.EncodeTo(h.E) - return ElementLeaf{ + return elementLeaf{ StateElement: &e.StateElement, ElementHash: h.Sum(), Spent: spent, } } -// V2FileContractLeaf returns the ElementLeaf for a V2FileContractElement. -func V2FileContractLeaf(e *types.V2FileContractElement, spent bool) ElementLeaf { +// v2FileContractLeaf returns the elementLeaf for a V2FileContractElement. +func v2FileContractLeaf(e *types.V2FileContractElement, spent bool) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() h.E.WriteString("sia/leaf/v2filecontract|") e.ID.EncodeTo(h.E) e.V2FileContract.EncodeTo(h.E) - return ElementLeaf{ + return elementLeaf{ StateElement: &e.StateElement, ElementHash: h.Sum(), Spent: spent, } } -// ChainIndexLeaf returns the ElementLeaf for a ChainIndexElement. -func ChainIndexLeaf(e *types.ChainIndexElement) ElementLeaf { +// chainIndexLeaf returns the elementLeaf for a ChainIndexElement. 
+func chainIndexLeaf(e *types.ChainIndexElement) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() h.E.WriteString("sia/leaf/chainindex|") e.StateElement.ID.EncodeTo(h.E) e.ChainIndex.EncodeTo(h.E) - return ElementLeaf{ + return elementLeaf{ StateElement: &e.StateElement, ElementHash: h.Sum(), Spent: false, @@ -216,55 +216,55 @@ func (acc *ElementAccumulator) hasTreeAtHeight(height int) bool { return acc.NumLeaves&(1<= mid }) return ls[:split], ls[split:] } - var recompute func(i, j uint64, leaves []ElementLeaf) types.Hash256 - recompute = func(i, j uint64, leaves []ElementLeaf) types.Hash256 { + var recompute func(i, j uint64, leaves []elementLeaf) types.Hash256 + recompute = func(i, j uint64, leaves []elementLeaf) types.Hash256 { height := bits.TrailingZeros64(j - i) // equivalent to log2(j-i), as j-i is always a power of two if len(leaves) == 1 && height == 0 { - return leaves[0].Hash() + return leaves[0].hash() } mid := (i + j) / 2 left, right := splitLeaves(leaves, mid) @@ -358,7 +358,7 @@ func updateLeaves(leaves []ElementLeaf) [64][]ElementLeaf { } // Group leaves by tree, and sort them by leaf index. - var trees [64][]ElementLeaf + var trees [64][]elementLeaf sort.Slice(leaves, func(i, j int) bool { if len(leaves[i].MerkleProof) != len(leaves[j].MerkleProof) { return len(leaves[i].MerkleProof) < len(leaves[j].MerkleProof) @@ -392,11 +392,11 @@ func updateLeaves(leaves []ElementLeaf) [64][]ElementLeaf { // ApplyBlock applies the supplied leaves to the accumulator, modifying it and // producing an update. 
-func (acc *ElementAccumulator) ApplyBlock(updated, added []ElementLeaf) (eau ElementApplyUpdate) { +func (acc *ElementAccumulator) ApplyBlock(updated, added []elementLeaf) (eau ElementApplyUpdate) { eau.updated = updateLeaves(updated) for height, es := range eau.updated { if len(es) > 0 { - acc.Trees[height] = es[0].ProofRoot() + acc.Trees[height] = es[0].proofRoot() } } eau.treeGrowth = acc.addLeaves(added) @@ -409,13 +409,13 @@ func (acc *ElementAccumulator) ApplyBlock(updated, added []ElementLeaf) (eau Ele // RevertBlock modifies the proofs of supplied elements such that they validate // under acc, which must be the accumulator prior to the application of those // elements. The accumulator itself is not modified. -func (acc *ElementAccumulator) RevertBlock(updated []ElementLeaf) (eru ElementRevertUpdate) { +func (acc *ElementAccumulator) RevertBlock(updated []elementLeaf) (eru ElementRevertUpdate) { eru.updated = updateLeaves(updated) eru.numLeaves = acc.NumLeaves return } -func updateProof(e *types.StateElement, updated *[64][]ElementLeaf) { +func updateProof(e *types.StateElement, updated *[64][]elementLeaf) { // find the "closest" updated object (the one with the lowest mergeHeight) updatedInTree := updated[len(e.MerkleProof)] if len(updatedInTree) == 0 { @@ -436,14 +436,14 @@ func updateProof(e *types.StateElement, updated *[64][]ElementLeaf) { mh := mergeHeight(e.LeafIndex, best.LeafIndex) copy(e.MerkleProof[mh:], best.MerkleProof[mh:]) // at the merge point itself, compute the updated sibling hash - e.MerkleProof[mh-1] = proofRoot(best.Hash(), best.LeafIndex, best.MerkleProof[:mh-1]) + e.MerkleProof[mh-1] = proofRoot(best.hash(), best.LeafIndex, best.MerkleProof[:mh-1]) } } // An ElementApplyUpdate reflects the changes to an ElementAccumulator resulting // from the application of a block. 
type ElementApplyUpdate struct { - updated [64][]ElementLeaf + updated [64][]elementLeaf treeGrowth [64][]types.Hash256 } @@ -461,7 +461,7 @@ func (eau *ElementApplyUpdate) UpdateElementProof(e *types.StateElement) { // An ElementRevertUpdate reflects the changes to an ElementAccumulator // resulting from the removal of a block. type ElementRevertUpdate struct { - updated [64][]ElementLeaf + updated [64][]elementLeaf numLeaves uint64 } diff --git a/consensus/update.go b/consensus/update.go index e781827f..ec80181e 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -151,9 +151,9 @@ func adjustTarget(s State, blockTimestamp time.Time, targetTimestamp time.Time) return newTarget } -// ApplyWork applies the work of b to s, returning the resulting state. Only the -// PoW-related fields are updated. -func ApplyWork(s State, b types.Block, targetTimestamp time.Time) State { +// ApplyOrphan applies the work of b to s, returning the resulting state. Only +// the PoW-related fields are updated. 
+func ApplyOrphan(s State, b types.Block, targetTimestamp time.Time) State { if s.Index.Height > 0 && s.Index.ID != b.ParentID { panic("consensus: cannot apply non-child block") } @@ -176,7 +176,7 @@ func ApplyWork(s State, b types.Block, targetTimestamp time.Time) State { } -func (ms *MidState) addedLeaf(id types.Hash256) *ElementLeaf { +func (ms *MidState) addedLeaf(id types.Hash256) *elementLeaf { for i := range ms.added { if ms.added[i].ID == id { return &ms.added[i] @@ -187,7 +187,7 @@ func (ms *MidState) addedLeaf(id types.Hash256) *ElementLeaf { func (ms *MidState) addSiacoinElement(sce types.SiacoinElement) { ms.sces = append(ms.sces, sce) - ms.added = append(ms.added, SiacoinLeaf(&ms.sces[len(ms.sces)-1], false)) + ms.added = append(ms.added, siacoinLeaf(&ms.sces[len(ms.sces)-1], false)) ms.ephemeral[ms.sces[len(ms.sces)-1].ID] = len(ms.sces) - 1 } @@ -198,13 +198,13 @@ func (ms *MidState) spendSiacoinElement(sce types.SiacoinElement, txid types.Tra } else { sce.MerkleProof = append([]types.Hash256(nil), sce.MerkleProof...) ms.sces = append(ms.sces, sce) - ms.updated = append(ms.updated, SiacoinLeaf(&ms.sces[len(ms.sces)-1], true)) + ms.updated = append(ms.updated, siacoinLeaf(&ms.sces[len(ms.sces)-1], true)) } } func (ms *MidState) addSiafundElement(sfe types.SiafundElement) { ms.sfes = append(ms.sfes, sfe) - ms.added = append(ms.added, SiafundLeaf(&ms.sfes[len(ms.sfes)-1], false)) + ms.added = append(ms.added, siafundLeaf(&ms.sfes[len(ms.sfes)-1], false)) ms.ephemeral[ms.sfes[len(ms.sfes)-1].ID] = len(ms.sfes) - 1 } @@ -215,13 +215,13 @@ func (ms *MidState) spendSiafundElement(sfe types.SiafundElement, txid types.Tra } else { sfe.MerkleProof = append([]types.Hash256(nil), sfe.MerkleProof...) 
ms.sfes = append(ms.sfes, sfe) - ms.updated = append(ms.updated, SiafundLeaf(&ms.sfes[len(ms.sfes)-1], true)) + ms.updated = append(ms.updated, siafundLeaf(&ms.sfes[len(ms.sfes)-1], true)) } } func (ms *MidState) addFileContractElement(fce types.FileContractElement) { ms.fces = append(ms.fces, fce) - ms.added = append(ms.added, FileContractLeaf(&ms.fces[len(ms.fces)-1], false)) + ms.added = append(ms.added, fileContractLeaf(&ms.fces[len(ms.fces)-1], false)) ms.ephemeral[ms.fces[len(ms.fces)-1].ID] = len(ms.fces) - 1 ms.siafundPool = ms.siafundPool.Add(ms.base.FileContractTax(fce.FileContract)) } @@ -230,13 +230,13 @@ func (ms *MidState) reviseFileContractElement(fce types.FileContractElement, rev rev.Payout = fce.FileContract.Payout if i, ok := ms.ephemeral[fce.ID]; ok { ms.fces[i].FileContract = rev - *ms.addedLeaf(fce.ID) = FileContractLeaf(&ms.fces[i], false) + *ms.addedLeaf(fce.ID) = fileContractLeaf(&ms.fces[i], false) } else { if r, ok := ms.revs[fce.ID]; ok { r.FileContract = rev for i := range ms.updated { if ms.updated[i].ID == fce.ID { - ms.updated[i] = FileContractLeaf(r, false) + ms.updated[i] = fileContractLeaf(r, false) break } } @@ -248,7 +248,7 @@ func (ms *MidState) reviseFileContractElement(fce types.FileContractElement, rev fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) fce.FileContract = rev ms.revs[fce.ID] = &fce - ms.updated = append(ms.updated, FileContractLeaf(&fce, false)) + ms.updated = append(ms.updated, fileContractLeaf(&fce, false)) } } } @@ -257,12 +257,12 @@ func (ms *MidState) resolveFileContractElement(fce types.FileContractElement, tx ms.spends[fce.ID] = txid fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) 
ms.fces = append(ms.fces, fce) - ms.updated = append(ms.updated, FileContractLeaf(&ms.fces[len(ms.fces)-1], true)) + ms.updated = append(ms.updated, fileContractLeaf(&ms.fces[len(ms.fces)-1], true)) } func (ms *MidState) addV2FileContractElement(fce types.V2FileContractElement) { ms.v2fces = append(ms.v2fces, fce) - ms.added = append(ms.added, V2FileContractLeaf(&ms.v2fces[len(ms.v2fces)-1], false)) + ms.added = append(ms.added, v2FileContractLeaf(&ms.v2fces[len(ms.v2fces)-1], false)) ms.ephemeral[ms.v2fces[len(ms.v2fces)-1].ID] = len(ms.v2fces) - 1 ms.siafundPool = ms.siafundPool.Add(ms.base.V2FileContractTax(fce.V2FileContract)) } @@ -270,14 +270,14 @@ func (ms *MidState) addV2FileContractElement(fce types.V2FileContractElement) { func (ms *MidState) reviseV2FileContractElement(fce types.V2FileContractElement, rev types.V2FileContract) { fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) ms.v2fces = append(ms.v2fces, fce) - ms.updated = append(ms.updated, FileContractLeaf(&ms.fces[len(ms.fces)-1], false)) + ms.updated = append(ms.updated, fileContractLeaf(&ms.fces[len(ms.fces)-1], false)) } func (ms *MidState) resolveV2FileContractElement(fce types.V2FileContractElement, txid types.TransactionID) { ms.spends[fce.ID] = txid fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) ms.v2fces = append(ms.v2fces, fce) - ms.updated = append(ms.updated, V2FileContractLeaf(&ms.v2fces[len(ms.v2fces)-1], true)) + ms.updated = append(ms.updated, v2FileContractLeaf(&ms.v2fces[len(ms.v2fces)-1], true)) } // ApplyTransaction applies a transaction to the MidState. 
@@ -471,7 +471,7 @@ func (ms *MidState) ApplyBlock(b types.Block, bs V1BlockSupplement) { StateElement: types.StateElement{ID: types.Hash256(bid)}, ChainIndex: types.ChainIndex{Height: ms.base.childHeight(), ID: bid}, } - ms.added = append(ms.added, ChainIndexLeaf(&ms.cie)) + ms.added = append(ms.added, chainIndexLeaf(&ms.cie)) } // An ApplyUpdate represents the effects of applying a block to a state. @@ -521,7 +521,7 @@ func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp ti s.FoundationPrimaryAddress = ms.foundationPrimary s.FoundationFailsafeAddress = ms.foundationFailsafe eau := s.Elements.ApplyBlock(ms.updated, ms.added) - s = ApplyWork(s, b, targetTimestamp) + s = ApplyOrphan(s, b, targetTimestamp) return s, ApplyUpdate{eau, ms} } diff --git a/consensus/validation.go b/consensus/validation.go index 5b0681c8..e2e65c1c 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -11,8 +11,7 @@ import ( "go.sia.tech/core/types" ) -// ValidateHeader validates a header in the context of s. 
-func ValidateHeader(s State, parentID types.BlockID, timestamp time.Time, nonce uint64, id types.BlockID) error { +func validateHeader(s State, parentID types.BlockID, timestamp time.Time, nonce uint64, id types.BlockID) error { if parentID != s.Index.ID { return errors.New("wrong parent ID") } else if timestamp.Before(s.medianTimestamp()) { @@ -74,7 +73,7 @@ func ValidateOrphan(s State, b types.Block) error { return errors.New("block exceeds maximum weight") } else if err := validateMinerPayouts(s, b); err != nil { return err - } else if err := ValidateHeader(s, b.ParentID, b.Timestamp, b.Nonce, b.ID()); err != nil { + } else if err := validateHeader(s, b.ParentID, b.Timestamp, b.Nonce, b.ID()); err != nil { return err } @@ -105,8 +104,8 @@ type MidState struct { v2fces []types.V2FileContractElement cie types.ChainIndexElement // these alias the above - updated []ElementLeaf - added []ElementLeaf + updated []elementLeaf + added []elementLeaf } // Index returns the index of the MidState's base state. From 50efe7a8ae8cc9c2bb350312d3c35045e08621bb Mon Sep 17 00:00:00 2001 From: lukechampine Date: Mon, 14 Aug 2023 22:55:39 -0400 Subject: [PATCH 20/53] chain: Lift AncestorTimestamp method out of Store --- chain/db.go | 19 ------------------- chain/manager.go | 28 +++++++++++++++++++++++----- chain/manager_test.go | 2 +- consensus/update_test.go | 20 +++++++++++++++++++- 4 files changed, 43 insertions(+), 26 deletions(-) diff --git a/chain/db.go b/chain/db.go index 45fd224e..a91a5e04 100644 --- a/chain/db.go +++ b/chain/db.go @@ -462,25 +462,6 @@ func (db *DBStore) BestIndex(height uint64) (index types.ChainIndex, ok bool) { return } -// AncestorTimestamp implements Store. 
-func (db *DBStore) AncestorTimestamp(id types.BlockID, depth uint64) time.Time { - c, _ := db.Checkpoint(id) - for i := uint64(1); i < depth; i++ { - // if we're on the best path, we can jump to the n'th block directly - if index, _ := db.BestIndex(c.State.Index.Height); index.ID == id { - height := c.State.Index.Height - (depth - i) - if c.State.Index.Height < (depth - i) { - height = 0 - } - ancestorIndex, _ := db.BestIndex(height) - c, _ = db.Checkpoint(ancestorIndex.ID) - break - } - c, _ = db.Checkpoint(c.Block.ParentID) - } - return c.Block.Timestamp -} - // SupplementTipTransaction implements Store. func (db *DBStore) SupplementTipTransaction(txn types.Transaction) (ts consensus.V1TransactionSupplement) { // get tip state, for proof-trimming diff --git a/chain/manager.go b/chain/manager.go index 7c487811..f24b46da 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -82,7 +82,6 @@ type Subscriber interface { // handled internally, e.g. by panicking or calling os.Exit. type Store interface { BestIndex(height uint64) (types.ChainIndex, bool) - AncestorTimestamp(id types.BlockID, n uint64) time.Time SupplementTipTransaction(txn types.Transaction) consensus.V1TransactionSupplement SupplementTipBlock(b types.Block) consensus.V1BlockSupplement @@ -95,6 +94,25 @@ type Store interface { RevertBlock(s consensus.State, cru consensus.RevertUpdate) } +// ancestorTimestamp returns the timestamp of the n'th ancestor of id. 
+func ancestorTimestamp(s Store, id types.BlockID, n uint64) time.Time { + c, _ := s.Checkpoint(id) + for i := uint64(1); i < n; i++ { + // if we're on the best path, we can jump to the n'th block directly + if index, _ := s.BestIndex(c.State.Index.Height); index.ID == id { + height := c.State.Index.Height - (n - i) + if c.State.Index.Height < (n - i) { + height = 0 + } + ancestorIndex, _ := s.BestIndex(height) + c, _ = s.Checkpoint(ancestorIndex.ID) + break + } + c, _ = s.Checkpoint(c.Block.ParentID) + } + return c.Block.Timestamp +} + // A Manager tracks multiple blockchains and identifies the best valid // chain. type Manager struct { @@ -228,7 +246,7 @@ func (m *Manager) AddBlocks(blocks []types.Block) error { } else if err := consensus.ValidateOrphan(cs, b); err != nil { return fmt.Errorf("block %v is invalid: %w", types.ChainIndex{Height: cs.Index.Height + 1, ID: b.ID()}, err) } - cs = consensus.ApplyOrphan(cs, b, m.store.AncestorTimestamp(b.ParentID, cs.AncestorDepth())) + cs = consensus.ApplyOrphan(cs, b, ancestorTimestamp(m.store, b.ParentID, cs.AncestorDepth())) m.store.AddCheckpoint(Checkpoint{b, cs, nil}) } @@ -282,11 +300,11 @@ func (m *Manager) applyTip(index types.ChainIndex) error { return fmt.Errorf("block %v is invalid: %w", index, err) } c.Supplement = &bs - targetTimestamp := m.store.AncestorTimestamp(c.Block.ParentID, m.tipState.AncestorDepth()) + targetTimestamp := ancestorTimestamp(m.store, c.Block.ParentID, m.tipState.AncestorDepth()) c.State, cau = consensus.ApplyBlock(m.tipState, c.Block, bs, targetTimestamp) m.store.AddCheckpoint(c) } else { - targetTimestamp := m.store.AncestorTimestamp(c.Block.ParentID, m.tipState.AncestorDepth()) + targetTimestamp := ancestorTimestamp(m.store, c.Block.ParentID, m.tipState.AncestorDepth()) _, cau = consensus.ApplyBlock(m.tipState, c.Block, *c.Supplement, targetTimestamp) } @@ -432,7 +450,7 @@ func (m *Manager) AddSubscriber(s Subscriber, tip types.ChainIndex) error { if !ok { return fmt.Errorf("missing 
apply parent checkpoint %v", c.Block.ParentID) } - _, cau := consensus.ApplyBlock(pc.State, c.Block, *c.Supplement, m.store.AncestorTimestamp(c.Block.ParentID, pc.State.AncestorDepth())) + _, cau := consensus.ApplyBlock(pc.State, c.Block, *c.Supplement, ancestorTimestamp(m.store, c.Block.ParentID, pc.State.AncestorDepth())) // TODO: commit every minute for large len(apply)? shouldCommit := index == m.tipState.Index if err := s.ProcessChainApplyUpdate(&ApplyUpdate{cau, c.Block, c.State}, shouldCommit); err != nil { diff --git a/chain/manager_test.go b/chain/manager_test.go index 28903fc5..497761ac 100644 --- a/chain/manager_test.go +++ b/chain/manager_test.go @@ -60,7 +60,7 @@ func TestManager(t *testing.T) { }}, } findBlockNonce(cs, &b) - cs, _ = consensus.ApplyBlock(cs, b, store.SupplementTipBlock(b), store.AncestorTimestamp(b.ParentID, cs.AncestorDepth())) + cs, _ = consensus.ApplyBlock(cs, b, store.SupplementTipBlock(b), ancestorTimestamp(store, b.ParentID, cs.AncestorDepth())) blocks = append(blocks, b) } return diff --git a/consensus/update_test.go b/consensus/update_test.go index 3c286e0e..c21b6d40 100644 --- a/consensus/update_test.go +++ b/consensus/update_test.go @@ -4,12 +4,30 @@ import ( "encoding/json" "reflect" "testing" + "time" "go.sia.tech/core/chain" "go.sia.tech/core/consensus" "go.sia.tech/core/types" ) +func ancestorTimestamp(s chain.Store, id types.BlockID, n uint64) time.Time { + c, _ := s.Checkpoint(id) + for i := uint64(1); i < n; i++ { + if index, _ := s.BestIndex(c.State.Index.Height); index.ID == id { + height := c.State.Index.Height - (n - i) + if c.State.Index.Height < (n - i) { + height = 0 + } + ancestorIndex, _ := s.BestIndex(height) + c, _ = s.Checkpoint(ancestorIndex.ID) + break + } + c, _ = s.Checkpoint(c.Block.ParentID) + } + return c.Block.Timestamp +} + func TestApplyBlock(t *testing.T) { n, genesisBlock := chain.TestnetZen() @@ -62,7 +80,7 @@ func TestApplyBlock(t *testing.T) { if err = consensus.ValidateBlock(cs, b, bs); err 
!= nil { return } - cs, au = consensus.ApplyBlock(cs, b, bs, dbStore.AncestorTimestamp(b.ParentID, cs.AncestorDepth())) + cs, au = consensus.ApplyBlock(cs, b, bs, ancestorTimestamp(dbStore, b.ParentID, cs.AncestorDepth())) return } checkUpdateElements := func(au consensus.ApplyUpdate, addedSCEs, spentSCEs []types.SiacoinElement, addedSFEs, spentSFEs []types.SiafundElement) { From 1157bec77735cf3bb97a0372e2076c057ecb3602 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Mon, 14 Aug 2023 22:56:44 -0400 Subject: [PATCH 21/53] chain: Track invalid blocks --- chain/manager.go | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/chain/manager.go b/chain/manager.go index f24b46da..d259fcfd 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -116,10 +116,11 @@ func ancestorTimestamp(s Store, id types.BlockID, n uint64) time.Time { // A Manager tracks multiple blockchains and identifies the best valid // chain. type Manager struct { - store Store - tipState consensus.State - subscribers []Subscriber - lastCommit time.Time + store Store + tipState consensus.State + subscribers []Subscriber + lastCommit time.Time + invalidBlocks map[types.BlockID]error txpool struct { txns []types.Transaction @@ -230,21 +231,25 @@ func (m *Manager) AddBlocks(blocks []types.Block) error { cs := m.tipState for _, b := range blocks { - if c, ok := m.store.Checkpoint(b.ID()); ok { + bid := b.ID() + if err := m.invalidBlocks[bid]; err != nil { + return fmt.Errorf("block %v is invalid: %w", types.ChainIndex{Height: cs.Index.Height + 1, ID: bid}, err) + } else if c, ok := m.store.Checkpoint(bid); ok { // already have this block cs = c.State continue } else if b.ParentID != c.State.Index.ID { c, ok := m.store.Checkpoint(b.ParentID) if !ok { - return fmt.Errorf("missing parent checkpoint for block %v", b.ID()) + return fmt.Errorf("missing parent checkpoint for block %v", bid) } cs = c.State } if 
b.Timestamp.After(cs.MaxFutureTimestamp(time.Now())) { return ErrFutureBlock } else if err := consensus.ValidateOrphan(cs, b); err != nil { - return fmt.Errorf("block %v is invalid: %w", types.ChainIndex{Height: cs.Index.Height + 1, ID: b.ID()}, err) + m.markBadBlock(bid, err) + return fmt.Errorf("block %v is invalid: %w", types.ChainIndex{Height: cs.Index.Height + 1, ID: bid}, err) } cs = consensus.ApplyOrphan(cs, b, ancestorTimestamp(m.store, b.ParentID, cs.AncestorDepth())) m.store.AddCheckpoint(Checkpoint{b, cs, nil}) @@ -261,6 +266,20 @@ func (m *Manager) AddBlocks(blocks []types.Block) error { return nil } +// markBadBlock marks a block as bad, so that we don't waste resources +// re-validating it if we see it again. +func (m *Manager) markBadBlock(bid types.BlockID, err error) { + const maxInvalidBlocks = 1000 + m.invalidBlocks[bid] = err + if len(m.invalidBlocks) > maxInvalidBlocks { + // forget a random entry + for bid := range m.invalidBlocks { + delete(m.invalidBlocks, bid) + break + } + } +} + // revertTip reverts the current tip. func (m *Manager) revertTip() error { c, ok := m.store.Checkpoint(m.tipState.Index.ID) @@ -297,6 +316,7 @@ func (m *Manager) applyTip(index types.ChainIndex) error { } else if c.Supplement == nil { bs := m.store.SupplementTipBlock(c.Block) if err := consensus.ValidateBlock(m.tipState, c.Block, bs); err != nil { + m.markBadBlock(index.ID, err) return fmt.Errorf("block %v is invalid: %w", index, err) } c.Supplement = &bs @@ -939,9 +959,10 @@ func (m *Manager) AddV2PoolTransactions(txns []types.V2Transaction) error { // NewManager returns a Manager initialized with the provided Store and State. 
func NewManager(store Store, cs consensus.State) *Manager { m := &Manager{ - store: store, - tipState: cs, - lastCommit: time.Now(), + store: store, + tipState: cs, + lastCommit: time.Now(), + invalidBlocks: make(map[types.BlockID]error), } m.txpool.indices = make(map[types.TransactionID]int) return m From 06354380b86e3fd6d4392f716199cbf6656319f2 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Mon, 14 Aug 2023 23:15:46 -0400 Subject: [PATCH 22/53] consensus: Add attestations to accumulator This enables untrusted downloads of the full set of attestations, which is important for syncing new nodes. Without this functionality, a new renter would have to scan the entire blockchain for host announcements, which would nullify one of the main benefits of utreexo (instant syncing). --- consensus/merkle.go | 23 ++++++++++++++--- consensus/state.go | 5 +++- consensus/update.go | 12 +++++++++ consensus/validation.go | 1 + types/encoding.go | 12 --------- types/types.go | 57 ++++++++++++++++++----------------------- 6 files changed, 61 insertions(+), 49 deletions(-) diff --git a/consensus/merkle.go b/consensus/merkle.go index 39863d3e..3c8d227f 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -73,6 +73,21 @@ func (l elementLeaf) proofRoot() types.Hash256 { return proofRoot(l.hash(), l.LeafIndex, l.MerkleProof) } +// chainIndexLeaf returns the elementLeaf for a ChainIndexElement. +func chainIndexLeaf(e *types.ChainIndexElement) elementLeaf { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + h.E.WriteString("sia/leaf/chainindex|") + e.StateElement.ID.EncodeTo(h.E) + e.ChainIndex.EncodeTo(h.E) + return elementLeaf{ + StateElement: &e.StateElement, + ElementHash: h.Sum(), + Spent: false, + } +} + // siacoinLeaf returns the elementLeaf for a SiacoinElement. 
func siacoinLeaf(e *types.SiacoinElement, spent bool) elementLeaf { h := hasherPool.Get().(*types.Hasher) @@ -135,14 +150,14 @@ func v2FileContractLeaf(e *types.V2FileContractElement, spent bool) elementLeaf } } -// chainIndexLeaf returns the elementLeaf for a ChainIndexElement. -func chainIndexLeaf(e *types.ChainIndexElement) elementLeaf { +// attestationLeaf returns the elementLeaf for an AttestationElement. +func attestationLeaf(e *types.AttestationElement) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/leaf/chainindex|") + h.E.WriteString("sia/leaf/attestation|") e.StateElement.ID.EncodeTo(h.E) - e.ChainIndex.EncodeTo(h.E) + e.Attestation.EncodeTo(h.E) return elementLeaf{ StateElement: &e.StateElement, ElementHash: h.Sum(), diff --git a/consensus/state.go b/consensus/state.go index 64308e3a..b677b59b 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -94,7 +94,8 @@ type State struct { FoundationPrimaryAddress types.Address `json:"foundationPrimaryAddress"` FoundationFailsafeAddress types.Address `json:"foundationFailsafeAddress"` - Elements ElementAccumulator `json:"elements"` + Elements ElementAccumulator `json:"elements"` + Attestations uint64 `json:"attestations"` } // EncodeTo implements types.EncoderTo. @@ -112,6 +113,7 @@ func (s State) EncodeTo(e *types.Encoder) { s.FoundationPrimaryAddress.EncodeTo(e) s.FoundationFailsafeAddress.EncodeTo(e) s.Elements.EncodeTo(e) + e.WriteUint64(s.Attestations) } // DecodeFrom implements types.DecoderFrom. 
@@ -129,6 +131,7 @@ func (s *State) DecodeFrom(d *types.Decoder) { s.FoundationPrimaryAddress.DecodeFrom(d) s.FoundationFailsafeAddress.DecodeFrom(d) s.Elements.DecodeFrom(d) + s.Attestations = d.ReadUint64() } func (s State) childHeight() uint64 { return s.Index.Height + 1 } diff --git a/consensus/update.go b/consensus/update.go index ec80181e..e7efe475 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -280,6 +280,11 @@ func (ms *MidState) resolveV2FileContractElement(fce types.V2FileContractElement ms.updated = append(ms.updated, v2FileContractLeaf(&ms.v2fces[len(ms.v2fces)-1], true)) } +func (ms *MidState) addAttestationElement(ae types.AttestationElement) { + ms.aes = append(ms.aes, ae) + ms.added = append(ms.added, attestationLeaf(&ms.aes[len(ms.aes)-1])) +} + // ApplyTransaction applies a transaction to the MidState. func (ms *MidState) ApplyTransaction(txn types.Transaction, ts V1TransactionSupplement) { txid := txn.ID() @@ -422,6 +427,12 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { MaturityHeight: ms.base.MaturityHeight(), }) } + for _, a := range txn.Attestations { + ms.addAttestationElement(types.AttestationElement{ + StateElement: nextElement(), + Attestation: a, + }) + } if txn.NewFoundationAddress != nil { ms.foundationPrimary = *txn.NewFoundationAddress ms.foundationFailsafe = *txn.NewFoundationAddress @@ -518,6 +529,7 @@ func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp ti ms := NewMidState(s) ms.ApplyBlock(b, bs) s.SiafundPool = ms.siafundPool + s.Attestations += uint64(len(ms.aes)) s.FoundationPrimaryAddress = ms.foundationPrimary s.FoundationFailsafeAddress = ms.foundationFailsafe eau := s.Elements.ApplyBlock(ms.updated, ms.added) diff --git a/consensus/validation.go b/consensus/validation.go index e2e65c1c..8ba9fe91 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -102,6 +102,7 @@ type MidState struct { sfes []types.SiafundElement fces []types.FileContractElement 
v2fces []types.V2FileContractElement + aes []types.AttestationElement cie types.ChainIndexElement // these alias the above updated []elementLeaf diff --git a/types/encoding.go b/types/encoding.go index d22499d5..8969d592 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -577,12 +577,6 @@ func (fce FileContractElement) EncodeTo(e *Encoder) { fce.FileContract.EncodeTo(e) } -// EncodeTo implements types.EncoderTo. -func (fcer FileContractElementRevision) EncodeTo(e *Encoder) { - fcer.Parent.EncodeTo(e) - fcer.Revision.EncodeTo(e) -} - // EncodeTo implements types.EncoderTo. func (fce V2FileContractElement) EncodeTo(e *Encoder) { fce.StateElement.EncodeTo(e) @@ -1131,12 +1125,6 @@ func (fce *FileContractElement) DecodeFrom(d *Decoder) { fce.FileContract.DecodeFrom(d) } -// DecodeFrom implements types.DecoderFrom. -func (fcer *FileContractElementRevision) DecodeFrom(d *Decoder) { - fcer.Parent.DecodeFrom(d) - fcer.Revision.DecodeFrom(d) -} - // DecodeFrom implements types.DecoderFrom. func (fce *V2FileContractElement) DecodeFrom(d *Decoder) { fce.StateElement.DecodeFrom(d) diff --git a/types/types.go b/types/types.go index f329bddf..fc0f57b0 100644 --- a/types/types.go +++ b/types/types.go @@ -581,6 +581,19 @@ type V2StorageProof struct { // storing any data, it will resolve as valid; otherwise, it resolves as missed. type V2FileContractExpiration struct{} +// An Attestation associates a key-value pair with an identity. For example, +// hosts attest to their network address by setting Key to "HostAnnouncement" +// and Value to their address, thereby allowing renters to discover them. +// Generally, an attestation for a particular key is considered to overwrite any +// previous attestations with the same key. (This allows hosts to announce a new +// network address, for example.) 
+type Attestation struct { + PublicKey PublicKey `json:"publicKey"` + Key string `json:"key"` + Value []byte `json:"value"` + Signature Signature `json:"signature"` +} + // A StateElement is a generic element within the state accumulator. type StateElement struct { ID Hash256 `json:"id"` // SiacoinOutputID, FileContractID, etc. @@ -588,65 +601,45 @@ type StateElement struct { MerkleProof []Hash256 `json:"merkleProof"` } -// A ChainIndexElement is a record of a block in the chain. +// A ChainIndexElement is a record of a ChainIndex within the state accumulator. type ChainIndexElement struct { StateElement ChainIndex } -// A SiacoinElement is a volume of siacoins that is created and spent as an -// atomic unit. +// A SiacoinElement is a record of a SiacoinOutput within the state accumulator. type SiacoinElement struct { StateElement SiacoinOutput MaturityHeight uint64 `json:"maturityHeight"` } -// A SiafundElement is a volume of siafunds that is created and spent as an -// atomic unit. +// A SiafundElement is a record of a SiafundOutput within the state accumulator. type SiafundElement struct { StateElement SiafundOutput ClaimStart Currency `json:"claimStart"` // value of SiafundPool when element was created } -// A FileContractElement is a storage agreement between a renter and a host. +// A FileContractElement is a record of a FileContract within the state +// accumulator. type FileContractElement struct { StateElement FileContract } -// A FileContractElementRevision updates the state of an existing file contract. -type FileContractElementRevision struct { - Parent FileContractElement `json:"parent"` - Revision FileContract `json:"revision"` } - -// RevisedElement returns the post-revision FileContractElement. 
-func (fcer FileContractElementRevision) RevisedElement() FileContractElement { - fce := fcer.Parent - fce.FileContract = fcer.Revision - fce.Payout = fcer.Parent.Payout // see FileContractRevision docstring - return fce -} - -// A V2FileContractElement is a storage agreement between a renter and a host. +// A V2FileContractElement is a record of a V2FileContract within the state +// accumulator. type V2FileContractElement struct { StateElement V2FileContract } -// An Attestation associates a key-value pair with an identity. For example, -// hosts attest to their network address by setting Key to "HostAnnouncement" -// and Value to their address, thereby allowing renters to discover them. -// Generally, an attestation for a particular key is considered to overwrite any -// previous attestations with the same key. (This allows hosts to announce a new -// network address, for example.) -type Attestation struct { - PublicKey PublicKey `json:"publicKey"` - Key string `json:"key"` - Value []byte `json:"value"` - Signature Signature `json:"signature"` +// An AttestationElement is a record of an Attestation within the state +// accumulator. +type AttestationElement struct { + StateElement + Attestation } // A V2Transaction effects a change of blockchain state. 
From f961cbfffe922288dc83d73b77026e8f64381302 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 15 Aug 2023 00:06:57 -0400 Subject: [PATCH 23/53] consensus: Fix spent handling in RevertBlock --- consensus/merkle.go | 42 +++++++++++++++++------------------------ consensus/update.go | 12 ++---------- consensus/validation.go | 14 +++++++------- types/types.go | 12 ++++++------ 4 files changed, 32 insertions(+), 48 deletions(-) diff --git a/consensus/merkle.go b/consensus/merkle.go index 3c8d227f..fca03339 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -235,44 +235,31 @@ func (acc *ElementAccumulator) containsLeaf(l elementLeaf) bool { return acc.hasTreeAtHeight(len(l.MerkleProof)) && acc.Trees[len(l.MerkleProof)] == l.proofRoot() } -// ContainsBlock returns true if the accumulator contains cie. -func (acc *ElementAccumulator) ContainsBlock(cie types.ChainIndexElement) bool { +func (acc *ElementAccumulator) containsChainIndex(cie types.ChainIndexElement) bool { return acc.containsLeaf(chainIndexLeaf(&cie)) } -// ContainsUnspentSiacoinElement returns true if the accumulator contains sce as an -// unspent output. -func (acc *ElementAccumulator) ContainsUnspentSiacoinElement(sce types.SiacoinElement) bool { +func (acc *ElementAccumulator) containsUnspentSiacoinElement(sce types.SiacoinElement) bool { return acc.containsLeaf(siacoinLeaf(&sce, false)) } -// ContainsSpentSiacoinElement returns true if the accumulator contains sce as a -// spent output. -func (acc *ElementAccumulator) ContainsSpentSiacoinElement(sce types.SiacoinElement) bool { +func (acc *ElementAccumulator) containsSpentSiacoinElement(sce types.SiacoinElement) bool { return acc.containsLeaf(siacoinLeaf(&sce, true)) } -// ContainsUnspentSiafundElement returns true if the accumulator contains e as an -// unspent output. 
-func (acc *ElementAccumulator) ContainsUnspentSiafundElement(sfe types.SiafundElement) bool { +func (acc *ElementAccumulator) containsUnspentSiafundElement(sfe types.SiafundElement) bool { return acc.containsLeaf(siafundLeaf(&sfe, false)) } -// ContainsSpentSiafundElement returns true if the accumulator contains o as a -// spent output. -func (acc *ElementAccumulator) ContainsSpentSiafundElement(sfe types.SiafundElement) bool { +func (acc *ElementAccumulator) containsSpentSiafundElement(sfe types.SiafundElement) bool { return acc.containsLeaf(siafundLeaf(&sfe, true)) } -// ContainsUnresolvedV2FileContractElement returns true if the accumulator -// contains fce as an unresolved file contract. -func (acc *ElementAccumulator) ContainsUnresolvedV2FileContractElement(fce types.V2FileContractElement) bool { +func (acc *ElementAccumulator) containsUnresolvedV2FileContractElement(fce types.V2FileContractElement) bool { return acc.containsLeaf(v2FileContractLeaf(&fce, false)) } -// ContainsResolvedV2FileContractElement returns true if the accumulator contains -// fce as a resolved file contract. -func (acc *ElementAccumulator) ContainsResolvedV2FileContractElement(fce types.V2FileContractElement) bool { +func (acc *ElementAccumulator) containsResolvedV2FileContractElement(fce types.V2FileContractElement) bool { return acc.containsLeaf(v2FileContractLeaf(&fce, true)) } @@ -405,9 +392,9 @@ func updateLeaves(leaves []elementLeaf) [64][]elementLeaf { return trees } -// ApplyBlock applies the supplied leaves to the accumulator, modifying it and +// applyBlock applies the supplied leaves to the accumulator, modifying it and // producing an update. 
-func (acc *ElementAccumulator) ApplyBlock(updated, added []elementLeaf) (eau ElementApplyUpdate) { +func (acc *ElementAccumulator) applyBlock(updated, added []elementLeaf) (eau ElementApplyUpdate) { eau.updated = updateLeaves(updated) for height, es := range eau.updated { if len(es) > 0 { @@ -421,10 +408,15 @@ func (acc *ElementAccumulator) ApplyBlock(updated, added []elementLeaf) (eau Ele return eau } -// RevertBlock modifies the proofs of supplied elements such that they validate +// revertBlock modifies the proofs of supplied elements such that they validate // under acc, which must be the accumulator prior to the application of those -// elements. The accumulator itself is not modified. -func (acc *ElementAccumulator) RevertBlock(updated []elementLeaf) (eru ElementRevertUpdate) { +// elements. All of the elements will be marked unspent. The accumulator itself +// is not modified. +func (acc *ElementAccumulator) revertBlock(updated []elementLeaf) (eru ElementRevertUpdate) { + for i := range updated { + // reverting a block can never cause an element to become spent + updated[i].Spent = false + } eru.updated = updateLeaves(updated) eru.numLeaves = acc.NumLeaves return diff --git a/consensus/update.go b/consensus/update.go index e7efe475..2781bf6a 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -532,7 +532,7 @@ func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp ti s.Attestations += uint64(len(ms.aes)) s.FoundationPrimaryAddress = ms.foundationPrimary s.FoundationFailsafeAddress = ms.foundationFailsafe - eau := s.Elements.ApplyBlock(ms.updated, ms.added) + eau := s.Elements.applyBlock(ms.updated, ms.added) s = ApplyOrphan(s, b, targetTimestamp) return s, ApplyUpdate{eau, ms} } @@ -575,14 +575,6 @@ func RevertBlock(s State, b types.Block, bs V1BlockSupplement) RevertUpdate { } ms := NewMidState(s) ms.ApplyBlock(b, bs) - // invert spends - // - // TODO: this might be horribly inadequate - for i := range ms.updated { - _, 
spent := ms.spends[ms.updated[i].ID] - ms.updated[i].Spent = !spent - } - - eru := s.Elements.RevertBlock(ms.updated) + eru := s.Elements.revertBlock(ms.updated) return RevertUpdate{eru, ms} } diff --git a/consensus/validation.go b/consensus/validation.go index 8ba9fe91..7e1ae5f9 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -707,8 +707,8 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { if _, ok := ms.ephemeral[sci.Parent.ID]; !ok { return fmt.Errorf("siacoin input %v spends nonexistent ephemeral output %v", i, sci.Parent.ID) } - } else if !ms.base.Elements.ContainsUnspentSiacoinElement(sci.Parent) { - if ms.base.Elements.ContainsSpentSiacoinElement(sci.Parent) { + } else if !ms.base.Elements.containsUnspentSiacoinElement(sci.Parent) { + if ms.base.Elements.containsSpentSiacoinElement(sci.Parent) { return fmt.Errorf("siacoin input %v double-spends output %v", i, sci.Parent.ID) } return fmt.Errorf("siacoin input %v spends output (%v) not present in the accumulator", i, sci.Parent.ID) @@ -763,8 +763,8 @@ func validateV2Siafunds(ms *MidState, txn types.V2Transaction) error { if _, ok := ms.ephemeral[sci.Parent.ID]; !ok { return fmt.Errorf("siafund input %v spends nonexistent ephemeral output %v", i, sci.Parent.ID) } - } else if !ms.base.Elements.ContainsUnspentSiafundElement(sci.Parent) { - if ms.base.Elements.ContainsSpentSiafundElement(sci.Parent) { + } else if !ms.base.Elements.containsUnspentSiafundElement(sci.Parent) { + if ms.base.Elements.containsSpentSiafundElement(sci.Parent) { return fmt.Errorf("siafund input %v double-spends output %v", i, sci.Parent.ID) } return fmt.Errorf("siafund input %v spends output (%v) not present in the accumulator", i, sci.Parent.ID) @@ -805,8 +805,8 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { validateParent := func(fce types.V2FileContractElement) error { if txid, ok := ms.spent(fce.ID); ok { return fmt.Errorf("has already been resolved in 
transaction %v", txid) - } else if !ms.base.Elements.ContainsUnresolvedV2FileContractElement(fce) { - if ms.base.Elements.ContainsResolvedV2FileContractElement(fce) { + } else if !ms.base.Elements.containsUnresolvedV2FileContractElement(fce) { + if ms.base.Elements.containsResolvedV2FileContractElement(fce) { return errors.New("has already been resolved in a previous block") } return errors.New("is not present in the accumulator") @@ -930,7 +930,7 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { } else if sp.ProofStart.Height != fc.ProofHeight { // see note on this field in types.StorageProof return fmt.Errorf("file contract storage proof %v has ProofStart (%v) that does not match contract ProofStart (%v)", i, sp.ProofStart.Height, fc.ProofHeight) - } else if ms.base.Elements.ContainsBlock(sp.ProofStart) { + } else if ms.base.Elements.containsChainIndex(sp.ProofStart) { return fmt.Errorf("file contract storage proof %v has invalid history proof", i) } leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, sp.ProofStart.ChainIndex.ID, types.FileContractID(fcr.Parent.ID)) diff --git a/types/types.go b/types/types.go index fc0f57b0..2bb16ec9 100644 --- a/types/types.go +++ b/types/types.go @@ -562,13 +562,13 @@ type V2FileContractRenewal struct { // Merkle tree of a V2FileContract's data. type V2StorageProof struct { // Selecting the leaf requires a source of unpredictable entropy; we use the - // ID of the block at the contract's ProofHeight. The StorageProof includes - // this ID, and asserts its presence in the chain via a history proof. + // ID of the block at the contract's ProofHeight. The storage proof thus + // includes a proof that this ID is the correct ancestor. // - // For convenience, ProofStart is a ChainIndex rather than a BlockID. 
- // Consequently, ProofStart.Height MUST match the ProofStart field of the - // contract's final revision; otherwise, the prover could use any - // ProofStart, giving them control over the leaf index. + // During validation, it is imperative to check that ProofStart.Height + // matches the ProofStart field of the contract's final revision; otherwise, + // the prover could use any ProofStart, giving them control over the leaf + // index. ProofStart ChainIndexElement // The leaf is always 64 bytes, extended with zeros if necessary. From fa6ad9edbfeba9aab93cfb7d21d9326ae8386c3b Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 15 Aug 2023 14:23:22 -0400 Subject: [PATCH 24/53] consensus: Reject v1 contracts that end after hardfork --- consensus/validation.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/consensus/validation.go b/consensus/validation.go index 7e1ae5f9..e9531554 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -329,6 +329,8 @@ func validateFileContracts(ms *MidState, txn types.Transaction, ts V1Transaction return fmt.Errorf("file contract %v has window that starts in the past", i) } else if fc.WindowEnd <= fc.WindowStart { return fmt.Errorf("file contract %v has window that ends before it begins", i) + } else if fc.WindowStart >= ms.base.Network.HardforkV2.RequireHeight { + return fmt.Errorf("file contract %v ends after v2 hardfork", i) } var validSum, missedSum types.Currency for _, output := range fc.ValidProofOutputs { @@ -351,6 +353,8 @@ func validateFileContracts(ms *MidState, txn types.Transaction, ts V1Transaction return fmt.Errorf("file contract revision %v has window that starts in the past", i) } else if fcr.FileContract.WindowEnd <= fcr.FileContract.WindowStart { return fmt.Errorf("file contract revision %v has window that ends before it begins", i) + } else if fcr.WindowStart >= ms.base.Network.HardforkV2.RequireHeight { + return fmt.Errorf("file contract revision %v ends after v2 hardfork", i) } else if 
txid, ok := ms.spent(types.Hash256(fcr.ParentID)); ok { return fmt.Errorf("file contract revision %v conflicts with previous proof or revision (in %v)", i, txid) } From 547ce0fecff8fe5531d3f23cfbaadda6a50ae282 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 15 Aug 2023 18:36:50 -0400 Subject: [PATCH 25/53] consensus: Implement (State).V2TransactionWeight Still need to establish good coefficients for size and signatures. These should be established based on historical data. --- consensus/state.go | 76 +++++++++++++++++++++++++++++++++++------ consensus/validation.go | 14 ++++++-- types/encoding.go | 25 -------------- 3 files changed, 76 insertions(+), 39 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index b677b59b..0c55c661 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -222,26 +222,80 @@ func (s State) MaxBlockWeight() uint64 { return 2_000_000 } +type writeCounter struct{ n int } + +func (wc *writeCounter) Write(p []byte) (int, error) { + wc.n += len(p) + return len(p), nil +} + // TransactionWeight computes the weight of a txn. func (s State) TransactionWeight(txn types.Transaction) uint64 { - return uint64(types.EncodedLen(txn)) + var wc writeCounter + e := types.NewEncoder(&wc) + txn.EncodeTo(e) + e.Flush() + return uint64(wc.n) } // V2TransactionWeight computes the weight of a txn. 
func (s State) V2TransactionWeight(txn types.V2Transaction) uint64 { - return uint64(types.EncodedLen(txn)) // TODO -} + var wc writeCounter + e := types.NewEncoder(&wc) + for _, sci := range txn.SiacoinInputs { + sci.Parent.MerkleProof = nil + sci.EncodeTo(e) + } + for _, sco := range txn.SiacoinOutputs { + sco.EncodeTo(e) + } + for _, sfi := range txn.SiafundInputs { + sfi.Parent.MerkleProof = nil + sfi.EncodeTo(e) + } + for _, sfo := range txn.SiafundOutputs { + sfo.EncodeTo(e) + } + for _, fc := range txn.FileContracts { + fc.EncodeTo(e) + } + for _, fcr := range txn.FileContractRevisions { + fcr.Parent.MerkleProof = nil + fcr.EncodeTo(e) + } + for _, fcr := range txn.FileContractResolutions { + fcr.Parent.MerkleProof = nil + if sp, ok := fcr.Resolution.(types.V2StorageProof); ok { + sp.ProofStart.MerkleProof = nil + fcr.Resolution = sp + } + fcr.EncodeTo(e) + } + for _, a := range txn.Attestations { + a.EncodeTo(e) + } + e.WriteBytes(txn.ArbitraryData) + storage := uint64(wc.n) -// BlockWeight computes the combined weight of a block's txns. -func (s State) BlockWeight(txns []types.Transaction, v2txns []types.V2Transaction) uint64 { - var weight uint64 - for _, txn := range txns { - weight += s.TransactionWeight(txn) + var signatures int + for _, sci := range txn.SiacoinInputs { + signatures += len(sci.Signatures) } - for _, txn := range v2txns { - weight += s.V2TransactionWeight(txn) + for _, sfi := range txn.SiafundInputs { + signatures += len(sfi.Signatures) + } + signatures += 2 * len(txn.FileContracts) + signatures += 2 * len(txn.FileContractRevisions) + for _, fcr := range txn.FileContractResolutions { + switch fcr.Resolution.(type) { + case types.V2FileContractRenewal, types.V2FileContract: + signatures += 2 + } } - return weight + signatures += len(txn.Attestations) + + // TODO: choose coefficients empirically + return storage + 100*uint64(signatures) } // FileContractTax computes the tax levied on a given contract. 
diff --git a/consensus/validation.go b/consensus/validation.go index e9531554..3dbd5cc2 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -68,9 +68,17 @@ func validateMinerPayouts(s State, b types.Block) error { // ValidateOrphan validates b in the context of s. func ValidateOrphan(s State, b types.Block) error { - // TODO: calculate size more efficiently - if uint64(types.EncodedLen(types.V1Block(b))) > s.MaxBlockWeight() { - return errors.New("block exceeds maximum weight") + var weight uint64 + for _, txn := range b.Transactions { + weight += s.TransactionWeight(txn) + } + if b.V2 != nil { + for _, txn := range b.V2.Transactions { + weight += s.V2TransactionWeight(txn) + } + } + if weight > s.MaxBlockWeight() { + return fmt.Errorf("block exceeds maximum weight (%v > %v)", weight, s.MaxBlockWeight()) } else if err := validateMinerPayouts(s, b); err != nil { return err } else if err := validateHeader(s, b.ParentID, b.Timestamp, b.Nonce, b.ID()); err != nil { diff --git a/types/encoding.go b/types/encoding.go index 8969d592..a1bbc098 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -100,31 +100,6 @@ type EncoderTo interface { EncodeTo(e *Encoder) } -// EncodedLen returns the length of v when encoded. -func EncodedLen(v interface{}) int { - var buf bytes.Buffer - e := NewEncoder(&buf) - if et, ok := v.(EncoderTo); ok { - et.EncodeTo(e) - } else { - switch v := v.(type) { - case bool: - e.WriteBool(v) - case uint64: - e.WriteUint64(v) - case time.Time: - e.WriteTime(v) - case []byte: - e.WritePrefix(len(v)) - e.Write(v) - default: - panic(fmt.Sprintf("cannot encode type %T", v)) - } - } - _ = e.Flush() // no error possible - return buf.Len() -} - // A Decoder reads values from an underlying stream. Callers MUST check // (*Decoder).Err before using any decoded values. 
type Decoder struct { From 9ab395098c3755474fc1c655661beefd30b9b134 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Wed, 16 Aug 2023 18:20:41 -0400 Subject: [PATCH 26/53] consensus: Use Work-based PoW in v2 Pre-hardfork, the Work values are computed as the inverse of their Target counterparts; post-hardfork, it's the opposite. Importantly, this means that PoW is still checked by comparing against ChildTarget, even though all of the difficulty adjustment math is in terms of Work. Other Target-based fields, like Depth and OakTarget, should be ignored post-hardfork. It would be nice if we could check work by comparing the difficulty to the expected number of hashes for the ID, but this would break compat- ibility with existing miners (and introduce a costly 256-bit division in the mining loop -- no thanks). So why make this change at all? Because Work-based math is more intuitive and easier for newcomers to understand; also, future nodes, ignorant of pre-v2 history, will be able to strip out the Target math entirely. 
--- consensus/state.go | 28 +++++-- consensus/update.go | 186 ++++++++++++++++++++++++++++++++++++-------- 2 files changed, 174 insertions(+), 40 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 0c55c661..28cca9cc 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -70,11 +70,13 @@ func (n *Network) GenesisState() State { ChildTarget: n.InitialTarget, SiafundPool: types.ZeroCurrency, - OakTime: 0, - OakTarget: intToTarget(maxTarget), - + OakTime: 0, + OakTarget: intToTarget(maxTarget), FoundationPrimaryAddress: n.HardforkFoundation.PrimaryAddress, FoundationFailsafeAddress: n.HardforkFoundation.FailsafeAddress, + TotalWork: Work{invTarget(intToTarget(maxTarget))}, + Difficulty: Work{invTarget(n.InitialTarget)}, + OakWork: Work{invTarget(intToTarget(maxTarget))}, } } @@ -83,17 +85,21 @@ type State struct { Network *Network `json:"-"` // network parameters are not encoded Index types.ChainIndex `json:"index"` - PrevTimestamps [11]time.Time `json:"prevTimestamps"` + PrevTimestamps [11]time.Time `json:"prevTimestamps"` // newest -> oldest Depth types.BlockID `json:"depth"` ChildTarget types.BlockID `json:"childTarget"` SiafundPool types.Currency `json:"siafundPool"` - // hardfork-related state - OakTime time.Duration `json:"oakTime"` - OakTarget types.BlockID `json:"oakTarget"` + // Oak hardfork state + OakTime time.Duration `json:"oakTime"` + OakTarget types.BlockID `json:"oakTarget"` + // Foundation hardfork state FoundationPrimaryAddress types.Address `json:"foundationPrimaryAddress"` FoundationFailsafeAddress types.Address `json:"foundationFailsafeAddress"` - + // v2 hardfork state + TotalWork Work `json:"totalWork"` + Difficulty Work `json:"difficulty"` + OakWork Work `json:"oakWork"` Elements ElementAccumulator `json:"elements"` Attestations uint64 `json:"attestations"` } @@ -112,6 +118,9 @@ func (s State) EncodeTo(e *types.Encoder) { s.OakTarget.EncodeTo(e) s.FoundationPrimaryAddress.EncodeTo(e) 
s.FoundationFailsafeAddress.EncodeTo(e) + s.TotalWork.EncodeTo(e) + s.Difficulty.EncodeTo(e) + s.OakWork.EncodeTo(e) s.Elements.EncodeTo(e) e.WriteUint64(s.Attestations) } @@ -130,6 +139,9 @@ func (s *State) DecodeFrom(d *types.Decoder) { s.OakTarget.DecodeFrom(d) s.FoundationPrimaryAddress.DecodeFrom(d) s.FoundationFailsafeAddress.DecodeFrom(d) + s.TotalWork.DecodeFrom(d) + s.Difficulty.DecodeFrom(d) + s.OakWork.DecodeFrom(d) s.Elements.DecodeFrom(d) s.Attestations = d.ReadUint64() } diff --git a/consensus/update.go b/consensus/update.go index 2781bf6a..89940c63 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -2,14 +2,89 @@ package consensus import ( "bytes" + "encoding/binary" "math/big" + "math/bits" "time" "go.sia.tech/core/types" ) +// Work represents a quantity of work. +type Work struct { + // expected number of tries required to produce a given hash (big-endian) + n [32]byte +} + +// Cmp compares two work values. +func (w Work) Cmp(v Work) int { + return bytes.Compare(w.n[:], v.n[:]) +} + +// EncodeTo implements types.EncoderTo. +func (w Work) EncodeTo(e *types.Encoder) { e.Write(w.n[:]) } + +// DecodeFrom implements types.DecoderFrom. 
+func (w *Work) DecodeFrom(d *types.Decoder) { d.Read(w.n[:]) } + +func (w Work) add(v Work) Work { + var r Work + var sum, c uint64 + for i := 24; i >= 0; i -= 8 { + wi := binary.BigEndian.Uint64(w.n[i:]) + vi := binary.BigEndian.Uint64(v.n[i:]) + sum, c = bits.Add64(wi, vi, c) + binary.BigEndian.PutUint64(r.n[i:], sum) + } + return r +} + +func (w Work) sub(v Work) Work { + var r Work + var sum, c uint64 + for i := 24; i >= 0; i -= 8 { + wi := binary.BigEndian.Uint64(w.n[i:]) + vi := binary.BigEndian.Uint64(v.n[i:]) + sum, c = bits.Sub64(wi, vi, c) + binary.BigEndian.PutUint64(r.n[i:], sum) + } + return r +} + +func (w Work) mul64(v uint64) Work { + var r Work + var c uint64 + for i := 24; i >= 0; i -= 8 { + wi := binary.BigEndian.Uint64(w.n[i:]) + hi, prod := bits.Mul64(wi, v) + prod, cc := bits.Add64(prod, c, 0) + c = hi + cc + binary.BigEndian.PutUint64(r.n[i:], prod) + } + return r +} + +func (w Work) div64(v uint64) Work { + var r Work + var quo, rem uint64 + for i := 0; i < len(w.n); i += 8 { + wi := binary.BigEndian.Uint64(w.n[i:]) + quo, rem = bits.Div64(rem, wi, v) + binary.BigEndian.PutUint64(r.n[i:], quo) + } + return r +} + +// prior to v2, work is represented in terms of "target" hashes, i.e. 
the inverse of Work + var maxTarget = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1)) +func invTarget(n [32]byte) (inv [32]byte) { + i := new(big.Int).SetBytes(n[:]) + i.Div(maxTarget, i).FillBytes(inv[:]) + return +} + func intToTarget(i *big.Int) (t types.BlockID) { if i.BitLen() >= 256 { i = maxTarget @@ -36,6 +111,17 @@ func mulTargetFrac(x types.BlockID, n, d int64) (m types.BlockID) { return intToTarget(i) } +func updateTotalWork(s State) (Work, types.BlockID) { + // prior to the hardfork, we compute the work from the depth; after the + // hardfork, we do the opposite + if s.childHeight() < s.Network.HardforkV2.AllowHeight { + depth := addTarget(s.Depth, s.ChildTarget) + return Work{invTarget(depth)}, depth + } + totalWork := s.TotalWork.add(s.Difficulty) + return totalWork, invTarget(totalWork.n) +} + func updateOakTime(s State, blockTimestamp, parentTimestamp time.Time) time.Duration { if s.childHeight() == s.Network.HardforkASIC.Height-1 { return s.Network.HardforkASIC.OakTime @@ -55,6 +141,17 @@ func updateOakTarget(s State) types.BlockID { return addTarget(mulTargetFrac(s.OakTarget, 1000, 995), s.ChildTarget) } +func updateOakWork(s State) (Work, types.BlockID) { + // prior to the hardfork, we compute the work from the target; after the + // hardfork, we do the opposite + if s.childHeight() < s.Network.HardforkV2.AllowHeight { + target := invTarget(updateOakTarget(s)) + return Work{target}, target + } + work := s.OakWork.sub(s.OakWork.div64(200)).add(s.Difficulty) + return work, invTarget(work.n) +} + func adjustTarget(s State, blockTimestamp time.Time, targetTimestamp time.Time) types.BlockID { blockInterval := int64(s.BlockInterval() / time.Second) @@ -80,8 +177,8 @@ func adjustTarget(s State, blockTimestamp time.Time, targetTimestamp time.Time) return mulTargetFrac(s.ChildTarget, elapsed, expected) } + // same as adjustDifficulty, just a bit hairier oakTotalTime := int64(s.OakTime / time.Second) - var delta int64 if 
s.Index.Height < s.Network.HardforkOak.FixHeight { delta = (blockInterval * int64(s.Index.Height)) - oakTotalTime @@ -89,37 +186,18 @@ func adjustTarget(s State, blockTimestamp time.Time, targetTimestamp time.Time) parentTimestamp := s.PrevTimestamps[0] delta = (blockInterval * int64(s.Index.Height)) - (parentTimestamp.Unix() - s.Network.HardforkOak.GenesisTimestamp.Unix()) } - - // square the delta and preserve its sign shift := delta * delta if delta < 0 { shift = -shift } - // scale such that a delta of 10,000 produces a shift of 10 seconds shift *= 10 shift /= 10000 * 10000 - - // calculate the new target block time, clamped to a factor of 3 targetBlockTime := blockInterval + shift if min := blockInterval / 3; targetBlockTime < min { targetBlockTime = min } else if max := blockInterval * 3; targetBlockTime > max { targetBlockTime = max } - - // calculate the new target - // - // NOTE: this *should* be as simple as: - // - // newTarget := mulTargetFrac(s.OakTarget, oakTotalTime, targetBlockTime) - // - // However, the siad consensus code includes maxTarget divisions, resulting - // in slightly different rounding, which we must preserve here. First, we - // calculate the estimated hashrate from the (decayed) total work and the - // (decayed, clamped) total time. We then multiply by the target block time - // to get the expected number of hashes required to produce the next block, - // i.e. the new difficulty. Finally, we divide maxTarget by the difficulty - // to get the new target. 
if oakTotalTime <= 0 { oakTotalTime = 1 } @@ -129,15 +207,10 @@ func adjustTarget(s State, blockTimestamp time.Time, targetTimestamp time.Time) estimatedHashrate := new(big.Int).Div(maxTarget, new(big.Int).SetBytes(s.OakTarget[:])) estimatedHashrate.Div(estimatedHashrate, big.NewInt(oakTotalTime)) estimatedHashrate.Mul(estimatedHashrate, big.NewInt(targetBlockTime)) - if estimatedHashrate.BitLen() == 0 { - estimatedHashrate = big.NewInt(1) + if estimatedHashrate.Sign() == 0 { + estimatedHashrate.SetInt64(1) } newTarget := intToTarget(new(big.Int).Div(maxTarget, estimatedHashrate)) - - // clamp the adjustment to 0.4%, except for ASIC hardfork block - // - // NOTE: the multiplications are flipped re: siad because we are comparing - // work, not targets if s.childHeight() == s.Network.HardforkASIC.Height { return newTarget } @@ -151,6 +224,56 @@ func adjustTarget(s State, blockTimestamp time.Time, targetTimestamp time.Time) return newTarget } +func adjustDifficulty(s State, blockTimestamp time.Time, targetTimestamp time.Time) (Work, types.BlockID) { + // prior to the hardfork, we compute the work from the target; after the + // hardfork, we do the opposite + if s.childHeight() < s.Network.HardforkV2.AllowHeight { + target := adjustTarget(s, blockTimestamp, targetTimestamp) + return Work{invTarget(target)}, target + } + + expectedTime := s.BlockInterval() * time.Duration(s.childHeight()) + actualTime := blockTimestamp.Sub(s.Network.HardforkOak.GenesisTimestamp) + delta := expectedTime - actualTime + // square the delta, scaling such that a delta of 10,000 produces a shift of + // 10 seconds, + shift := 10 * (delta / 10000) * (delta / 10000) + // preserve sign + if delta < 0 { + shift = -shift + } + + // calculate the new target block time, clamped to a factor of 3 + targetBlockTime := s.BlockInterval() + shift + if min := s.BlockInterval() / 3; targetBlockTime < min { + targetBlockTime = min + } else if max := s.BlockInterval() * 3; targetBlockTime > max { + 
targetBlockTime = max + } + + // estimate current hashrate + // + // NOTE: to prevent overflow/truncation, we operate in terms of seconds + if s.OakTime <= time.Second { + s.OakTime = time.Second + } + estimatedHashrate := s.OakWork.div64(uint64(s.OakTime / time.Second)) + + // multiply the hashrate by the target block time; this is the expected + // number of hashes required to produce the next block, i.e. the new + // difficulty + newDifficulty := estimatedHashrate.mul64(uint64(targetBlockTime / time.Second)) + + // clamp the adjustment to 0.4% + maxAdjust := s.Difficulty.div64(250) + if min := s.Difficulty.sub(maxAdjust); newDifficulty.Cmp(min) < 0 { + newDifficulty = min + } else if max := s.Difficulty.add(maxAdjust); newDifficulty.Cmp(max) > 0 { + newDifficulty = max + } + return newDifficulty, invTarget(newDifficulty.n) +} + // ApplyOrphan applies the work of b to s, returning the resulting state. Only // the PoW-related fields are updated. func ApplyOrphan(s State, b types.Block, targetTimestamp time.Time) State { @@ -161,19 +284,18 @@ func ApplyOrphan(s State, b types.Block, targetTimestamp time.Time) State { if b.ParentID == (types.BlockID{}) { // special handling for genesis block s.OakTime = updateOakTime(s, b.Timestamp, b.Timestamp) - s.OakTarget = updateOakTarget(s) + s.OakWork, s.OakTarget = updateOakWork(s) s.Index = types.ChainIndex{Height: 0, ID: b.ID()} } else { - s.Depth = addTarget(s.Depth, s.ChildTarget) - s.ChildTarget = adjustTarget(s, b.Timestamp, targetTimestamp) + s.TotalWork, s.Depth = updateTotalWork(s) + s.Difficulty, s.ChildTarget = adjustDifficulty(s, b.Timestamp, targetTimestamp) s.OakTime = updateOakTime(s, b.Timestamp, s.PrevTimestamps[0]) - s.OakTarget = updateOakTarget(s) + s.OakWork, s.OakTarget = updateOakWork(s) s.Index = types.ChainIndex{Height: s.Index.Height + 1, ID: b.ID()} } copy(s.PrevTimestamps[1:], s.PrevTimestamps[:]) s.PrevTimestamps[0] = b.Timestamp return s - } func (ms *MidState) addedLeaf(id types.Hash256) 
*elementLeaf { From f9898b2ab4a1aa8fe5507bd5ae310eb529448ec2 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sat, 19 Aug 2023 00:03:47 -0400 Subject: [PATCH 27/53] consensus: Fix elementLeaf aliasing --- consensus/merkle.go | 4 -- consensus/update.go | 82 +++++++++++++++++++++++------------------ consensus/validation.go | 3 -- 3 files changed, 46 insertions(+), 43 deletions(-) diff --git a/consensus/merkle.go b/consensus/merkle.go index fca03339..1b06343d 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -413,10 +413,6 @@ func (acc *ElementAccumulator) applyBlock(updated, added []elementLeaf) (eau Ele // elements. All of the elements will be marked unspent. The accumulator itself // is not modified. func (acc *ElementAccumulator) revertBlock(updated []elementLeaf) (eru ElementRevertUpdate) { - for i := range updated { - // reverting a block can never cause an element to become spent - updated[i].Spent = false - } eru.updated = updateLeaves(updated) eru.numLeaves = acc.NumLeaves return diff --git a/consensus/update.go b/consensus/update.go index 89940c63..e4b82e9a 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -298,52 +298,34 @@ func ApplyOrphan(s State, b types.Block, targetTimestamp time.Time) State { return s } -func (ms *MidState) addedLeaf(id types.Hash256) *elementLeaf { - for i := range ms.added { - if ms.added[i].ID == id { - return &ms.added[i] - } - } - return nil -} - func (ms *MidState) addSiacoinElement(sce types.SiacoinElement) { ms.sces = append(ms.sces, sce) - ms.added = append(ms.added, siacoinLeaf(&ms.sces[len(ms.sces)-1], false)) ms.ephemeral[ms.sces[len(ms.sces)-1].ID] = len(ms.sces) - 1 } func (ms *MidState) spendSiacoinElement(sce types.SiacoinElement, txid types.TransactionID) { ms.spends[sce.ID] = txid - if _, ok := ms.ephemeral[sce.ID]; ok { - ms.addedLeaf(sce.ID).Spent = true - } else { + if _, ok := ms.ephemeral[sce.ID]; !ok { sce.MerkleProof = append([]types.Hash256(nil), sce.MerkleProof...) 
ms.sces = append(ms.sces, sce) - ms.updated = append(ms.updated, siacoinLeaf(&ms.sces[len(ms.sces)-1], true)) } } func (ms *MidState) addSiafundElement(sfe types.SiafundElement) { ms.sfes = append(ms.sfes, sfe) - ms.added = append(ms.added, siafundLeaf(&ms.sfes[len(ms.sfes)-1], false)) ms.ephemeral[ms.sfes[len(ms.sfes)-1].ID] = len(ms.sfes) - 1 } func (ms *MidState) spendSiafundElement(sfe types.SiafundElement, txid types.TransactionID) { ms.spends[sfe.ID] = txid - if _, ok := ms.ephemeral[sfe.ID]; ok { - ms.addedLeaf(sfe.ID).Spent = true - } else { + if _, ok := ms.ephemeral[sfe.ID]; !ok { sfe.MerkleProof = append([]types.Hash256(nil), sfe.MerkleProof...) ms.sfes = append(ms.sfes, sfe) - ms.updated = append(ms.updated, siafundLeaf(&ms.sfes[len(ms.sfes)-1], true)) } } func (ms *MidState) addFileContractElement(fce types.FileContractElement) { ms.fces = append(ms.fces, fce) - ms.added = append(ms.added, fileContractLeaf(&ms.fces[len(ms.fces)-1], false)) ms.ephemeral[ms.fces[len(ms.fces)-1].ID] = len(ms.fces) - 1 ms.siafundPool = ms.siafundPool.Add(ms.base.FileContractTax(fce.FileContract)) } @@ -352,16 +334,9 @@ func (ms *MidState) reviseFileContractElement(fce types.FileContractElement, rev rev.Payout = fce.FileContract.Payout if i, ok := ms.ephemeral[fce.ID]; ok { ms.fces[i].FileContract = rev - *ms.addedLeaf(fce.ID) = fileContractLeaf(&ms.fces[i], false) } else { if r, ok := ms.revs[fce.ID]; ok { r.FileContract = rev - for i := range ms.updated { - if ms.updated[i].ID == fce.ID { - ms.updated[i] = fileContractLeaf(r, false) - break - } - } } else { // store the original fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) @@ -370,7 +345,6 @@ func (ms *MidState) reviseFileContractElement(fce types.FileContractElement, rev fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) 
fce.FileContract = rev ms.revs[fce.ID] = &fce - ms.updated = append(ms.updated, fileContractLeaf(&fce, false)) } } } @@ -379,12 +353,10 @@ func (ms *MidState) resolveFileContractElement(fce types.FileContractElement, tx ms.spends[fce.ID] = txid fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) ms.fces = append(ms.fces, fce) - ms.updated = append(ms.updated, fileContractLeaf(&ms.fces[len(ms.fces)-1], true)) } func (ms *MidState) addV2FileContractElement(fce types.V2FileContractElement) { ms.v2fces = append(ms.v2fces, fce) - ms.added = append(ms.added, v2FileContractLeaf(&ms.v2fces[len(ms.v2fces)-1], false)) ms.ephemeral[ms.v2fces[len(ms.v2fces)-1].ID] = len(ms.v2fces) - 1 ms.siafundPool = ms.siafundPool.Add(ms.base.V2FileContractTax(fce.V2FileContract)) } @@ -392,19 +364,16 @@ func (ms *MidState) addV2FileContractElement(fce types.V2FileContractElement) { func (ms *MidState) reviseV2FileContractElement(fce types.V2FileContractElement, rev types.V2FileContract) { fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) ms.v2fces = append(ms.v2fces, fce) - ms.updated = append(ms.updated, fileContractLeaf(&ms.fces[len(ms.fces)-1], false)) } func (ms *MidState) resolveV2FileContractElement(fce types.V2FileContractElement, txid types.TransactionID) { ms.spends[fce.ID] = txid fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) ms.v2fces = append(ms.v2fces, fce) - ms.updated = append(ms.updated, v2FileContractLeaf(&ms.v2fces[len(ms.v2fces)-1], true)) } func (ms *MidState) addAttestationElement(ae types.AttestationElement) { ms.aes = append(ms.aes, ae) - ms.added = append(ms.added, attestationLeaf(&ms.aes[len(ms.aes)-1])) } // ApplyTransaction applies a transaction to the MidState. 
@@ -604,7 +573,29 @@ func (ms *MidState) ApplyBlock(b types.Block, bs V1BlockSupplement) { StateElement: types.StateElement{ID: types.Hash256(bid)}, ChainIndex: types.ChainIndex{Height: ms.base.childHeight(), ID: bid}, } - ms.added = append(ms.added, chainIndexLeaf(&ms.cie)) +} + +func (ms *MidState) forEachElementLeaf(fn func(elementLeaf)) { + for i := range ms.sces { + fn(siacoinLeaf(&ms.sces[i], ms.isSpent(ms.sces[i].ID))) + } + for i := range ms.sfes { + fn(siafundLeaf(&ms.sfes[i], ms.isSpent(ms.sfes[i].ID))) + } + for i := range ms.fces { + if r, ok := ms.revs[ms.fces[i].ID]; ok { + fn(fileContractLeaf(r, ms.isSpent(ms.fces[i].ID))) + } else { + fn(fileContractLeaf(&ms.fces[i], ms.isSpent(ms.fces[i].ID))) + } + } + for i := range ms.v2fces { + fn(v2FileContractLeaf(&ms.v2fces[i], ms.isSpent(ms.v2fces[i].ID))) + } + for i := range ms.aes { + fn(attestationLeaf(&ms.aes[i])) + } + fn(chainIndexLeaf(&ms.cie)) } // An ApplyUpdate represents the effects of applying a block to a state. @@ -654,7 +645,17 @@ func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp ti s.Attestations += uint64(len(ms.aes)) s.FoundationPrimaryAddress = ms.foundationPrimary s.FoundationFailsafeAddress = ms.foundationFailsafe - eau := s.Elements.applyBlock(ms.updated, ms.added) + + // compute updated and added elements + var updated, added []elementLeaf + ms.forEachElementLeaf(func(el elementLeaf) { + if el.MerkleProof == nil { + added = append(added, el) + } else { + updated = append(updated, el) + } + }) + eau := s.Elements.applyBlock(updated, added) s = ApplyOrphan(s, b, targetTimestamp) return s, ApplyUpdate{eau, ms} } @@ -697,6 +698,15 @@ func RevertBlock(s State, b types.Block, bs V1BlockSupplement) RevertUpdate { } ms := NewMidState(s) ms.ApplyBlock(b, bs) - eru := s.Elements.revertBlock(ms.updated) + + // compute updated elements + var updated []elementLeaf + ms.forEachElementLeaf(func(el elementLeaf) { + if el.MerkleProof != nil { + el.Spent = false // 
reverting a block can never cause an element to become spent + updated = append(updated, el) + } + }) + eru := s.Elements.revertBlock(updated) return RevertUpdate{eru, ms} } diff --git a/consensus/validation.go b/consensus/validation.go index 3dbd5cc2..17d0c5f9 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -112,9 +112,6 @@ type MidState struct { v2fces []types.V2FileContractElement aes []types.AttestationElement cie types.ChainIndexElement - // these alias the above - updated []elementLeaf - added []elementLeaf } // Index returns the index of the MidState's base state. From 6cdd56483a0545af237739abd8c7945359b1c425 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Wed, 23 Aug 2023 22:06:37 -0400 Subject: [PATCH 28/53] types: Change v2 address algorithm This makes threshold policy addresses more like a Merkle root; the address is a hash of the addresses of the sub-policies. Furthermore, you can replace any sub-policy with an "opaque" version that has the same hash, but reveals no information. This is good for privacy; it also saves some space in policies with big, unused sub-policies. But that's actually not the primary motivation here. The primary motivation is simplifying validation! When we validate a pubkey spend policy, we check it against *every* signature provided alongside the input. If we didn't -- if we instead only checked the *next* signature -- we would reject some inputs with valid signatures. Consider: thresh(1, [thresh(1, [pk(A), pk(B)]), pk(C)]) i.e. "either A or B or C." Now imagine we have an input with one signature from key C. Clearly this should be valid. But if we only checked the *next* signature, we would reject it: first we would check against A; after that failed, we would discard the signature, and never reach C. Another strategy would be to only discard after hitting a valid signature. That way, the signature from C would be checked against A, B, and C, ultimately succeeding. 
But this approach fails on a different case: thresh(1, [above(3), pk(A), pk(B)]) Now, which signatures should we provide? It depends on whether the height is above 3. If so, we should provide a signature from A; otherwise, a signature from B. This isn't the end of the world -- changes in height are allowed to invalidate transactions, after all -- but it does feel a bit user-hostile. Plus, it's wasteful: the signature is checked three times instead of once. The new approach fixes these problems by having the transaction signer specify which sub-policies they intend to satisfy. They do this by replacing the unsatisfied sub-policies with "opaque" policies. The validator can then use a much simpler strategy for handling signatures: the slice of signatures provided in the input should exactly match a depth-first traversal of the policy. Each signature only needs to be checked once, and failure to verify can be treated as a fatal error. Anyway, since this stuff is only relevant to complex policies, I was careful to ensure that the standard single- pubkey policy is still simple and cheap to encode -- it should only require one hash. (In fact, the algorithm for non-threshold policies hasn't changed at all.) One last note: since the opaque hash is actually the same as the policy's address, there is a potential privacy leak: if you use the same keypair in two different policies, then anyone can tell that they are connected, even if one of them is "opaque." This doesn't seem like a big deal; you shouldn't be reusing keypairs like that anyway, and if you *really* want to, you can always wrap it in a thresh(1) or an above(0) or something to change the opaque hash.
--- consensus/validation.go | 28 +++++++++--- types/encoding.go | 66 ++++++++++++++++------------ types/policy.go | 45 ++++++++++++++++--- types/policy_test.go | 95 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 196 insertions(+), 38 deletions(-) create mode 100644 types/policy_test.go diff --git a/consensus/validation.go b/consensus/validation.go index 17d0c5f9..75079dcd 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -29,6 +29,16 @@ func validateMinerPayouts(s State, b types.Block) error { var overflow bool for _, txn := range b.Transactions { for _, fee := range txn.MinerFees { + // NOTE: it's unclear why this check was implemented in siad; the + // length of txn.MinerFees isn't checked, so it's still possible for + // a transaction to have zero total fees. There's never a *reason* + // to specify a zero-valued miner fee -- it's a developer error -- + // but it's also not invalid or dangerous with regard to consensus. + // Most likely, this check stems from a general policy against + // creating zero-valued outputs, even though the miner payout output + // is an aggregate of *all* fees (plus the block reward) and thus + // will never be zero-valued anyway. In any case, this check is moot + // in v2, where transactions have a single MinerFee, not a slice. 
if fee.IsZero() { return errors.New("transaction fee has zero value") } @@ -653,6 +663,12 @@ func validateV2CurrencyValues(ms *MidState, txn types.V2Transaction) error { } func validateSpendPolicy(s State, p types.SpendPolicy, sigHash types.Hash256, sigs []types.Signature) error { + nextSig := func() (sig types.Signature, ok bool) { + if ok = len(sigs) > 0; ok { + sig, sigs = sigs[0], sigs[1:] + } + return + } var verify func(types.SpendPolicy) error verify = func(p types.SpendPolicy) error { switch p := p.Type.(type) { @@ -662,13 +678,11 @@ func validateSpendPolicy(s State, p types.SpendPolicy, sigHash types.Hash256, si } return fmt.Errorf("height not above %v", uint64(p)) case types.PolicyTypePublicKey: - for i := range sigs { - if types.PublicKey(p).VerifyHash(sigHash, sigs[i]) { - sigs = sigs[i+1:] - return nil - } + sig, ok := nextSig() + if ok && types.PublicKey(p).VerifyHash(sigHash, sig) { + return nil } - return errors.New("no signatures matching pubkey") + return errors.New("signature does not match pubkey") case types.PolicyTypeThreshold: for i := 0; i < len(p.Of) && p.N > 0 && len(p.Of[i:]) >= int(p.N); i++ { if verify(p.Of[i]) == nil { @@ -679,6 +693,8 @@ func validateSpendPolicy(s State, p types.SpendPolicy, sigHash types.Hash256, si return nil } return errors.New("threshold not reached") + case types.PolicyTypeOpaque: + return errors.New("opaque policy") case types.PolicyTypeUnlockConditions: if err := verify(types.PolicyAbove(p.Timelock)); err != nil { return err diff --git a/types/encoding.go b/types/encoding.go index a1bbc098..5d7fa10f 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -438,44 +438,48 @@ func (txn *Transaction) encodeNoSignatures(e *Encoder) { } } -// EncodeTo implements types.EncoderTo. 
-func (p SpendPolicy) EncodeTo(e *Encoder) { +func (p SpendPolicy) encodePolicy(e *Encoder) { const ( - version = 1 - opInvalid = iota opAbove opPublicKey opThreshold + opOpaque opUnlockConditions ) - - var writePolicy func(SpendPolicy) - writePolicy = func(p SpendPolicy) { - switch p := p.Type.(type) { - case PolicyTypeAbove: - e.WriteUint8(opAbove) - e.WriteUint64(uint64(p)) - case PolicyTypePublicKey: - e.WriteUint8(opPublicKey) - PublicKey(p).EncodeTo(e) - case PolicyTypeThreshold: - e.WriteUint8(opThreshold) - e.WriteUint8(p.N) - e.WriteUint8(uint8(len(p.Of))) - for i := range p.Of { - writePolicy(p.Of[i]) + switch p := p.Type.(type) { + case PolicyTypeAbove: + e.WriteUint8(opAbove) + e.WriteUint64(uint64(p)) + case PolicyTypePublicKey: + e.WriteUint8(opPublicKey) + PublicKey(p).EncodeTo(e) + case PolicyTypeThreshold: + e.WriteUint8(opThreshold) + e.WriteUint8(p.N) + e.WriteUint8(uint8(len(p.Of))) + for i := range p.Of { + if _, ok := p.Of[i].Type.(PolicyTypeUnlockConditions); ok { + panic("unlock condition policies cannot be composed") } - case PolicyTypeUnlockConditions: - e.WriteUint8(opUnlockConditions) - UnlockConditions(p).EncodeTo(e) - default: - panic(fmt.Sprintf("unhandled policy type %T", p)) + p.Of[i].encodePolicy(e) } + case PolicyTypeOpaque: + e.WriteUint8(opOpaque) + Hash256(p).EncodeTo(e) + case PolicyTypeUnlockConditions: + e.WriteUint8(opUnlockConditions) + UnlockConditions(p).EncodeTo(e) + default: + panic(fmt.Sprintf("unhandled policy type %T", p)) } +} +// EncodeTo implements types.EncoderTo. +func (p SpendPolicy) EncodeTo(e *Encoder) { + const version = 1 e.WriteUint8(version) - writePolicy(p) + p.encodePolicy(e) } // EncodeTo implements types.EncoderTo. 
@@ -976,11 +980,13 @@ func (p *SpendPolicy) DecodeFrom(d *Decoder) { const ( version = 1 maxPolicies = 1024 - + ) + const ( opInvalid = iota opAbove opPublicKey opThreshold + opOpaque opUnlockConditions ) @@ -1005,9 +1011,15 @@ func (p *SpendPolicy) DecodeFrom(d *Decoder) { of[i], err = readPolicy() if err != nil { return SpendPolicy{}, err + } else if _, ok := of[i].Type.(PolicyTypeUnlockConditions); ok { + return SpendPolicy{}, errors.New("unlock condition policies cannot be composed") } } return PolicyThreshold(n, of), nil + case opOpaque: + var p PolicyTypeOpaque + (*Address)(&p).DecodeFrom(d) + return SpendPolicy{p}, nil case opUnlockConditions: var uc UnlockConditions uc.DecodeFrom(d) diff --git a/types/policy.go b/types/policy.go index ef45cd7a..d8480918 100644 --- a/types/policy.go +++ b/types/policy.go @@ -39,11 +39,24 @@ type PolicyTypeThreshold struct { } // PolicyThreshold returns a policy that requires at least N sub-policies to be -// satisfied. +// satisfied. When satisfying a threshold policy, all unsatisfied sub-policies +// must be replaced with PolicyOpaque. func PolicyThreshold(n uint8, of []SpendPolicy) SpendPolicy { return SpendPolicy{PolicyTypeThreshold{n, of}} } +// PolicyTypeOpaque is the opaque hash of a policy. It is not satisfiable. +type PolicyTypeOpaque Address + +// PolicyOpaque returns a policy with the same address as p, but without its +// semantics. +func PolicyOpaque(p SpendPolicy) SpendPolicy { + if _, ok := p.Type.(PolicyTypeOpaque); ok { + return p + } + return SpendPolicy{PolicyTypeOpaque(p.Address())} +} + // AnyoneCanSpend returns a policy that has no requirements. 
func AnyoneCanSpend() SpendPolicy { return PolicyThreshold(0, nil) @@ -57,6 +70,7 @@ type PolicyTypeUnlockConditions UnlockConditions func (PolicyTypeAbove) isPolicy() {} func (PolicyTypePublicKey) isPolicy() {} func (PolicyTypeThreshold) isPolicy() {} +func (PolicyTypeOpaque) isPolicy() {} func (PolicyTypeUnlockConditions) isPolicy() {} // Address computes the opaque address for a given policy. @@ -70,6 +84,13 @@ func (p SpendPolicy) Address() Address { defer hasherPool.Put(h) h.Reset() h.E.WriteString("sia/address|") + if pt, ok := p.Type.(PolicyTypeThreshold); ok { + pt.Of = append([]SpendPolicy(nil), pt.Of...) + for i := range pt.Of { + pt.Of[i] = PolicyOpaque(pt.Of[i]) + } + p = SpendPolicy{pt} + } p.EncodeTo(h.E) return Address(h.Sum()) } @@ -77,6 +98,10 @@ func (p SpendPolicy) Address() Address { // String implements fmt.Stringer. func (p SpendPolicy) String() string { var sb strings.Builder + writeHex := func(p []byte) { + sb.WriteString("0x") + sb.WriteString(hex.EncodeToString(p)) + } switch p := p.Type.(type) { case PolicyTypeAbove: sb.WriteString("above(") @@ -85,7 +110,7 @@ func (p SpendPolicy) String() string { case PolicyTypePublicKey: sb.WriteString("pk(") - sb.WriteString(hex.EncodeToString(p[:])) + writeHex(p[:]) sb.WriteByte(')') case PolicyTypeThreshold: @@ -100,6 +125,11 @@ func (p SpendPolicy) String() string { } sb.WriteString("])") + case PolicyTypeOpaque: + sb.WriteString("opaque(") + writeHex(p[:]) + sb.WriteByte(')') + case PolicyTypeUnlockConditions: sb.WriteString("uc(") sb.WriteString(strconv.FormatUint(p.Timelock, 10)) @@ -108,7 +138,7 @@ func (p SpendPolicy) String() string { if i > 0 { sb.WriteByte(',') } - sb.WriteString(hex.EncodeToString(pk.Key[:])) + writeHex(pk.Key[:]) } sb.WriteString("],") sb.WriteString(strconv.FormatUint(uint64(p.SignaturesRequired), 10)) @@ -161,11 +191,14 @@ func ParseSpendPolicy(s string) (SpendPolicy, error) { t := nextToken() if err != nil { return - } else if len(t) != 64 { + } else if len(t) != 66 { 
err = fmt.Errorf("invalid pubkey length (%d)", len(t)) return + } else if t[:2] != "0x" { + err = fmt.Errorf("invalid pubkey prefix %q", t[:2]) + return } - _, err = hex.Decode(pk[:], []byte(t)) + _, err = hex.Decode(pk[:], []byte(t[2:])) return } var parseSpendPolicy func() SpendPolicy @@ -191,6 +224,8 @@ func ParseSpendPolicy(s string) (SpendPolicy, error) { } consume(']') return PolicyThreshold(uint8(n), of) + case "opaque": + return SpendPolicy{PolicyTypeOpaque(parsePubkey())} case "uc": timelock := parseInt(64) consume(',') diff --git a/types/policy_test.go b/types/policy_test.go new file mode 100644 index 00000000..fb4cd69a --- /dev/null +++ b/types/policy_test.go @@ -0,0 +1,95 @@ +package types + +import ( + "bytes" + "testing" +) + +func roundtrip(from EncoderTo, to DecoderFrom) { + var buf bytes.Buffer + e := NewEncoder(&buf) + from.EncodeTo(e) + e.Flush() + d := NewBufDecoder(buf.Bytes()) + to.DecodeFrom(d) + if d.Err() != nil { + panic(d.Err()) + } +} + +func TestPolicyGolden(t *testing.T) { + p := SpendPolicy{PolicyTypeUnlockConditions{ + PublicKeys: []UnlockKey{PublicKey{1, 2, 3}.UnlockKey()}, + }} + if p.Address().String() != "addr:9ca6476864f75dff7908dadf137fb0e8044213f49935428adcf1070c71f512c62462150f0186" { + t.Fatal("wrong address:", p, p.Address()) + } + + p = PolicyThreshold(2, []SpendPolicy{ + PolicyAbove(100), + PolicyPublicKey(PublicKey{1, 2, 3}), + PolicyThreshold(2, []SpendPolicy{ + PolicyAbove(200), + PolicyPublicKey(PublicKey{4, 5, 6}), + }), + }) + if p.Address().String() != "addr:6079542a7cdabf033c500a3f49955e1b54788f48d7da08a84617236124540f958833ddf29445" { + t.Fatal("wrong address:", p, p.Address()) + } +} + +func TestPolicyOpaque(t *testing.T) { + sub := []SpendPolicy{ + PolicyAbove(100), + PolicyPublicKey(PublicKey{1, 2, 3}), + PolicyThreshold(2, []SpendPolicy{ + PolicyAbove(200), + PolicyPublicKey(PublicKey{4, 5, 6}), + }), + } + p := PolicyThreshold(2, sub) + addr := p.Address() + + for i := range sub { + sub[i] = 
PolicyOpaque(sub[i]) + p = PolicyThreshold(2, sub) + if p.Address() != addr { + t.Fatal("opaque policy should have same address") + } + } +} + +func TestPolicyRoundtrip(t *testing.T) { + for _, p := range []SpendPolicy{ + PolicyAbove(100), + + PolicyPublicKey(PublicKey{1, 2, 3}), + + PolicyThreshold(2, []SpendPolicy{ + PolicyAbove(100), + PolicyPublicKey(PublicKey{1, 2, 3}), + PolicyThreshold(2, []SpendPolicy{ + PolicyAbove(200), + PolicyPublicKey(PublicKey{4, 5, 6}), + }), + }), + + PolicyOpaque(PolicyPublicKey(PublicKey{1, 2, 3})), + + {PolicyTypeUnlockConditions{PublicKeys: []UnlockKey{PublicKey{1, 2, 3}.UnlockKey()}}}, + } { + var p2 SpendPolicy + roundtrip(p, &p2) + if p.Address() != p2.Address() { + t.Fatal("policy did not survive roundtrip") + } + + s := p.String() + p2, err := ParseSpendPolicy(s) + if err != nil { + t.Fatal(err) + } else if p.Address() != p2.Address() { + t.Fatal("policy did not survive roundtrip") + } + } +} From 62069cc74e92c6804c69333a1b1e51a7be0f363c Mon Sep 17 00:00:00 2001 From: lukechampine Date: Thu, 24 Aug 2023 10:46:58 -0400 Subject: [PATCH 29/53] types,consensus: Move SpendPolicy verification to types I've gone back and forth on this, but the justification here is that spend policies are their own "thing," separate from Sia consensus. Since they're versioned, we can change behavior by defining a new version and adding a consensus rule banning the old version. 
--- consensus/validation.go | 62 ++------------------------------------- types/encoding.go | 8 +----- types/policy.go | 64 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+), 67 deletions(-) diff --git a/consensus/validation.go b/consensus/validation.go index 75079dcd..24c21a1b 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -662,64 +662,6 @@ func validateV2CurrencyValues(ms *MidState, txn types.V2Transaction) error { return nil } -func validateSpendPolicy(s State, p types.SpendPolicy, sigHash types.Hash256, sigs []types.Signature) error { - nextSig := func() (sig types.Signature, ok bool) { - if ok = len(sigs) > 0; ok { - sig, sigs = sigs[0], sigs[1:] - } - return - } - var verify func(types.SpendPolicy) error - verify = func(p types.SpendPolicy) error { - switch p := p.Type.(type) { - case types.PolicyTypeAbove: - if s.Index.Height > uint64(p) { - return nil - } - return fmt.Errorf("height not above %v", uint64(p)) - case types.PolicyTypePublicKey: - sig, ok := nextSig() - if ok && types.PublicKey(p).VerifyHash(sigHash, sig) { - return nil - } - return errors.New("signature does not match pubkey") - case types.PolicyTypeThreshold: - for i := 0; i < len(p.Of) && p.N > 0 && len(p.Of[i:]) >= int(p.N); i++ { - if verify(p.Of[i]) == nil { - p.N-- - } - } - if p.N == 0 { - return nil - } - return errors.New("threshold not reached") - case types.PolicyTypeOpaque: - return errors.New("opaque policy") - case types.PolicyTypeUnlockConditions: - if err := verify(types.PolicyAbove(p.Timelock)); err != nil { - return err - } - if p.SignaturesRequired > 255 { - return fmt.Errorf("too many signatures required (%v > 255)", p.SignaturesRequired) - } - n := uint8(p.SignaturesRequired) - of := make([]types.SpendPolicy, len(p.PublicKeys)) - for i, pk := range p.PublicKeys { - if pk.Algorithm != types.SpecifierEd25519 { - return fmt.Errorf("unsupported algorithm %v", pk.Algorithm) - } else if len(pk.Key) != len(types.PublicKey{}) { - 
return fmt.Errorf("invalid Ed25519 key length %v", len(pk.Key)) - } - of[i] = types.PolicyPublicKey(*(*types.PublicKey)(pk.Key)) - } - return verify(types.PolicyThreshold(n, of)) - default: - panic("invalid policy type") // developer error - } - } - return verify(p) -} - func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { sigHash := ms.base.InputSigHash(txn) for i, sci := range txn.SiacoinInputs { @@ -742,7 +684,7 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { // check spend policy if sci.SpendPolicy.Address() != sci.Parent.Address { return fmt.Errorf("siacoin input %v claims incorrect policy for parent address", i) - } else if err := validateSpendPolicy(ms.base, sci.SpendPolicy, sigHash, sci.Signatures); err != nil { + } else if err := sci.SpendPolicy.Verify(ms.base.Index.Height, sigHash, sci.Signatures); err != nil { return fmt.Errorf("siacoin input %v failed to satisfy spend policy: %w", i, err) } } @@ -798,7 +740,7 @@ func validateV2Siafunds(ms *MidState, txn types.V2Transaction) error { // check spend policy if sci.SpendPolicy.Address() != sci.Parent.Address { return fmt.Errorf("siafund input %v claims incorrect policy for parent address", i) - } else if err := validateSpendPolicy(ms.base, sci.SpendPolicy, sigHash, sci.Signatures); err != nil { + } else if err := sci.SpendPolicy.Verify(ms.base.Index.Height, sigHash, sci.Signatures); err != nil { return fmt.Errorf("siafund input %v failed to satisfy spend policy: %w", i, err) } } diff --git a/types/encoding.go b/types/encoding.go index 5d7fa10f..333b5443 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -459,9 +459,6 @@ func (p SpendPolicy) encodePolicy(e *Encoder) { e.WriteUint8(p.N) e.WriteUint8(uint8(len(p.Of))) for i := range p.Of { - if _, ok := p.Of[i].Type.(PolicyTypeUnlockConditions); ok { - panic("unlock condition policies cannot be composed") - } p.Of[i].encodePolicy(e) } case PolicyTypeOpaque: @@ -1008,11 +1005,8 @@ func (p *SpendPolicy) 
DecodeFrom(d *Decoder) { } var err error for i := range of { - of[i], err = readPolicy() - if err != nil { + if of[i], err = readPolicy(); err != nil { return SpendPolicy{}, err - } else if _, ok := of[i].Type.(PolicyTypeUnlockConditions); ok { - return SpendPolicy{}, errors.New("unlock condition policies cannot be composed") } } return PolicyThreshold(n, of), nil diff --git a/types/policy.go b/types/policy.go index d8480918..c8e81358 100644 --- a/types/policy.go +++ b/types/policy.go @@ -3,6 +3,7 @@ package types import ( "bytes" "encoding/hex" + "errors" "fmt" "io" "strconv" @@ -95,6 +96,69 @@ func (p SpendPolicy) Address() Address { return Address(h.Sum()) } +// Verify verifies that p is satisfied by the supplied inputs. +func (p SpendPolicy) Verify(height uint64, sigHash Hash256, sigs []Signature) error { + nextSig := func() (sig Signature, ok bool) { + if ok = len(sigs) > 0; ok { + sig, sigs = sigs[0], sigs[1:] + } + return + } + errInvalidSignature := errors.New("invalid signature") + var verify func(SpendPolicy) error + verify = func(p SpendPolicy) error { + switch p := p.Type.(type) { + case PolicyTypeAbove: + if height > uint64(p) { + return nil + } + return fmt.Errorf("height not above %v", uint64(p)) + case PolicyTypePublicKey: + sig, ok := nextSig() + if ok && PublicKey(p).VerifyHash(sigHash, sig) { + return nil + } + return errInvalidSignature + case PolicyTypeThreshold: + for i := 0; i < len(p.Of) && p.N > 0 && len(p.Of[i:]) >= int(p.N); i++ { + if _, ok := p.Of[i].Type.(PolicyTypeUnlockConditions); ok { + return errors.New("unlock conditions cannot be sub-policies") + } else if err := verify(p.Of[i]); err == errInvalidSignature { + return err // fatal; should have been opaque + } else if err == nil { + p.N-- + } + } + if p.N == 0 { + return nil + } + return errors.New("threshold not reached") + case PolicyTypeOpaque: + return errors.New("opaque policy") + case PolicyTypeUnlockConditions: + if err := verify(PolicyAbove(p.Timelock)); err != nil { + 
return err + } else if p.SignaturesRequired > 255 { + return fmt.Errorf("too many signatures required (%v > 255)", p.SignaturesRequired) + } + n := uint8(p.SignaturesRequired) + of := make([]SpendPolicy, len(p.PublicKeys)) + for i, pk := range p.PublicKeys { + if pk.Algorithm != SpecifierEd25519 { + return fmt.Errorf("unsupported algorithm %v", pk.Algorithm) + } else if len(pk.Key) != len(PublicKey{}) { + return fmt.Errorf("invalid Ed25519 key length %v", len(pk.Key)) + } + of[i] = PolicyPublicKey(*(*PublicKey)(pk.Key)) + } + return verify(PolicyThreshold(n, of)) + default: + panic("invalid policy type") // developer error + } + } + return verify(p) +} + // String implements fmt.Stringer. func (p SpendPolicy) String() string { var sb strings.Builder From f9fa0cbf1174b4bdafd0a672c555f53d5dfa7c44 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Thu, 24 Aug 2023 11:14:18 -0400 Subject: [PATCH 30/53] types: Add (*Hasher).WriteDistinguisher I only just realized that WriteString was causing us to write an 8-byte length prefix for each distinguisher. This isn't terrible, but it does feel unnecessary. Adding a special method also makes it easier to enforce a consistent prefix format. 
--- consensus/merkle.go | 12 ++++++------ consensus/state.go | 10 +++++----- types/hash.go | 5 +++++ types/policy.go | 2 +- types/policy_test.go | 5 ++++- types/types.go | 2 +- 6 files changed, 22 insertions(+), 14 deletions(-) diff --git a/consensus/merkle.go b/consensus/merkle.go index 1b06343d..bae23f61 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -78,7 +78,7 @@ func chainIndexLeaf(e *types.ChainIndexElement) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/leaf/chainindex|") + h.WriteDistinguisher("leaf/chainindex") e.StateElement.ID.EncodeTo(h.E) e.ChainIndex.EncodeTo(h.E) return elementLeaf{ @@ -93,7 +93,7 @@ func siacoinLeaf(e *types.SiacoinElement, spent bool) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/leaf/siacoin|") + h.WriteDistinguisher("leaf/siacoin") e.ID.EncodeTo(h.E) e.SiacoinOutput.EncodeTo(h.E) h.E.WriteUint64(e.MaturityHeight) @@ -109,7 +109,7 @@ func siafundLeaf(e *types.SiafundElement, spent bool) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/leaf/siafund|") + h.WriteDistinguisher("leaf/siafund") e.ID.EncodeTo(h.E) e.SiafundOutput.EncodeTo(h.E) e.ClaimStart.EncodeTo(h.E) @@ -125,7 +125,7 @@ func fileContractLeaf(e *types.FileContractElement, spent bool) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/leaf/filecontract|") + h.WriteDistinguisher("leaf/filecontract") e.ID.EncodeTo(h.E) e.FileContract.EncodeTo(h.E) return elementLeaf{ @@ -140,7 +140,7 @@ func v2FileContractLeaf(e *types.V2FileContractElement, spent bool) elementLeaf h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/leaf/v2filecontract|") + h.WriteDistinguisher("leaf/v2filecontract") e.ID.EncodeTo(h.E) e.V2FileContract.EncodeTo(h.E) return elementLeaf{ @@ -155,7 +155,7 @@ func 
attestationLeaf(e *types.AttestationElement) elementLeaf { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/leaf/attestation|") + h.WriteDistinguisher("leaf/attestation") e.StateElement.ID.EncodeTo(h.E) e.Attestation.EncodeTo(h.E) return elementLeaf{ diff --git a/consensus/state.go b/consensus/state.go index 28cca9cc..af21cf45 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -520,7 +520,7 @@ func (s State) Commitment(minerAddr types.Address, txns []types.Transaction, v2t // concatenate the hashes and the miner address h.Reset() - h.E.WriteString("sia/commitment|") + h.WriteDistinguisher("commitment") h.E.WriteUint8(s.v2ReplayPrefix()) stateHash.EncodeTo(h.E) minerAddr.EncodeTo(h.E) @@ -535,7 +535,7 @@ func (s State) InputSigHash(txn types.V2Transaction) types.Hash256 { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/id/transaction|") + h.WriteDistinguisher("id/transaction") h.E.WriteUint8(s.v2ReplayPrefix()) h.E.WritePrefix(len(txn.SiacoinInputs)) for _, in := range txn.SiacoinInputs { @@ -589,7 +589,7 @@ func (s State) ContractSigHash(fc types.V2FileContract) types.Hash256 { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/sig/filecontract|") + h.WriteDistinguisher("sig/filecontract") h.E.WriteUint8(s.v2ReplayPrefix()) h.E.WriteUint64(fc.Filesize) fc.FileMerkleRoot.EncodeTo(h.E) @@ -609,7 +609,7 @@ func (s State) RenewalSigHash(fcr types.V2FileContractRenewal) types.Hash256 { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/sig/filecontractrenewal|") + h.WriteDistinguisher("sig/filecontractrenewal") h.E.WriteUint8(s.v2ReplayPrefix()) fcr.FinalRevision.EncodeTo(h.E) fcr.InitialRevision.EncodeTo(h.E) @@ -623,7 +623,7 @@ func (s State) AttestationSigHash(a types.Attestation) types.Hash256 { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - 
h.E.WriteString("sia/sig/attestation|") + h.WriteDistinguisher("sig/attestation") h.E.WriteUint8(s.v2ReplayPrefix()) a.PublicKey.EncodeTo(h.E) h.E.WriteString(a.Key) diff --git a/types/hash.go b/types/hash.go index 4df2d4ac..458aa3d3 100644 --- a/types/hash.go +++ b/types/hash.go @@ -25,6 +25,11 @@ func (h *Hasher) Reset() { h.h.Reset() } +// WriteDistinguisher writes a distinguisher prefix to the encoder. +func (h *Hasher) WriteDistinguisher(p string) { + h.E.Write([]byte("sia/" + p + "|")) +} + // Sum returns the digest of the objects written to the Hasher. func (h *Hasher) Sum() (sum Hash256) { _ = h.E.Flush() // no error possible diff --git a/types/policy.go b/types/policy.go index c8e81358..42de21c3 100644 --- a/types/policy.go +++ b/types/policy.go @@ -84,7 +84,7 @@ func (p SpendPolicy) Address() Address { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/address|") + h.WriteDistinguisher("address") if pt, ok := p.Type.(PolicyTypeThreshold); ok { pt.Of = append([]SpendPolicy(nil), pt.Of...) 
for i := range pt.Of { diff --git a/types/policy_test.go b/types/policy_test.go index fb4cd69a..36623fae 100644 --- a/types/policy_test.go +++ b/types/policy_test.go @@ -24,6 +24,9 @@ func TestPolicyGolden(t *testing.T) { if p.Address().String() != "addr:9ca6476864f75dff7908dadf137fb0e8044213f49935428adcf1070c71f512c62462150f0186" { t.Fatal("wrong address:", p, p.Address()) } + if StandardAddress(PublicKey{1, 2, 3}) != PolicyPublicKey(PublicKey{1, 2, 3}).Address() { + t.Fatal("StandardAddress differs from Policy.Address") + } p = PolicyThreshold(2, []SpendPolicy{ PolicyAbove(100), @@ -33,7 +36,7 @@ func TestPolicyGolden(t *testing.T) { PolicyPublicKey(PublicKey{4, 5, 6}), }), }) - if p.Address().String() != "addr:6079542a7cdabf033c500a3f49955e1b54788f48d7da08a84617236124540f958833ddf29445" { + if p.Address().String() != "addr:2fb1e5d351aea601e5b507f1f5e021a6aff363951850983f0d930361d17f8ba507f19a409e21" { t.Fatal("wrong address:", p, p.Address()) } } diff --git a/types/types.go b/types/types.go index 2bb16ec9..c96e7a71 100644 --- a/types/types.go +++ b/types/types.go @@ -675,7 +675,7 @@ func (txn *V2Transaction) ID() TransactionID { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) h.Reset() - h.E.WriteString("sia/id/transaction|") + h.WriteDistinguisher("id/transaction") h.E.WritePrefix(len(txn.SiacoinInputs)) for _, in := range txn.SiacoinInputs { in.Parent.ID.EncodeTo(h.E) From b4b199140223ee17b64d1aedc5ec62edb87a1a6e Mon Sep 17 00:00:00 2001 From: lukechampine Date: Thu, 24 Aug 2023 11:43:25 -0400 Subject: [PATCH 31/53] types: Add policy verification tests --- types/hash.go | 3 +- types/policy_test.go | 197 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 192 insertions(+), 8 deletions(-) diff --git a/types/hash.go b/types/hash.go index 458aa3d3..59991bcc 100644 --- a/types/hash.go +++ b/types/hash.go @@ -63,7 +63,8 @@ func StandardAddress(pk PublicKey) Address { return Address(blake2b.Sum256(buf)) } -// StandardUnlockHash returns the standard 
UnlockHash derived from pk. +// StandardUnlockHash returns the standard UnlockHash derived from pk. It is equivalent to +// SpendPolicy{PolicyUnlockConditions(StandardUnlockConditions(pk))}.Address(). func StandardUnlockHash(pk PublicKey) Address { // An UnlockHash is the Merkle root of UnlockConditions. Since the standard // UnlockConditions use a single public key, the Merkle tree is: diff --git a/types/policy_test.go b/types/policy_test.go index 36623fae..ad15ae52 100644 --- a/types/policy_test.go +++ b/types/policy_test.go @@ -17,20 +17,203 @@ func roundtrip(from EncoderTo, to DecoderFrom) { } } +func TestPolicyVerify(t *testing.T) { + key := GeneratePrivateKey() + pk := key.PublicKey() + sigHash := Hash256{1, 2, 3} + + for _, test := range []struct { + p SpendPolicy + height uint64 + sigs []Signature + valid bool + }{ + { + PolicyAbove(0), + 0, + nil, + false, + }, + { + PolicyAbove(0), + 1, + nil, + true, + }, + { + PolicyPublicKey(pk), + 1, + nil, + false, + }, + { + PolicyPublicKey(pk), + 1, + []Signature{key.SignHash(Hash256{})}, + false, + }, + { + PolicyPublicKey(pk), + 1, + []Signature{key.SignHash(sigHash)}, + true, + }, + { + PolicyThreshold(2, []SpendPolicy{ + PolicyAbove(10), + PolicyPublicKey(pk), + }), + 1, + []Signature{key.SignHash(sigHash)}, + false, + }, + { + PolicyThreshold(2, []SpendPolicy{ + PolicyAbove(10), + PolicyPublicKey(pk), + }), + 11, + nil, + false, + }, + { + PolicyThreshold(2, []SpendPolicy{ + PolicyAbove(10), + PolicyPublicKey(pk), + }), + 11, + []Signature{key.SignHash(sigHash)}, + true, + }, + { + PolicyThreshold(1, []SpendPolicy{ + PolicyAbove(10), + PolicyPublicKey(pk), + }), + 11, + []Signature{key.SignHash(sigHash)}, + true, + }, + { + PolicyThreshold(1, []SpendPolicy{ + PolicyAbove(10), + PolicyOpaque(PolicyPublicKey(pk)), + }), + 11, + []Signature{key.SignHash(sigHash)}, + true, + }, + { + PolicyThreshold(1, []SpendPolicy{ + PolicyOpaque(PolicyAbove(10)), + PolicyPublicKey(pk), + }), + 11, + 
[]Signature{key.SignHash(sigHash)}, + true, + }, + { + PolicyThreshold(1, []SpendPolicy{ + PolicyOpaque(PolicyAbove(10)), + PolicyOpaque(PolicyPublicKey(pk)), + }), + 11, + []Signature{key.SignHash(sigHash)}, + false, + }, + { + PolicyThreshold(1, []SpendPolicy{ + {PolicyTypeUnlockConditions{ + PublicKeys: []UnlockKey{pk.UnlockKey()}, + SignaturesRequired: 1, + }}, + }), + 1, + []Signature{key.SignHash(sigHash)}, + false, + }, + { + SpendPolicy{PolicyTypeUnlockConditions{ + Timelock: 10, + }}, + 1, + nil, + false, + }, + { + SpendPolicy{PolicyTypeUnlockConditions{ + SignaturesRequired: 1000, + }}, + 1, + nil, + false, + }, + { + SpendPolicy{PolicyTypeUnlockConditions{ + PublicKeys: []UnlockKey{{ + Algorithm: SpecifierEntropy, + Key: nil, + }}, + SignaturesRequired: 1, + }}, + 1, + []Signature{key.SignHash(sigHash)}, + false, + }, + { + SpendPolicy{PolicyTypeUnlockConditions{ + PublicKeys: []UnlockKey{{ + Algorithm: SpecifierEd25519, + Key: nil, + }}, + SignaturesRequired: 1, + }}, + 1, + []Signature{key.SignHash(sigHash)}, + false, + }, + { + SpendPolicy{PolicyTypeUnlockConditions{ + PublicKeys: []UnlockKey{pk.UnlockKey()}, + SignaturesRequired: 2, + }}, + 1, + []Signature{key.SignHash(sigHash)}, + false, + }, + { + SpendPolicy{PolicyTypeUnlockConditions{ + PublicKeys: []UnlockKey{pk.UnlockKey()}, + SignaturesRequired: 1, + }}, + 1, + []Signature{key.SignHash(sigHash)}, + true, + }, + } { + if err := test.p.Verify(test.height, sigHash, test.sigs); err != nil && test.valid { + t.Fatal(err) + } else if err == nil && !test.valid { + t.Fatal("expected error") + } + } +} + func TestPolicyGolden(t *testing.T) { - p := SpendPolicy{PolicyTypeUnlockConditions{ - PublicKeys: []UnlockKey{PublicKey{1, 2, 3}.UnlockKey()}, - }} - if p.Address().String() != "addr:9ca6476864f75dff7908dadf137fb0e8044213f49935428adcf1070c71f512c62462150f0186" { + pk := PublicKey{1, 2, 3} + p := SpendPolicy{PolicyTypeUnlockConditions(StandardUnlockConditions(pk))} + if p.Address().String() != 
"addr:72b0762b382d4c251af5ae25b6777d908726d75962e5224f98d7f619bb39515dd64b9a56043a" { t.Fatal("wrong address:", p, p.Address()) + } else if StandardUnlockHash(pk) != p.Address() { + t.Fatal("StandardUnlockHash differs from Policy.Address") } - if StandardAddress(PublicKey{1, 2, 3}) != PolicyPublicKey(PublicKey{1, 2, 3}).Address() { + if StandardAddress(pk) != PolicyPublicKey(pk).Address() { t.Fatal("StandardAddress differs from Policy.Address") } p = PolicyThreshold(2, []SpendPolicy{ PolicyAbove(100), - PolicyPublicKey(PublicKey{1, 2, 3}), + PolicyPublicKey(pk), PolicyThreshold(2, []SpendPolicy{ PolicyAbove(200), PolicyPublicKey(PublicKey{4, 5, 6}), @@ -79,7 +262,7 @@ func TestPolicyRoundtrip(t *testing.T) { PolicyOpaque(PolicyPublicKey(PublicKey{1, 2, 3})), - {PolicyTypeUnlockConditions{PublicKeys: []UnlockKey{PublicKey{1, 2, 3}.UnlockKey()}}}, + {PolicyTypeUnlockConditions{PublicKeys: []UnlockKey{PublicKey{1, 2, 3}.UnlockKey(), PublicKey{4, 5, 6}.UnlockKey()}}}, } { var p2 SpendPolicy roundtrip(p, &p2) From 61acf7388880d71718646d19c918b48b94a1d23b Mon Sep 17 00:00:00 2001 From: lukechampine Date: Thu, 24 Aug 2023 17:44:58 -0400 Subject: [PATCH 32/53] types: Add (*Block).V2Transactions helper --- chain/manager.go | 6 ++---- consensus/update.go | 6 ++---- consensus/validation.go | 6 ++---- types/hash.go | 8 +------- types/types.go | 18 +++++++++++------- 5 files changed, 18 insertions(+), 26 deletions(-) diff --git a/chain/manager.go b/chain/manager.go index d259fcfd..13bb1ec1 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -586,10 +586,8 @@ func (m *Manager) computeMedianFee() types.Currency { for _, txn := range b.Transactions { fees = append(fees, weightedFee{cs.TransactionWeight(txn), txn.TotalFees()}) } - if b.V2 != nil { - for _, txn := range b.V2.Transactions { - fees = append(fees, weightedFee{cs.V2TransactionWeight(txn), txn.MinerFee}) - } + for _, txn := range b.V2Transactions() { + fees = append(fees, weightedFee{cs.V2TransactionWeight(txn), 
txn.MinerFee}) } // account for the remaining space in the block, for which no fees were paid remaining := cs.MaxBlockWeight() diff --git a/consensus/update.go b/consensus/update.go index e4b82e9a..f3809500 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -535,10 +535,8 @@ func (ms *MidState) ApplyBlock(b types.Block, bs V1BlockSupplement) { for i, txn := range b.Transactions { ms.ApplyTransaction(txn, bs.Transactions[i]) } - if b.V2 != nil { - for _, txn := range b.V2.Transactions { - ms.ApplyV2Transaction(txn) - } + for _, txn := range b.V2Transactions() { + ms.ApplyV2Transaction(txn) } bid := b.ID() for i, sco := range b.MinerPayouts { diff --git a/consensus/validation.go b/consensus/validation.go index 24c21a1b..86ec7d00 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -82,10 +82,8 @@ func ValidateOrphan(s State, b types.Block) error { for _, txn := range b.Transactions { weight += s.TransactionWeight(txn) } - if b.V2 != nil { - for _, txn := range b.V2.Transactions { - weight += s.V2TransactionWeight(txn) - } + for _, txn := range b.V2Transactions() { + weight += s.V2TransactionWeight(txn) } if weight > s.MaxBlockWeight() { return fmt.Errorf("block exceeds maximum weight (%v > %v)", weight, s.MaxBlockWeight()) diff --git a/types/hash.go b/types/hash.go index 59991bcc..5f69e2a2 100644 --- a/types/hash.go +++ b/types/hash.go @@ -125,7 +125,7 @@ func unlockConditionsRoot(uc UnlockConditions) Address { return acc.Root() } -func blockMerkleRoot(minerPayouts []SiacoinOutput, txns []Transaction, v2txns []V2Transaction) Hash256 { +func blockMerkleRoot(minerPayouts []SiacoinOutput, txns []Transaction) Hash256 { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) var acc blake2b.Accumulator @@ -141,11 +141,5 @@ func blockMerkleRoot(minerPayouts []SiacoinOutput, txns []Transaction, v2txns [] txn.EncodeTo(h.E) acc.AddLeaf(h.Sum()) } - for _, txn := range v2txns { - h.Reset() - h.E.WriteUint8(leafHashPrefix) - txn.EncodeTo(h.E) - 
acc.AddLeaf(h.Sum()) - } return acc.Root() } diff --git a/types/types.go b/types/types.go index c96e7a71..fadd5ec8 100644 --- a/types/types.go +++ b/types/types.go @@ -816,11 +816,15 @@ type Block struct { // MerkleRoot returns the Merkle root of the block's miner payouts and // transactions. func (b *Block) MerkleRoot() Hash256 { - var v2txns []V2Transaction + return blockMerkleRoot(b.MinerPayouts, b.Transactions) +} + +// V2Transactions returns the block's v2 transactions, if present. +func (b *Block) V2Transactions() []V2Transaction { if b.V2 != nil { - v2txns = b.V2.Transactions + return b.V2.Transactions } - return blockMerkleRoot(b.MinerPayouts, b.Transactions, v2txns) + return nil } // ID returns a hash that uniquely identifies a block. @@ -828,13 +832,13 @@ func (b *Block) ID() BlockID { buf := make([]byte, 32+8+8+32) binary.LittleEndian.PutUint64(buf[32:], b.Nonce) binary.LittleEndian.PutUint64(buf[40:], uint64(b.Timestamp.Unix())) - if b.V2 == nil { + if b.V2 != nil { + copy(buf[:32], "sia/id/block|") + copy(buf[48:], b.V2.Commitment[:]) + } else { root := b.MerkleRoot() // NOTE: expensive! copy(buf[:32], b.ParentID[:]) copy(buf[48:], root[:]) - } else { - copy(buf[:32], "sia/id/block|") - copy(buf[48:], b.V2.Commitment[:]) } return BlockID(HashBytes(buf)) } From d201766bf762511ac40c2919c1b0ed7943292201 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 29 Aug 2023 12:05:13 -0400 Subject: [PATCH 33/53] types: Un-embed fields from Element types Embedding is convenient, but often causes surprising behavior -- or outright bugs! -- via method promotion. Most commonly, this affects marshalling, e.g. the embedded field's MarshalJSON method is used to marshal the entire struct, causing the other fields to be silently dropped. I swear, every time I think "well, in *this* case, embedding the field is worth it," I turn out to be wrong. Apparently I will never stop making this mistake. :| If you see me push code with embedded fields, please slap me. 
--- chain/db.go | 22 +++++++-------- chain/manager.go | 4 +-- consensus/state.go | 4 +-- consensus/update.go | 13 ++++----- consensus/validation.go | 59 +++++++++++++++++++++-------------------- types/encoding.go | 4 +-- types/types.go | 30 ++++++++++----------- 7 files changed, 69 insertions(+), 67 deletions(-) diff --git a/chain/db.go b/chain/db.go index a91a5e04..b4a53b99 100644 --- a/chain/db.go +++ b/chain/db.go @@ -386,16 +386,16 @@ func (db *DBStore) applyElements(cau consensus.ApplyUpdate) { cau.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool) { if resolved { db.deleteFileContractElement(types.FileContractID(fce.ID)) - db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) + db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.FileContract.WindowEnd) } else if rev != nil { db.putFileContractElement(*rev) - if rev.WindowEnd != fce.WindowEnd { - db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) - db.putFileContractExpiration(types.FileContractID(fce.ID), rev.WindowEnd) + if rev.FileContract.WindowEnd != fce.FileContract.WindowEnd { + db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.FileContract.WindowEnd) + db.putFileContractExpiration(types.FileContractID(fce.ID), rev.FileContract.WindowEnd) } } else { db.putFileContractElement(fce) - db.putFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) + db.putFileContractExpiration(types.FileContractID(fce.ID), fce.FileContract.WindowEnd) } db.putElementProof(fce.StateElement) }) @@ -406,20 +406,20 @@ func (db *DBStore) revertElements(cru consensus.RevertUpdate) { if resolved { // contract no longer resolved; restore it db.putFileContractElement(fce) - db.putFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) + db.putFileContractExpiration(types.FileContractID(fce.ID), fce.FileContract.WindowEnd) db.putElementProof(fce.StateElement) } else if rev != 
nil { // contract no longer revised; restore prior revision db.putFileContractElement(fce) - if rev.WindowEnd != fce.WindowEnd { - db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) - db.putFileContractExpiration(types.FileContractID(fce.ID), rev.WindowEnd) + if rev.FileContract.WindowEnd != fce.FileContract.WindowEnd { + db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.FileContract.WindowEnd) + db.putFileContractExpiration(types.FileContractID(fce.ID), rev.FileContract.WindowEnd) } db.putElementProof(fce.StateElement) } else { // contract no longer exists; delete it db.deleteFileContractElement(types.FileContractID(fce.ID)) - db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.WindowEnd) + db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.FileContract.WindowEnd) } }) cru.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { @@ -486,7 +486,7 @@ func (db *DBStore) SupplementTipTransaction(txn types.Transaction) (ts consensus } for _, sp := range txn.StorageProofs { if fce, ok := db.getFileContractElement(sp.ParentID, numLeaves); ok { - if windowIndex, ok := db.BestIndex(fce.WindowStart - 1); ok { + if windowIndex, ok := db.BestIndex(fce.FileContract.WindowStart - 1); ok { ts.ValidFileContracts = append(ts.ValidFileContracts, fce) ts.StorageProofBlockIDs = append(ts.StorageProofBlockIDs, windowIndex.ID) } diff --git a/chain/manager.go b/chain/manager.go index 13bb1ec1..90f46657 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -705,7 +705,7 @@ func (m *Manager) applyPoolUpdate(cau consensus.ApplyUpdate) { for i := range txn.FileContractResolutions { cau.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) if sp, ok := txn.FileContractResolutions[i].Resolution.(types.V2StorageProof); ok { - cau.UpdateElementProof(&sp.ProofStart.StateElement) + cau.UpdateElementProof(&sp.ProofIndex.StateElement) txn.FileContractResolutions[i].Resolution = sp } } @@ -769,7 
+769,7 @@ func (m *Manager) revertPoolUpdate(cru consensus.RevertUpdate) { for i := range txn.FileContractResolutions { cru.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) if sp, ok := txn.FileContractResolutions[i].Resolution.(types.V2StorageProof); ok { - cru.UpdateElementProof(&sp.ProofStart.StateElement) + cru.UpdateElementProof(&sp.ProofIndex.StateElement) txn.FileContractResolutions[i].Resolution = sp } } diff --git a/consensus/state.go b/consensus/state.go index af21cf45..6d381604 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -278,7 +278,7 @@ func (s State) V2TransactionWeight(txn types.V2Transaction) uint64 { for _, fcr := range txn.FileContractResolutions { fcr.Parent.MerkleProof = nil if sp, ok := fcr.Resolution.(types.V2StorageProof); ok { - sp.ProofStart.MerkleProof = nil + sp.ProofIndex.MerkleProof = nil fcr.Resolution = sp } fcr.EncodeTo(e) @@ -567,7 +567,7 @@ func (s State) InputSigHash(txn types.V2Transaction) types.Hash256 { fcr.Parent.ID.EncodeTo(h.E) // normalize proof if sp, ok := fcr.Resolution.(types.V2StorageProof); ok { - sp.ProofStart.MerkleProof = nil + sp.ProofIndex.MerkleProof = nil fcr.Resolution = sp } fcr.Resolution.(types.EncoderTo).EncodeTo(h.E) diff --git a/consensus/update.go b/consensus/update.go index f3809500..c65dd296 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -390,7 +390,7 @@ func (ms *MidState) ApplyTransaction(txn types.Transaction, ts V1TransactionSupp } for _, sfi := range txn.SiafundInputs { sfe := ms.mustSiafundElement(ts, sfi.ParentID) - claimPortion := ms.siafundPool.Sub(sfe.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfe.Value) + claimPortion := ms.siafundPool.Sub(sfe.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfe.SiafundOutput.Value) ms.spendSiafundElement(sfe, txid) ms.addSiacoinElement(types.SiacoinElement{ StateElement: types.StateElement{ID: types.Hash256(sfi.ParentID.ClaimOutputID())}, @@ -417,7 +417,7 @@ func (ms *MidState) ApplyTransaction(txn 
types.Transaction, ts V1TransactionSupp for _, sp := range txn.StorageProofs { fce := ms.mustFileContractElement(ts, sp.ParentID) ms.resolveFileContractElement(fce, txid) - for i, sco := range fce.ValidProofOutputs { + for i, sco := range fce.FileContract.ValidProofOutputs { ms.addSiacoinElement(types.SiacoinElement{ StateElement: types.StateElement{ID: types.Hash256(sp.ParentID.ValidOutputID(i))}, SiacoinOutput: sco, @@ -463,7 +463,7 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { } for _, sfi := range txn.SiafundInputs { ms.spendSiafundElement(sfi.Parent, txid) - claimPortion := ms.siafundPool.Sub(sfi.Parent.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfi.Parent.Value) + claimPortion := ms.siafundPool.Sub(sfi.Parent.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfi.Parent.SiafundOutput.Value) ms.addSiacoinElement(types.SiacoinElement{ StateElement: nextElement(), SiacoinOutput: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, @@ -490,6 +490,7 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { ms.resolveV2FileContractElement(fcr.Parent, txid) fce := fcr.Parent + fc := fce.V2FileContract var renter, host types.SiacoinOutput switch r := fcr.Resolution.(type) { case types.V2FileContractRenewal: @@ -501,11 +502,11 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { V2FileContract: r.InitialRevision, }) case types.V2StorageProof: - renter, host = fce.RenterOutput, fce.HostOutput + renter, host = fc.RenterOutput, fc.HostOutput case types.V2FileContract: // finalization renter, host = r.RenterOutput, r.HostOutput case types.V2FileContractExpiration: - renter, host = fce.RenterOutput, fce.MissedHostOutput() + renter, host = fc.RenterOutput, fc.MissedHostOutput() } ms.addSiacoinElement(types.SiacoinElement{ StateElement: nextElement(), @@ -558,7 +559,7 @@ func (ms *MidState) ApplyBlock(b types.Block, bs V1BlockSupplement) { continue } ms.resolveFileContractElement(fce, 
types.TransactionID(bid)) - for i, sco := range fce.MissedProofOutputs { + for i, sco := range fce.FileContract.MissedProofOutputs { ms.addSiacoinElement(types.SiacoinElement{ StateElement: types.StateElement{ID: types.Hash256(types.FileContractID(fce.ID).MissedOutputID(i))}, SiacoinOutput: sco, diff --git a/consensus/validation.go b/consensus/validation.go index 86ec7d00..58934297 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -283,12 +283,12 @@ func validateSiacoins(ms *MidState, txn types.Transaction, ts V1TransactionSuppl parent, ok := ms.siacoinElement(ts, sci.ParentID) if !ok { return fmt.Errorf("siacoin input %v spends nonexistent siacoin output %v", i, sci.ParentID) - } else if sci.UnlockConditions.UnlockHash() != parent.Address { + } else if sci.UnlockConditions.UnlockHash() != parent.SiacoinOutput.Address { return fmt.Errorf("siacoin input %v claims incorrect unlock conditions for siacoin output %v", i, sci.ParentID) } else if parent.MaturityHeight > ms.base.childHeight() { return fmt.Errorf("siacoin input %v has immature parent", i) } - inputSum = inputSum.Add(parent.Value) + inputSum = inputSum.Add(parent.SiacoinOutput.Value) } var outputSum types.Currency for _, out := range txn.SiacoinOutputs { @@ -317,14 +317,14 @@ func validateSiafunds(ms *MidState, txn types.Transaction, ts V1TransactionSuppl parent, ok := ms.siafundElement(ts, sfi.ParentID) if !ok { return fmt.Errorf("siafund input %v spends nonexistent siafund output %v", i, sfi.ParentID) - } else if sfi.UnlockConditions.UnlockHash() != parent.Address && + } else if sfi.UnlockConditions.UnlockHash() != parent.SiafundOutput.Address && // override old developer siafund address !(ms.base.childHeight() >= ms.base.Network.HardforkDevAddr.Height && - parent.Address == ms.base.Network.HardforkDevAddr.OldAddress && + parent.SiafundOutput.Address == ms.base.Network.HardforkDevAddr.OldAddress && sfi.UnlockConditions.UnlockHash() == ms.base.Network.HardforkDevAddr.NewAddress) { return 
fmt.Errorf("siafund input %v claims incorrect unlock conditions for siafund output %v", i, sfi.ParentID) } - inputSum += parent.Value + inputSum += parent.SiafundOutput.Value } var outputSum uint64 for _, out := range txn.SiafundOutputs { @@ -375,9 +375,9 @@ func validateFileContracts(ms *MidState, txn types.Transaction, ts V1Transaction if !ok { return fmt.Errorf("file contract revision %v revises nonexistent file contract %v", i, fcr.ParentID) } - if fcr.FileContract.RevisionNumber <= parent.RevisionNumber { + if fcr.FileContract.RevisionNumber <= parent.FileContract.RevisionNumber { return fmt.Errorf("file contract revision %v does not have a higher revision number than its parent", i) - } else if types.Hash256(fcr.UnlockConditions.UnlockHash()) != parent.UnlockHash { + } else if types.Hash256(fcr.UnlockConditions.UnlockHash()) != parent.FileContract.UnlockHash { return fmt.Errorf("file contract revision %v claims incorrect unlock conditions", i) } outputSum := func(outputs []types.SiacoinOutput) (sum types.Currency) { @@ -386,9 +386,9 @@ func validateFileContracts(ms *MidState, txn types.Transaction, ts V1Transaction } return sum } - if outputSum(fcr.FileContract.ValidProofOutputs) != outputSum(parent.ValidProofOutputs) { + if outputSum(fcr.FileContract.ValidProofOutputs) != outputSum(parent.FileContract.ValidProofOutputs) { return fmt.Errorf("file contract revision %v changes valid payout sum", i) - } else if outputSum(fcr.FileContract.MissedProofOutputs) != outputSum(parent.MissedProofOutputs) { + } else if outputSum(fcr.FileContract.MissedProofOutputs) != outputSum(parent.FileContract.MissedProofOutputs) { return fmt.Errorf("file contract revision %v changes missed payout sum", i) } } @@ -456,10 +456,11 @@ func validateFileContracts(ms *MidState, txn types.Transaction, ts V1Transaction if txid, ok := ms.spent(types.Hash256(sp.ParentID)); ok { return fmt.Errorf("storage proof %v conflicts with previous proof (in %v)", i, txid) } - fc, ok := 
ms.fileContractElement(ts, sp.ParentID) + fce, ok := ms.fileContractElement(ts, sp.ParentID) if !ok { return fmt.Errorf("storage proof %v references nonexistent file contract", i) } + fc := fce.FileContract windowID := ts.storageProofWindowID(sp.ParentID) leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, windowID, sp.ParentID) leaf := storageProofLeaf(leafIndex, fc.Filesize, sp.Leaf) @@ -680,7 +681,7 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { } // check spend policy - if sci.SpendPolicy.Address() != sci.Parent.Address { + if sci.SpendPolicy.Address() != sci.Parent.SiacoinOutput.Address { return fmt.Errorf("siacoin input %v claims incorrect policy for parent address", i) } else if err := sci.SpendPolicy.Verify(ms.base.Index.Height, sigHash, sci.Signatures); err != nil { return fmt.Errorf("siacoin input %v failed to satisfy spend policy: %w", i, err) @@ -689,7 +690,7 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { var inputSum, outputSum types.Currency for _, sci := range txn.SiacoinInputs { - inputSum = inputSum.Add(sci.Parent.Value) + inputSum = inputSum.Add(sci.Parent.SiacoinOutput.Value) } for _, out := range txn.SiacoinOutputs { outputSum = outputSum.Add(out.Value) @@ -718,34 +719,34 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { func validateV2Siafunds(ms *MidState, txn types.V2Transaction) error { sigHash := ms.base.InputSigHash(txn) - for i, sci := range txn.SiafundInputs { - if txid, ok := ms.spent(sci.Parent.ID); ok { + for i, sfi := range txn.SiafundInputs { + if txid, ok := ms.spent(sfi.Parent.ID); ok { return fmt.Errorf("siafund input %v double-spends parent output (previously spent in %v)", i, txid) } // check accumulator - if sci.Parent.LeafIndex == types.EphemeralLeafIndex { - if _, ok := ms.ephemeral[sci.Parent.ID]; !ok { - return fmt.Errorf("siafund input %v spends nonexistent ephemeral output %v", i, sci.Parent.ID) + if sfi.Parent.LeafIndex == 
types.EphemeralLeafIndex { + if _, ok := ms.ephemeral[sfi.Parent.ID]; !ok { + return fmt.Errorf("siafund input %v spends nonexistent ephemeral output %v", i, sfi.Parent.ID) } - } else if !ms.base.Elements.containsUnspentSiafundElement(sci.Parent) { - if ms.base.Elements.containsSpentSiafundElement(sci.Parent) { - return fmt.Errorf("siafund input %v double-spends output %v", i, sci.Parent.ID) + } else if !ms.base.Elements.containsUnspentSiafundElement(sfi.Parent) { + if ms.base.Elements.containsSpentSiafundElement(sfi.Parent) { + return fmt.Errorf("siafund input %v double-spends output %v", i, sfi.Parent.ID) } - return fmt.Errorf("siafund input %v spends output (%v) not present in the accumulator", i, sci.Parent.ID) + return fmt.Errorf("siafund input %v spends output (%v) not present in the accumulator", i, sfi.Parent.ID) } // check spend policy - if sci.SpendPolicy.Address() != sci.Parent.Address { + if sfi.SpendPolicy.Address() != sfi.Parent.SiafundOutput.Address { return fmt.Errorf("siafund input %v claims incorrect policy for parent address", i) - } else if err := sci.SpendPolicy.Verify(ms.base.Index.Height, sigHash, sci.Signatures); err != nil { + } else if err := sfi.SpendPolicy.Verify(ms.base.Index.Height, sigHash, sfi.Signatures); err != nil { return fmt.Errorf("siafund input %v failed to satisfy spend policy: %w", i, err) } } var inputSum, outputSum uint64 for _, in := range txn.SiafundInputs { - inputSum += in.Parent.Value + inputSum += in.Parent.SiafundOutput.Value } for _, out := range txn.SiafundOutputs { outputSum += out.Value @@ -892,13 +893,13 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { sp := r if ms.base.childHeight() < fc.ProofHeight { return fmt.Errorf("file contract storage proof %v cannot be submitted until after proof height (%v)", i, fc.ProofHeight) - } else if sp.ProofStart.Height != fc.ProofHeight { + } else if sp.ProofIndex.ChainIndex.Height != fc.ProofHeight { // see note on this field in 
types.StorageProof - return fmt.Errorf("file contract storage proof %v has ProofStart (%v) that does not match contract ProofStart (%v)", i, sp.ProofStart.Height, fc.ProofHeight) - } else if ms.base.Elements.containsChainIndex(sp.ProofStart) { + return fmt.Errorf("file contract storage proof %v has ProofIndex height (%v) that does not match contract ProofHeight (%v)", i, sp.ProofIndex.ChainIndex.Height, fc.ProofHeight) + } else if ms.base.Elements.containsChainIndex(sp.ProofIndex) { return fmt.Errorf("file contract storage proof %v has invalid history proof", i) } - leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, sp.ProofStart.ChainIndex.ID, types.FileContractID(fcr.Parent.ID)) + leafIndex := ms.base.StorageProofLeafIndex(fc.Filesize, sp.ProofIndex.ChainIndex.ID, types.FileContractID(fcr.Parent.ID)) if storageProofRoot(ms.base.StorageProofLeafHash(sp.Leaf[:]), leafIndex, fc.Filesize, sp.Proof) != fc.FileMerkleRoot { return fmt.Errorf("file contract storage proof %v has root that does not match contract Merkle root", i) } @@ -929,7 +930,7 @@ func validateFoundationUpdate(ms *MidState, txn types.V2Transaction) error { return nil } for _, in := range txn.SiacoinInputs { - if in.Parent.Address == ms.base.FoundationPrimaryAddress { + if in.Parent.SiacoinOutput.Address == ms.base.FoundationPrimaryAddress { return nil } } diff --git a/types/encoding.go b/types/encoding.go index 333b5443..cf60bc2e 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -577,7 +577,7 @@ func (ren V2FileContractRenewal) EncodeTo(e *Encoder) { // EncodeTo implements types.EncoderTo. func (sp V2StorageProof) EncodeTo(e *Encoder) { - sp.ProofStart.EncodeTo(e) + sp.ProofIndex.EncodeTo(e) e.Write(sp.Leaf[:]) e.WritePrefix(len(sp.Proof)) for _, p := range sp.Proof { @@ -1130,7 +1130,7 @@ func (ren *V2FileContractRenewal) DecodeFrom(d *Decoder) { // DecodeFrom implements types.DecoderFrom. 
func (sp *V2StorageProof) DecodeFrom(d *Decoder) { - sp.ProofStart.DecodeFrom(d) + sp.ProofIndex.DecodeFrom(d) d.Read(sp.Leaf[:]) sp.Proof = make([]Hash256, d.ReadPrefix()) for i := range sp.Proof { diff --git a/types/types.go b/types/types.go index fadd5ec8..d4d66850 100644 --- a/types/types.go +++ b/types/types.go @@ -565,11 +565,11 @@ type V2StorageProof struct { // ID of the block at the contract's ProofHeight. The storage proof thus // includes a proof that this ID is the correct ancestor. // - // During validation, it is imperative to check that ProofStart.Height - // matches the ProofStart field of the contract's final revision; otherwise, - // the prover could use any ProofStart, giving them control over the leaf - // index. - ProofStart ChainIndexElement + // During validation, it is imperative to check that ProofIndex.Height + // matches the ProofHeight field of the contract's final revision; + // otherwise, the prover could use any ProofIndex, giving them control over + // the leaf index. + ProofIndex ChainIndexElement // The leaf is always 64 bytes, extended with zeros if necessary. Leaf [64]byte @@ -601,45 +601,45 @@ type StateElement struct { MerkleProof []Hash256 `json:"merkleProof"` } -// A ChainIndexElement is a record of a SiacoinOutput within the state accumulator. +// A ChainIndexElement is a record of a ChainIndex within the state accumulator. type ChainIndexElement struct { StateElement - ChainIndex + ChainIndex ChainIndex `json:"chainIndex"` } // A SiacoinElement is a record of a SiacoinOutput within the state accumulator. type SiacoinElement struct { StateElement - SiacoinOutput - MaturityHeight uint64 `json:"maturityHeight"` + SiacoinOutput SiacoinOutput `json:"siacoinOutput"` + MaturityHeight uint64 `json:"maturityHeight"` } // A SiafundElement is a record of a SiafundOutput within the state accumulator. 
type SiafundElement struct { StateElement - SiafundOutput - ClaimStart Currency `json:"claimStart"` // value of SiafundPool when element was created + SiafundOutput SiafundOutput `json:"siafundOutput"` + ClaimStart Currency `json:"claimStart"` // value of SiafundPool when element was created } // A FileContractElement is a record of a FileContract within the state // accumulator. type FileContractElement struct { StateElement - FileContract + FileContract FileContract `json:"fileContract"` } // A V2FileContractElement is a record of a V2FileContract within the state // accumulator. type V2FileContractElement struct { StateElement - V2FileContract + V2FileContract V2FileContract `json:"v2FileContract"` } // An AttestationElement is a record of an Attestation within the state // accumulator. type AttestationElement struct { StateElement - Attestation + Attestation Attestation `json:"attestation"` } // A V2Transaction effects a change of blockchain state. @@ -778,7 +778,7 @@ func (txn *V2Transaction) DeepCopy() V2Transaction { for i := range c.FileContractResolutions { c.FileContractResolutions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractResolutions[i].Parent.MerkleProof...) if sp, ok := c.FileContractResolutions[i].Resolution.(V2StorageProof); ok { - sp.ProofStart.MerkleProof = append([]Hash256(nil), sp.ProofStart.MerkleProof...) + sp.ProofIndex.MerkleProof = append([]Hash256(nil), sp.ProofIndex.MerkleProof...) sp.Proof = append([]Hash256(nil), sp.Proof...) 
c.FileContractResolutions[i].Resolution = sp } From 05df97f64f458964d95ca3110dea11d5ff443145 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 29 Aug 2023 12:47:57 -0400 Subject: [PATCH 34/53] types: Use pointer receivers for V2FileContractResolutionType --- chain/manager.go | 6 ++-- consensus/state.go | 16 +++++---- consensus/update.go | 8 ++--- consensus/validation.go | 22 ++++++------ types/encoding.go | 30 ++++++---------- types/types.go | 77 ++++++++++++++++++++++++----------------- 6 files changed, 82 insertions(+), 77 deletions(-) diff --git a/chain/manager.go b/chain/manager.go index 90f46657..dc36190b 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -704,9 +704,8 @@ func (m *Manager) applyPoolUpdate(cau consensus.ApplyUpdate) { } for i := range txn.FileContractResolutions { cau.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) - if sp, ok := txn.FileContractResolutions[i].Resolution.(types.V2StorageProof); ok { + if sp, ok := txn.FileContractResolutions[i].Resolution.(*types.V2StorageProof); ok { cau.UpdateElementProof(&sp.ProofIndex.StateElement) - txn.FileContractResolutions[i].Resolution = sp } } } @@ -768,9 +767,8 @@ func (m *Manager) revertPoolUpdate(cru consensus.RevertUpdate) { } for i := range txn.FileContractResolutions { cru.UpdateElementProof(&txn.FileContractResolutions[i].Parent.StateElement) - if sp, ok := txn.FileContractResolutions[i].Resolution.(types.V2StorageProof); ok { + if sp, ok := txn.FileContractResolutions[i].Resolution.(*types.V2StorageProof); ok { cru.UpdateElementProof(&sp.ProofIndex.StateElement) - txn.FileContractResolutions[i].Resolution = sp } } } diff --git a/consensus/state.go b/consensus/state.go index 6d381604..c65cac8d 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -277,9 +277,10 @@ func (s State) V2TransactionWeight(txn types.V2Transaction) uint64 { } for _, fcr := range txn.FileContractResolutions { fcr.Parent.MerkleProof = nil - if sp, ok := 
fcr.Resolution.(types.V2StorageProof); ok { - sp.ProofIndex.MerkleProof = nil - fcr.Resolution = sp + if sp, ok := fcr.Resolution.(*types.V2StorageProof); ok { + c := *sp // don't modify original + c.ProofIndex.MerkleProof = nil + fcr.Resolution = &c } fcr.EncodeTo(e) } @@ -300,7 +301,7 @@ func (s State) V2TransactionWeight(txn types.V2Transaction) uint64 { signatures += 2 * len(txn.FileContractRevisions) for _, fcr := range txn.FileContractResolutions { switch fcr.Resolution.(type) { - case types.V2FileContractRenewal, types.V2FileContract: + case *types.V2FileContractRenewal, *types.V2FileContract: signatures += 2 } } @@ -566,9 +567,10 @@ func (s State) InputSigHash(txn types.V2Transaction) types.Hash256 { for _, fcr := range txn.FileContractResolutions { fcr.Parent.ID.EncodeTo(h.E) // normalize proof - if sp, ok := fcr.Resolution.(types.V2StorageProof); ok { - sp.ProofIndex.MerkleProof = nil - fcr.Resolution = sp + if sp, ok := fcr.Resolution.(*types.V2StorageProof); ok { + c := *sp // don't modify original + c.ProofIndex.MerkleProof = nil + fcr.Resolution = &c } fcr.Resolution.(types.EncoderTo).EncodeTo(h.E) } diff --git a/consensus/update.go b/consensus/update.go index c65dd296..40224110 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -493,7 +493,7 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { fc := fce.V2FileContract var renter, host types.SiacoinOutput switch r := fcr.Resolution.(type) { - case types.V2FileContractRenewal: + case *types.V2FileContractRenewal: renter, host = r.FinalRevision.RenterOutput, r.FinalRevision.HostOutput renter.Value = renter.Value.Sub(r.RenterRollover) host.Value = host.Value.Sub(r.HostRollover) @@ -501,11 +501,11 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { StateElement: nextElement(), V2FileContract: r.InitialRevision, }) - case types.V2StorageProof: + case *types.V2StorageProof: renter, host = fc.RenterOutput, fc.HostOutput - case types.V2FileContract: // finalization 
+ case *types.V2FileContract: // finalization renter, host = r.RenterOutput, r.HostOutput - case types.V2FileContractExpiration: + case *types.V2FileContractExpiration: renter, host = fc.RenterOutput, fc.MissedHostOutput() } ms.addSiacoinElement(types.SiacoinElement{ diff --git a/consensus/validation.go b/consensus/validation.go index 58934297..4a04f2c4 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -643,15 +643,15 @@ func validateV2CurrencyValues(ms *MidState, txn types.V2Transaction) error { } for i, res := range txn.FileContractResolutions { switch r := res.Resolution.(type) { - case types.V2FileContractRenewal: + case *types.V2FileContractRenewal: if r.InitialRevision.RenterOutput.Value.IsZero() && r.InitialRevision.HostOutput.Value.IsZero() { return fmt.Errorf("file contract renewal %v creates contract with zero value", i) } addContract(r.InitialRevision) add(r.RenterRollover) add(r.HostRollover) - case types.V2FileContract: - addContract(r) + case *types.V2FileContract: + addContract(*r) } } add(txn.MinerFee) @@ -699,7 +699,7 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { outputSum = outputSum.Add(fc.RenterOutput.Value).Add(fc.HostOutput.Value).Add(ms.base.V2FileContractTax(fc)) } for _, res := range txn.FileContractResolutions { - if r, ok := res.Resolution.(types.V2FileContractRenewal); ok { + if r, ok := res.Resolution.(*types.V2FileContractRenewal); ok { // a renewal creates a new contract, optionally "rolling over" funds // from the old contract inputSum = inputSum.Add(r.RenterRollover) @@ -855,8 +855,8 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { } fc := fcr.Parent.V2FileContract switch r := fcr.Resolution.(type) { - case types.V2FileContractRenewal: - renewal := r + case *types.V2FileContractRenewal: + renewal := *r old, renewed := renewal.FinalRevision, renewal.InitialRevision if old.RevisionNumber != types.MaxRevisionNumber { return fmt.Errorf("file contract renewal 
%v does not finalize old contract", i) @@ -882,15 +882,15 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { } else if !fc.HostPublicKey.VerifyHash(renewalHash, renewal.HostSignature) { return fmt.Errorf("file contract renewal %v has invalid host signature", i) } - case types.V2FileContract: - finalRevision := r + case *types.V2FileContract: + finalRevision := *r if finalRevision.RevisionNumber != types.MaxRevisionNumber { return fmt.Errorf("file contract finalization %v does not set maximum revision number", i) } else if err := validateRevision(fc, finalRevision); err != nil { return fmt.Errorf("file contract finalization %v %s", i, err) } - case types.V2StorageProof: - sp := r + case *types.V2StorageProof: + sp := *r if ms.base.childHeight() < fc.ProofHeight { return fmt.Errorf("file contract storage proof %v cannot be submitted until after proof height (%v)", i, fc.ProofHeight) } else if sp.ProofIndex.ChainIndex.Height != fc.ProofHeight { @@ -903,7 +903,7 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { if storageProofRoot(ms.base.StorageProofLeafHash(sp.Leaf[:]), leafIndex, fc.Filesize, sp.Proof) != fc.FileMerkleRoot { return fmt.Errorf("file contract storage proof %v has root that does not match contract Merkle root", i) } - case types.V2FileContractExpiration: + case *types.V2FileContractExpiration: if ms.base.childHeight() <= fc.ExpirationHeight { return fmt.Errorf("file contract expiration %v cannot be submitted until after expiration height (%v) ", i, fc.ExpirationHeight) } diff --git a/types/encoding.go b/types/encoding.go index cf60bc2e..3d09b949 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -592,21 +592,18 @@ func (V2FileContractExpiration) EncodeTo(e *Encoder) {} func (res V2FileContractResolution) EncodeTo(e *Encoder) { res.Parent.EncodeTo(e) switch r := res.Resolution.(type) { - case V2FileContractRenewal: + case *V2FileContractRenewal: e.WriteUint8(0) - r.EncodeTo(e) - case 
V2StorageProof: + case *V2StorageProof: e.WriteUint8(1) - r.EncodeTo(e) - case V2FileContract: + case *V2FileContract: e.WriteUint8(2) - r.EncodeTo(e) - case V2FileContractExpiration: + case *V2FileContractExpiration: e.WriteUint8(3) - r.EncodeTo(e) default: panic(fmt.Sprintf("unhandled resolution type %T", r)) } + res.Resolution.(EncoderTo).EncodeTo(e) } // EncodeTo implements types.EncoderTo. @@ -1146,24 +1143,17 @@ func (res *V2FileContractResolution) DecodeFrom(d *Decoder) { res.Parent.DecodeFrom(d) switch t := d.ReadUint8(); t { case 0: - var r V2FileContractRenewal - r.DecodeFrom(d) - res.Resolution = r + res.Resolution = new(V2FileContractRenewal) case 1: - var r V2StorageProof - r.DecodeFrom(d) - res.Resolution = r + res.Resolution = new(V2StorageProof) case 2: - var r V2FileContract - r.DecodeFrom(d) - res.Resolution = r + res.Resolution = new(V2FileContract) case 3: - var r V2FileContractExpiration - r.DecodeFrom(d) - res.Resolution = r + res.Resolution = new(V2FileContractExpiration) default: d.SetErr(fmt.Errorf("unknown resolution type %d", t)) } + res.Resolution.(DecoderFrom).DecodeFrom(d) } // DecodeFrom implements types.DecoderFrom. diff --git a/types/types.go b/types/types.go index d4d66850..1f4d73ce 100644 --- a/types/types.go +++ b/types/types.go @@ -541,10 +541,10 @@ type V2FileContractResolutionType interface { isV2FileContractResolution() } -func (V2FileContractRenewal) isV2FileContractResolution() {} -func (V2StorageProof) isV2FileContractResolution() {} -func (V2FileContract) isV2FileContractResolution() {} // finalization -func (V2FileContractExpiration) isV2FileContractResolution() {} +func (*V2FileContractRenewal) isV2FileContractResolution() {} +func (*V2StorageProof) isV2FileContractResolution() {} +func (*V2FileContract) isV2FileContractResolution() {} // finalization +func (*V2FileContractExpiration) isV2FileContractResolution() {} // A V2FileContractRenewal renews a file contract. 
type V2FileContractRenewal struct { @@ -777,10 +777,9 @@ func (txn *V2Transaction) DeepCopy() V2Transaction { c.FileContractResolutions = append([]V2FileContractResolution(nil), c.FileContractResolutions...) for i := range c.FileContractResolutions { c.FileContractResolutions[i].Parent.MerkleProof = append([]Hash256(nil), c.FileContractResolutions[i].Parent.MerkleProof...) - if sp, ok := c.FileContractResolutions[i].Resolution.(V2StorageProof); ok { + if sp, ok := c.FileContractResolutions[i].Resolution.(*V2StorageProof); ok { sp.ProofIndex.MerkleProof = append([]Hash256(nil), sp.ProofIndex.MerkleProof...) sp.Proof = append([]Hash256(nil), sp.Proof...) - c.FileContractResolutions[i].Resolution = sp } } c.Attestations = append([]Attestation(nil), c.Attestations...) @@ -1079,23 +1078,52 @@ func (sp *StorageProof) UnmarshalJSON(b []byte) error { return nil } +// MarshalJSON implements json.Marshaler. +func (sp V2StorageProof) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + ProofIndex ChainIndexElement `json:"proofIndex"` + Leaf string `json:"leaf"` + Proof []Hash256 `json:"proof"` + }{sp.ProofIndex, hex.EncodeToString(sp.Leaf[:]), sp.Proof}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (sp *V2StorageProof) UnmarshalJSON(b []byte) error { + var leaf string + err := json.Unmarshal(b, &struct { + ProofIndex *ChainIndexElement + Leaf *string + Proof *[]Hash256 + }{&sp.ProofIndex, &leaf, &sp.Proof}) + if err != nil { + return err + } else if len(leaf) != len(sp.Leaf)*2 { + return errors.New("invalid storage proof leaf length") + } else if _, err = hex.Decode(sp.Leaf[:], []byte(leaf)); err != nil { + return err + } + return nil +} + // MarshalJSON implements json.Marshaler. 
func (res V2FileContractResolution) MarshalJSON() ([]byte, error) { var typ string switch res.Resolution.(type) { - case V2FileContractRenewal: + case *V2FileContractRenewal: typ = "renewal" - case V2StorageProof: + case *V2StorageProof: typ = "storage proof" - case V2FileContract: + case *V2FileContract: typ = "finalization" - case V2FileContractExpiration: + case *V2FileContractExpiration: typ = "expiration" + default: + panic(fmt.Sprintf("unhandled file contract resolution type %T", res.Resolution)) } return json.Marshal(struct { Parent V2FileContractElement `json:"parent"` Type string `json:"type"` - Resolution V2FileContractResolutionType `json:"resolution,omitempty"` + Resolution V2FileContractResolutionType `json:"resolution"` }{res.Parent, typ, res.Resolution}) } @@ -1111,32 +1139,19 @@ func (res *V2FileContractResolution) UnmarshalJSON(b []byte) error { } switch p.Type { case "renewal": - var r V2FileContractRenewal - if err := json.Unmarshal(p.Resolution, &r); err != nil { - return err - } - res.Resolution = r + res.Resolution = new(V2FileContractRenewal) case "storage proof": - var r V2StorageProof - if err := json.Unmarshal(p.Resolution, &r); err != nil { - return err - } - res.Resolution = r + res.Resolution = new(V2StorageProof) case "finalization": - var r V2FileContract - if err := json.Unmarshal(p.Resolution, &r); err != nil { - return err - } - res.Resolution = r + res.Resolution = new(V2FileContract) case "expiration": - var r V2FileContractExpiration - if err := json.Unmarshal(p.Resolution, &r); err != nil { - return err - } - res.Resolution = r + res.Resolution = new(V2FileContractExpiration) default: return fmt.Errorf("unknown file contract resolution type %q", p.Type) } + if err := json.Unmarshal(p.Resolution, res.Resolution); err != nil { + return err + } res.Parent = p.Parent return nil } From 7e45f8851b67ebb29e84188498a98696ac93c407 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 29 Aug 2023 13:34:12 -0400 Subject: [PATCH 35/53] 
types: Add explicit V2FileContractFinalization type --- consensus/state.go | 2 +- consensus/update.go | 2 +- consensus/validation.go | 8 +++--- types/encoding.go | 4 +-- types/types.go | 59 ++++++++++++++++++++++++----------------- 5 files changed, 42 insertions(+), 33 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index c65cac8d..4c51ec79 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -301,7 +301,7 @@ func (s State) V2TransactionWeight(txn types.V2Transaction) uint64 { signatures += 2 * len(txn.FileContractRevisions) for _, fcr := range txn.FileContractResolutions { switch fcr.Resolution.(type) { - case *types.V2FileContractRenewal, *types.V2FileContract: + case *types.V2FileContractRenewal, *types.V2FileContractFinalization: signatures += 2 } } diff --git a/consensus/update.go b/consensus/update.go index 40224110..3edf6007 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -503,7 +503,7 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { }) case *types.V2StorageProof: renter, host = fc.RenterOutput, fc.HostOutput - case *types.V2FileContract: // finalization + case *types.V2FileContractFinalization: renter, host = r.RenterOutput, r.HostOutput case *types.V2FileContractExpiration: renter, host = fc.RenterOutput, fc.MissedHostOutput() diff --git a/consensus/validation.go b/consensus/validation.go index 4a04f2c4..274ecb01 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -650,8 +650,8 @@ func validateV2CurrencyValues(ms *MidState, txn types.V2Transaction) error { addContract(r.InitialRevision) add(r.RenterRollover) add(r.HostRollover) - case *types.V2FileContract: - addContract(*r) + case *types.V2FileContractFinalization: + addContract(types.V2FileContract(*r)) } } add(txn.MinerFee) @@ -882,8 +882,8 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { } else if !fc.HostPublicKey.VerifyHash(renewalHash, renewal.HostSignature) { return fmt.Errorf("file contract 
renewal %v has invalid host signature", i) } - case *types.V2FileContract: - finalRevision := *r + case *types.V2FileContractFinalization: + finalRevision := types.V2FileContract(*r) if finalRevision.RevisionNumber != types.MaxRevisionNumber { return fmt.Errorf("file contract finalization %v does not set maximum revision number", i) } else if err := validateRevision(fc, finalRevision); err != nil { diff --git a/types/encoding.go b/types/encoding.go index 3d09b949..447a3dc3 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -596,7 +596,7 @@ func (res V2FileContractResolution) EncodeTo(e *Encoder) { e.WriteUint8(0) case *V2StorageProof: e.WriteUint8(1) - case *V2FileContract: + case *V2FileContractFinalization: e.WriteUint8(2) case *V2FileContractExpiration: e.WriteUint8(3) @@ -1147,7 +1147,7 @@ func (res *V2FileContractResolution) DecodeFrom(d *Decoder) { case 1: res.Resolution = new(V2StorageProof) case 2: - res.Resolution = new(V2FileContract) + res.Resolution = new(V2FileContractFinalization) case 3: res.Resolution = new(V2FileContractExpiration) default: diff --git a/types/types.go b/types/types.go index 1f4d73ce..3fee2c0a 100644 --- a/types/types.go +++ b/types/types.go @@ -508,29 +508,34 @@ type V2FileContractRevision struct { // A V2FileContractResolution closes a v2 file contract's payment channel. There // are four ways a contract can be resolved: // -// 1) The host can submit a storage proof. This is considered a "valid" -// resolution: the RenterOutput and HostOutput fields of the (finalized) -// contract are created. +// 1) The renter and host can sign a final contract revision (a "finalization"), +// after which the contract cannot be revised further. // -// 2) The renter and host can sign a final contract revision (a "finalization"), -// setting the contract's revision number to its maximum legal value. This is -// considered a "valid" resolution. -// -// 3) The renter and host can jointly renew the contract. 
The old contract is +// 2) The renter and host can jointly renew the contract. The old contract is // finalized, and a portion of its funds are "rolled over" into a new contract. -// This is considered a "valid" resolution. // -// 4) Lastly, anyone can submit a contract expiration. Typically, this results -// in a "missed" resolution: the RenterOutput is created as usual, but the -// HostOutput will have value equal to MissedHostValue. However, if the contract -// is empty (i.e. its Filesize is 0), it instead resolves as valid. +// 3) The host can submit a storage proof, asserting that it has faithfully +// stored the contract data for the agreed-upon duration. Typically, a storage +// proof is only required if the renter is unable or unwilling to sign a +// finalization or renewal. A storage proof can only be submitted after the +// contract's ProofHeight; this allows the renter (or host) to broadcast the +// latest contract revision prior to the proof. +// +// 4) Lastly, anyone can submit a contract expiration. Typically, an expiration +// is only required if the host is unable or unwilling to sign a finalization or +// renewal. An expiration can only be submitted after the contract's +// ExpirationHeight; this gives the host a reasonable window of time after the +// ProofHeight in which to submit a storage proof. // -// There are two restrictions on when a particular type of resolution may be -// submitted: a storage proof may only be submitted after the contract's -// ProofHeight, and an expiration may only be submitted after the contract's -// ExpirationHeight. Since anyone can submit an expiration, it is generally in -// the renter and/or host's interest to submit a different type of resolution -// prior to the ExpirationHeight. +// Once a contract has been resolved, it cannot be altered or resolved again. 
+// When a contract is resolved, its RenterOutput and HostOutput are created +// immediately (though they will not be spendable until their timelock expires). +// However, if the contract is resolved via an expiration, the HostOutput will +// have value equal to MissedHostValue; in other words, the host forfeits its +// collateral. This is considered a "missed" resolution; all other resolution +// types are "valid." As a special case, the expiration of an empty contract is +// considered valid, reflecting the fact that the host has not failed to perform +// any duty. type V2FileContractResolution struct { Parent V2FileContractElement `json:"parent"` Resolution V2FileContractResolutionType `json:"resolution"` @@ -541,10 +546,14 @@ type V2FileContractResolutionType interface { isV2FileContractResolution() } -func (*V2FileContractRenewal) isV2FileContractResolution() {} -func (*V2StorageProof) isV2FileContractResolution() {} -func (*V2FileContract) isV2FileContractResolution() {} // finalization -func (*V2FileContractExpiration) isV2FileContractResolution() {} +func (*V2FileContractFinalization) isV2FileContractResolution() {} +func (*V2FileContractRenewal) isV2FileContractResolution() {} +func (*V2StorageProof) isV2FileContractResolution() {} +func (*V2FileContractExpiration) isV2FileContractResolution() {} + +// A V2FileContractFinalization finalizes a contract, preventing further +// revisions and immediately creating its valid outputs. +type V2FileContractFinalization V2FileContract // A V2FileContractRenewal renews a file contract. 
type V2FileContractRenewal struct { @@ -1113,7 +1122,7 @@ func (res V2FileContractResolution) MarshalJSON() ([]byte, error) { typ = "renewal" case *V2StorageProof: typ = "storage proof" - case *V2FileContract: + case *V2FileContractFinalization: typ = "finalization" case *V2FileContractExpiration: typ = "expiration" @@ -1143,7 +1152,7 @@ func (res *V2FileContractResolution) UnmarshalJSON(b []byte) error { case "storage proof": res.Resolution = new(V2StorageProof) case "finalization": - res.Resolution = new(V2FileContract) + res.Resolution = new(V2FileContractFinalization) case "expiration": res.Resolution = new(V2FileContractExpiration) default: From ce5eab0087f1a064948c242a9f093b428e4cc5d3 Mon Sep 17 00:00:00 2001 From: Christopher Tarry Date: Mon, 21 Aug 2023 18:08:02 -0400 Subject: [PATCH 36/53] consensus: Add v2 validation tests --- consensus/update.go | 35 +++- consensus/validation.go | 14 +- consensus/validation_test.go | 393 +++++++++++++++++++++++++++++++++++ types/policy.go | 2 +- types/policy_test.go | 8 +- 5 files changed, 437 insertions(+), 15 deletions(-) diff --git a/consensus/update.go b/consensus/update.go index 3edf6007..cafbb26d 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -362,8 +362,21 @@ func (ms *MidState) addV2FileContractElement(fce types.V2FileContractElement) { } func (ms *MidState) reviseV2FileContractElement(fce types.V2FileContractElement, rev types.V2FileContract) { - fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) - ms.v2fces = append(ms.v2fces, fce) + if i, ok := ms.ephemeral[fce.ID]; ok { + ms.v2fces[i].V2FileContract = rev + } else { + if r, ok := ms.v2revs[fce.ID]; ok { + r.V2FileContract = rev + } else { + // store the original + fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) + ms.v2fces = append(ms.v2fces, fce) + // store the revision + fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) 
+ fce.V2FileContract = rev + ms.v2revs[fce.ID] = &fce + } + } } func (ms *MidState) resolveV2FileContractElement(fce types.V2FileContractElement, txid types.TransactionID) { @@ -589,7 +602,11 @@ func (ms *MidState) forEachElementLeaf(fn func(elementLeaf)) { } } for i := range ms.v2fces { - fn(v2FileContractLeaf(&ms.v2fces[i], ms.isSpent(ms.v2fces[i].ID))) + if r, ok := ms.v2revs[ms.v2fces[i].ID]; ok { + fn(v2FileContractLeaf(r, ms.isSpent(ms.v2fces[i].ID))) + } else { + fn(v2FileContractLeaf(&ms.v2fces[i], ms.isSpent(ms.v2fces[i].ID))) + } } for i := range ms.aes { fn(attestationLeaf(&ms.aes[i])) @@ -621,7 +638,15 @@ func (au ApplyUpdate) ForEachSiafundElement(fn func(sfe types.SiafundElement, sp // au. If the contract was revised, rev is non-nil. func (au ApplyUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool)) { for _, fce := range au.ms.fces { - fn(fce, au.ms.revision(fce.ID), au.ms.isSpent(fce.ID)) + fn(fce, au.ms.revs[fce.ID], au.ms.isSpent(fce.ID)) + } +} + +// ForEachV2FileContractElement calls fn on each V2 file contract element +// related to au. If the contract was revised, rev is non-nil. 
+func (au ApplyUpdate) ForEachV2FileContractElement(fn func(fce types.V2FileContractElement, rev *types.V2FileContractElement, resolved bool)) { + for _, fce := range au.ms.v2fces { + fn(fce, au.ms.v2revs[fce.ID], au.ms.isSpent(fce.ID)) } } @@ -686,7 +711,7 @@ func (ru RevertUpdate) ForEachSiafundElement(fn func(sfe types.SiafundElement, s func (ru RevertUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool)) { for i := range ru.ms.fces { fce := ru.ms.fces[len(ru.ms.fces)-i-1] - fn(fce, ru.ms.revision(fce.ID), ru.ms.isSpent(fce.ID)) + fn(fce, ru.ms.revs[fce.ID], ru.ms.isSpent(fce.ID)) } } diff --git a/consensus/validation.go b/consensus/validation.go index 274ecb01..b3b7f89d 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -56,7 +56,7 @@ func validateMinerPayouts(s State, b types.Block) error { } } if len(b.MinerPayouts) != 1 { - return errors.New("block has multiple miner payouts") + return errors.New("block must have exactly one miner payout") } } @@ -109,6 +109,7 @@ type MidState struct { ephemeral map[types.Hash256]int // indices into element slices spends map[types.Hash256]types.TransactionID revs map[types.Hash256]*types.FileContractElement + v2revs map[types.Hash256]*types.V2FileContractElement siafundPool types.Currency foundationPrimary types.Address foundationFailsafe types.Address @@ -172,10 +173,6 @@ func (ms *MidState) mustFileContractElement(ts V1TransactionSupplement, id types return fce } -func (ms *MidState) revision(id types.Hash256) *types.FileContractElement { - return ms.revs[id] -} - func (ms *MidState) spent(id types.Hash256) (types.TransactionID, bool) { txid, ok := ms.spends[id] return txid, ok @@ -193,6 +190,7 @@ func NewMidState(s State) *MidState { ephemeral: make(map[types.Hash256]int), spends: make(map[types.Hash256]types.TransactionID), revs: make(map[types.Hash256]*types.FileContractElement), + v2revs: 
make(map[types.Hash256]*types.V2FileContractElement), siafundPool: s.SiafundPool, foundationPrimary: s.FoundationPrimaryAddress, foundationFailsafe: s.FoundationFailsafeAddress, @@ -971,7 +969,7 @@ func ValidateBlock(s State, b types.Block, bs V1BlockSupplement) error { } for i, txn := range b.Transactions { if err := ValidateTransaction(ms, txn, bs.Transactions[i]); err != nil { - return err + return fmt.Errorf("transaction %v is invalid: %w", i, err) } ms.ApplyTransaction(txn, bs.Transactions[i]) } @@ -980,9 +978,9 @@ func ValidateBlock(s State, b types.Block, bs V1BlockSupplement) error { if s.childHeight() < ms.base.Network.HardforkV2.AllowHeight { return errors.New("v2 transactions are not allowed until v2 hardfork begins") } - for _, txn := range b.V2.Transactions { + for i, txn := range b.V2.Transactions { if err := ValidateV2Transaction(ms, txn); err != nil { - return err + return fmt.Errorf("v2 transaction %v is invalid: %w", i, err) } ms.ApplyV2Transaction(txn) } diff --git a/consensus/validation_test.go b/consensus/validation_test.go index 604821d6..7393ea4d 100644 --- a/consensus/validation_test.go +++ b/consensus/validation_test.go @@ -3,6 +3,7 @@ package consensus_test import ( "bytes" "testing" + "time" "go.sia.tech/core/chain" "go.sia.tech/core/consensus" @@ -420,3 +421,395 @@ func TestValidateBlock(t *testing.T) { } } } + +func TestValidateV2Block(t *testing.T) { + n, genesisBlock := chain.TestnetZen() + + n.HardforkTax.Height = 0 + n.HardforkFoundation.Height = 0 + n.InitialTarget = types.BlockID{0xFF} + n.HardforkV2.AllowHeight = 0 + n.HardforkV2.RequireHeight = 1025000 + + giftPrivateKey := types.GeneratePrivateKey() + giftPublicKey := giftPrivateKey.PublicKey() + + renterPrivateKey := types.GeneratePrivateKey() + renterPublicKey := renterPrivateKey.PublicKey() + hostPrivateKey := types.GeneratePrivateKey() + hostPublicKey := hostPrivateKey.PublicKey() + + signTxn := func(cs consensus.State, txn *types.V2Transaction) { + // file contract signing 
must be first because state.InputSigHash + // is different without renter/host signatures + for i := range txn.FileContracts { + txn.FileContracts[i].RenterSignature = renterPrivateKey.SignHash(cs.ContractSigHash(txn.FileContracts[i])) + txn.FileContracts[i].HostSignature = hostPrivateKey.SignHash(cs.ContractSigHash(txn.FileContracts[i])) + } + for i := range txn.FileContractRevisions { + txn.FileContractRevisions[i].Revision.RenterSignature = renterPrivateKey.SignHash(cs.ContractSigHash(txn.FileContractRevisions[i].Revision)) + txn.FileContractRevisions[i].Revision.HostSignature = hostPrivateKey.SignHash(cs.ContractSigHash(txn.FileContractRevisions[i].Revision)) + } + for i := range txn.SiacoinInputs { + txn.SiacoinInputs[i].Signatures = append(txn.SiacoinInputs[i].Signatures, giftPrivateKey.SignHash(cs.InputSigHash(*txn))) + } + for i := range txn.SiafundInputs { + txn.SiafundInputs[i].Signatures = append(txn.SiafundInputs[i].Signatures, giftPrivateKey.SignHash(cs.InputSigHash(*txn))) + } + } + + giftAddress := types.StandardUnlockHash(giftPublicKey) + giftPolicy := types.SpendPolicy{Type: types.PolicyTypeUnlockConditions(types.StandardUnlockConditions(giftPublicKey))} + + giftAmountSC := types.Siacoins(100) + giftAmountSF := uint64(100) + v1GiftFC := rhpv2.PrepareContractFormation(renterPublicKey, hostPublicKey, types.Siacoins(1), types.Siacoins(1), 100, rhpv2.HostSettings{}, types.VoidAddress) + v2GiftFC := types.V2FileContract{ + Filesize: v1GiftFC.Filesize, + ProofHeight: 5, + ExpirationHeight: 10, + // ExpirationHeight: + RenterOutput: v1GiftFC.ValidProofOutputs[0], + HostOutput: v1GiftFC.ValidProofOutputs[1], + MissedHostValue: v1GiftFC.MissedProofOutputs[1].Value, + TotalCollateral: v1GiftFC.Payout, + RenterPublicKey: renterPublicKey, + HostPublicKey: hostPublicKey, + } + + giftTxn := types.V2Transaction{ + SiacoinOutputs: []types.SiacoinOutput{ + {Address: giftAddress, Value: giftAmountSC}, + }, + SiafundOutputs: []types.SiafundOutput{ + {Address: 
giftAddress, Value: giftAmountSF}, + }, + FileContracts: []types.V2FileContract{v2GiftFC}, + } + + genesisBlock.Transactions = nil + genesisBlock.V2 = &types.V2BlockData{ + Transactions: []types.V2Transaction{giftTxn}, + } + + bs := consensus.V1BlockSupplement{Transactions: make([]consensus.V1TransactionSupplement, len(genesisBlock.Transactions))} + _, cau := consensus.ApplyBlock(n.GenesisState(), genesisBlock, bs, time.Time{}) + + var sces []types.SiacoinElement + cau.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { + sces = append(sces, sce) + }) + var sfes []types.SiafundElement + cau.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { + sfes = append(sfes, sfe) + }) + var fces []types.V2FileContractElement + cau.ForEachV2FileContractElement(func(fce types.V2FileContractElement, rev *types.V2FileContractElement, resolved bool) { + fces = append(fces, fce) + }) + + dbStore, checkpoint, err := chain.NewDBStore(chain.NewMemDB(), n, genesisBlock) + if err != nil { + t.Fatal(err) + } + cs := checkpoint.State + + fc := v2GiftFC + fc.TotalCollateral = fc.HostOutput.Value + difference, _ := types.ParseCurrency("2080000000000000000000000") + + rev1 := v2GiftFC + rev1.RevisionNumber++ + rev2 := rev1 + rev2.RevisionNumber++ + minerFee := types.Siacoins(1) + b := types.Block{ + ParentID: genesisBlock.ID(), + Timestamp: types.CurrentTimestamp(), + V2: &types.V2BlockData{ + Height: 1, + Transactions: []types.V2Transaction{{ + SiacoinInputs: []types.V2SiacoinInput{{ + Parent: sces[0], + SpendPolicy: giftPolicy, + }}, + SiafundInputs: []types.V2SiafundInput{{ + Parent: sfes[0], + ClaimAddress: types.VoidAddress, + SpendPolicy: giftPolicy, + }}, + SiacoinOutputs: []types.SiacoinOutput{ + {Value: giftAmountSC.Sub(minerFee).Sub(difference), Address: giftAddress}, + }, + SiafundOutputs: []types.SiafundOutput{ + {Value: giftAmountSF / 2, Address: giftAddress}, + {Value: giftAmountSF / 2, Address: types.VoidAddress}, + }, + FileContracts: 
[]types.V2FileContract{fc}, + FileContractRevisions: []types.V2FileContractRevision{ + { + Parent: fces[0], + Revision: rev1, + }, + { + Parent: fces[0], + Revision: rev2, + }, + }, + MinerFee: minerFee, + }}, + }, + MinerPayouts: []types.SiacoinOutput{{ + Address: types.VoidAddress, + Value: cs.BlockReward().Add(minerFee), + }}, + } + signTxn(cs, &b.V2.Transactions[0]) + b.V2.Commitment = cs.Commitment(b.MinerPayouts[0].Address, b.Transactions, b.V2.Transactions) + + // initial block should be valid + validBlock := deepCopyBlock(b) + findBlockNonce(cs, &validBlock) + if err := consensus.ValidateBlock(cs, validBlock, dbStore.SupplementTipBlock(validBlock)); err != nil { + t.Fatal(err) + } + + { + tests := []struct { + desc string + corrupt func(*types.Block) + }{ + { + "weight that exceeds the limit", + func(b *types.Block) { + data := make([]byte, cs.MaxBlockWeight()) + b.V2.Transactions = append(b.V2.Transactions, types.V2Transaction{ + ArbitraryData: data, + }) + }, + }, + { + "wrong parent ID", + func(b *types.Block) { + b.ParentID[0] ^= 255 + }, + }, + { + "wrong timestamp", + func(b *types.Block) { + b.Timestamp = b.Timestamp.AddDate(-1, 0, 0) + }, + }, + { + "no miner payout", + func(b *types.Block) { + b.MinerPayouts = nil + }, + }, + { + "zero miner payout", + func(b *types.Block) { + b.MinerPayouts = []types.SiacoinOutput{{ + Address: types.VoidAddress, + Value: types.ZeroCurrency, + }} + }, + }, + { + "incorrect miner payout", + func(b *types.Block) { + b.MinerPayouts = []types.SiacoinOutput{{ + Address: types.VoidAddress, + Value: cs.BlockReward().Div64(2), + }} + }, + }, + { + "zero-valued SiacoinOutput", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + for i := range txn.SiacoinOutputs { + txn.SiacoinOutputs[i].Value = types.ZeroCurrency + } + txn.SiacoinInputs = nil + txn.FileContracts = nil + }, + }, + { + "zero-valued SiafundOutput", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + for i := range txn.SiafundOutputs { + 
txn.SiafundOutputs[i].Value = 0 + } + txn.SiafundInputs = nil + }, + }, + { + "zero-valued MinerFee", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.MinerFee = types.ZeroCurrency + }, + }, + { + "overflowing MinerFees", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.MinerFee = types.MaxCurrency + }, + }, + { + "siacoin outputs exceed inputs", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.SiacoinOutputs[0].Value = txn.SiacoinOutputs[0].Value.Add(types.NewCurrency64(1)) + }, + }, + { + "siacoin outputs less than inputs", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.SiacoinOutputs[0].Value = txn.SiacoinOutputs[0].Value.Sub(types.NewCurrency64(1)) + }, + }, + { + "siafund outputs exceed inputs", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.SiafundOutputs[0].Value = txn.SiafundOutputs[0].Value + 1 + }, + }, + { + "siafund outputs less than inputs", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.SiafundOutputs[0].Value = txn.SiafundOutputs[0].Value - 1 + }, + }, + { + "two of the same siacoin input", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.SiacoinInputs = append(txn.SiacoinInputs, txn.SiacoinInputs[0]) + }, + }, + { + "two of the same siafund input", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.SiafundInputs = append(txn.SiafundInputs, txn.SiafundInputs[0]) + }, + }, + { + "siacoin input claiming incorrect unlock conditions", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + + if uc, ok := txn.SiacoinInputs[0].SpendPolicy.Type.(types.PolicyTypeUnlockConditions); ok { + uc.PublicKeys[0].Key[0] ^= 255 + txn.SiacoinInputs[0].SpendPolicy = types.SpendPolicy{Type: types.PolicyTypeUnlockConditions(uc)} + } + }, + }, + { + "siafund input claiming incorrect unlock conditions", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + + if uc, ok := txn.SiafundInputs[0].SpendPolicy.Type.(types.PolicyTypeUnlockConditions); ok { + 
uc.PublicKeys[0].Key[0] ^= 255 + txn.SiafundInputs[0].SpendPolicy = types.SpendPolicy{Type: types.PolicyTypeUnlockConditions(uc)} + } + }, + }, + { + "invalid FoundationAddressUpdate", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + addr := types.VoidAddress + txn.NewFoundationAddress = &addr + }, + }, + { + "revision with window that starts in past", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.FileContractRevisions[0].Revision.ProofHeight = cs.Index.Height + }, + }, + { + "revision with window that ends before it begins", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.FileContractRevisions[0].Revision.ExpirationHeight = txn.FileContractRevisions[0].Revision.ProofHeight + }, + }, + { + "revision with lower revision number than its parent", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.FileContractRevisions[0].Revision.RevisionNumber = 0 + }, + }, + { + "revision having different valid payout sum", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.FileContractRevisions[0].Revision.HostOutput.Value = txn.FileContractRevisions[0].Revision.HostOutput.Value.Add(types.Siacoins(1)) + }, + }, + { + "conflicting revisions in same transaction", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + newRevision := txn.FileContractRevisions[0] + newRevision.Revision.RevisionNumber++ + txn.FileContractRevisions = append(txn.FileContractRevisions, newRevision) + }, + }, + { + "window that starts in the past", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.FileContracts[0].ProofHeight = 0 + }, + }, + { + "window that ends before it begins", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.FileContracts[0].ProofHeight = txn.FileContracts[0].ExpirationHeight + }, + }, + { + "valid payout that does not equal missed payout", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.FileContracts[0].HostOutput.Value = txn.FileContracts[0].HostOutput.Value.Add(types.Siacoins(1)) + 
}, + }, + { + "incorrect payout tax", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.SiacoinOutputs[0].Value = txn.SiacoinOutputs[0].Value.Add(types.Siacoins(1)) + txn.FileContracts[0].TotalCollateral = txn.FileContracts[0].TotalCollateral.Sub(types.Siacoins(1)) + }, + }, + } + for _, test := range tests { + corruptBlock := deepCopyBlock(validBlock) + test.corrupt(&corruptBlock) + signTxn(cs, &corruptBlock.V2.Transactions[0]) + if len(corruptBlock.MinerPayouts) > 0 { + corruptBlock.V2.Commitment = cs.Commitment(corruptBlock.MinerPayouts[0].Address, corruptBlock.Transactions, corruptBlock.V2.Transactions) + } + findBlockNonce(cs, &corruptBlock) + + if err := consensus.ValidateBlock(cs, corruptBlock, dbStore.SupplementTipBlock(corruptBlock)); err == nil { + t.Fatalf("accepted block with %v", test.desc) + } + } + } +} diff --git a/types/policy.go b/types/policy.go index 42de21c3..8f672e05 100644 --- a/types/policy.go +++ b/types/policy.go @@ -109,7 +109,7 @@ func (p SpendPolicy) Verify(height uint64, sigHash Hash256, sigs []Signature) er verify = func(p SpendPolicy) error { switch p := p.Type.(type) { case PolicyTypeAbove: - if height > uint64(p) { + if height >= uint64(p) { return nil } return fmt.Errorf("height not above %v", uint64(p)) diff --git a/types/policy_test.go b/types/policy_test.go index ad15ae52..8b4b8ccc 100644 --- a/types/policy_test.go +++ b/types/policy_test.go @@ -32,10 +32,16 @@ func TestPolicyVerify(t *testing.T) { PolicyAbove(0), 0, nil, + true, + }, + { + PolicyAbove(1), + 0, + nil, false, }, { - PolicyAbove(0), + PolicyAbove(1), 1, nil, true, From c2d9fd9da64658e1cadbafffc0e5d85dae80b1dd Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sat, 2 Sep 2023 10:11:28 -0400 Subject: [PATCH 37/53] types: Add PolicyAfter and PolicyHash These policies enable HTLCs, which in turn enable atomic swaps between Sia and other cryptocurrencies, namely Bitcoin and Ethereum. 
Also refactor inputs to use SatisfiedPolicy, which pairs a policy with its signatures and preimages. This lets us encode inputs slightly more efficiently, as length prefixes are no longer necessary: instead, the decoder can recurse over the policy itself to discover exactly how many signatures and preimages to expect. --- consensus/state.go | 4 +- consensus/validation.go | 10 +-- consensus/validation_test.go | 47 +++++++------ types/encoding.go | 72 ++++++++++++++------ types/policy.go | 124 +++++++++++++++++++++++++++++++++-- types/policy_test.go | 3 +- types/types.go | 30 +++++---- 7 files changed, 224 insertions(+), 66 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 4c51ec79..16df7ffd 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -292,10 +292,10 @@ func (s State) V2TransactionWeight(txn types.V2Transaction) uint64 { var signatures int for _, sci := range txn.SiacoinInputs { - signatures += len(sci.Signatures) + signatures += len(sci.SatisfiedPolicy.Signatures) } for _, sfi := range txn.SiafundInputs { - signatures += len(sfi.Signatures) + signatures += len(sfi.SatisfiedPolicy.Signatures) } signatures += 2 * len(txn.FileContracts) signatures += 2 * len(txn.FileContractRevisions) diff --git a/consensus/validation.go b/consensus/validation.go index b3b7f89d..17649319 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -679,9 +679,10 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { } // check spend policy - if sci.SpendPolicy.Address() != sci.Parent.SiacoinOutput.Address { + sp := sci.SatisfiedPolicy + if sp.Policy.Address() != sci.Parent.SiacoinOutput.Address { return fmt.Errorf("siacoin input %v claims incorrect policy for parent address", i) - } else if err := sci.SpendPolicy.Verify(ms.base.Index.Height, sigHash, sci.Signatures); err != nil { + } else if err := sp.Policy.Verify(ms.base.Index.Height, ms.base.medianTimestamp(), sigHash, sp.Signatures, sp.Preimages); err != nil { return 
fmt.Errorf("siacoin input %v failed to satisfy spend policy: %w", i, err) } } @@ -735,9 +736,10 @@ func validateV2Siafunds(ms *MidState, txn types.V2Transaction) error { } // check spend policy - if sfi.SpendPolicy.Address() != sfi.Parent.SiafundOutput.Address { + sp := sfi.SatisfiedPolicy + if sp.Policy.Address() != sfi.Parent.SiafundOutput.Address { return fmt.Errorf("siafund input %v claims incorrect policy for parent address", i) - } else if err := sfi.SpendPolicy.Verify(ms.base.Index.Height, sigHash, sfi.Signatures); err != nil { + } else if err := sp.Policy.Verify(ms.base.Index.Height, ms.base.medianTimestamp(), sigHash, sp.Signatures, sp.Preimages); err != nil { return fmt.Errorf("siafund input %v failed to satisfy spend policy: %w", i, err) } } diff --git a/consensus/validation_test.go b/consensus/validation_test.go index 7393ea4d..ecad00ef 100644 --- a/consensus/validation_test.go +++ b/consensus/validation_test.go @@ -433,6 +433,8 @@ func TestValidateV2Block(t *testing.T) { giftPrivateKey := types.GeneratePrivateKey() giftPublicKey := giftPrivateKey.PublicKey() + giftPolicy := types.PolicyPublicKey(giftPublicKey) + giftAddress := types.StandardAddress(giftPublicKey) renterPrivateKey := types.GeneratePrivateKey() renterPublicKey := renterPrivateKey.PublicKey() @@ -450,17 +452,18 @@ func TestValidateV2Block(t *testing.T) { txn.FileContractRevisions[i].Revision.RenterSignature = renterPrivateKey.SignHash(cs.ContractSigHash(txn.FileContractRevisions[i].Revision)) txn.FileContractRevisions[i].Revision.HostSignature = hostPrivateKey.SignHash(cs.ContractSigHash(txn.FileContractRevisions[i].Revision)) } + sp := types.SatisfiedPolicy{ + Policy: giftPolicy, + Signatures: []types.Signature{giftPrivateKey.SignHash(cs.InputSigHash(*txn))}, + } for i := range txn.SiacoinInputs { - txn.SiacoinInputs[i].Signatures = append(txn.SiacoinInputs[i].Signatures, giftPrivateKey.SignHash(cs.InputSigHash(*txn))) + txn.SiacoinInputs[i].SatisfiedPolicy = sp } for i := range 
txn.SiafundInputs { - txn.SiafundInputs[i].Signatures = append(txn.SiafundInputs[i].Signatures, giftPrivateKey.SignHash(cs.InputSigHash(*txn))) + txn.SiafundInputs[i].SatisfiedPolicy = sp } } - giftAddress := types.StandardUnlockHash(giftPublicKey) - giftPolicy := types.SpendPolicy{Type: types.PolicyTypeUnlockConditions(types.StandardUnlockConditions(giftPublicKey))} - giftAmountSC := types.Siacoins(100) giftAmountSF := uint64(100) v1GiftFC := rhpv2.PrepareContractFormation(renterPublicKey, hostPublicKey, types.Siacoins(1), types.Siacoins(1), 100, rhpv2.HostSettings{}, types.VoidAddress) @@ -530,13 +533,11 @@ func TestValidateV2Block(t *testing.T) { Height: 1, Transactions: []types.V2Transaction{{ SiacoinInputs: []types.V2SiacoinInput{{ - Parent: sces[0], - SpendPolicy: giftPolicy, + Parent: sces[0], }}, SiafundInputs: []types.V2SiafundInput{{ Parent: sfes[0], ClaimAddress: types.VoidAddress, - SpendPolicy: giftPolicy, }}, SiacoinOutputs: []types.SiacoinOutput{ {Value: giftAmountSC.Sub(minerFee).Sub(difference), Address: giftAddress}, @@ -702,25 +703,31 @@ func TestValidateV2Block(t *testing.T) { }, }, { - "siacoin input claiming incorrect unlock conditions", + "siacoin input claiming incorrect policy", func(b *types.Block) { txn := &b.V2.Transactions[0] - - if uc, ok := txn.SiacoinInputs[0].SpendPolicy.Type.(types.PolicyTypeUnlockConditions); ok { - uc.PublicKeys[0].Key[0] ^= 255 - txn.SiacoinInputs[0].SpendPolicy = types.SpendPolicy{Type: types.PolicyTypeUnlockConditions(uc)} - } + txn.SiacoinInputs[0].SatisfiedPolicy.Policy = types.AnyoneCanSpend() }, }, { - "siafund input claiming incorrect unlock conditions", + "siafund input claiming incorrect policy", func(b *types.Block) { txn := &b.V2.Transactions[0] - - if uc, ok := txn.SiafundInputs[0].SpendPolicy.Type.(types.PolicyTypeUnlockConditions); ok { - uc.PublicKeys[0].Key[0] ^= 255 - txn.SiafundInputs[0].SpendPolicy = types.SpendPolicy{Type: types.PolicyTypeUnlockConditions(uc)} - } + 
txn.SiafundInputs[0].SatisfiedPolicy.Policy = types.AnyoneCanSpend() + }, + }, + { + "siacoin input claiming invalid policy", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.SiacoinInputs[0].SatisfiedPolicy.Signatures[0][0] ^= 1 + }, + }, + { + "siafund input claiming invalid policy", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + txn.SiafundInputs[0].SatisfiedPolicy.Signatures[0][0] ^= 1 }, }, { diff --git a/types/encoding.go b/types/encoding.go index 447a3dc3..3f10957f 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -479,6 +479,30 @@ func (p SpendPolicy) EncodeTo(e *Encoder) { p.encodePolicy(e) } +// EncodeTo implements types.EncoderTo. +func (sp SatisfiedPolicy) EncodeTo(e *Encoder) { + sp.Policy.EncodeTo(e) + var sigi, prei int + var rec func(SpendPolicy) + rec = func(p SpendPolicy) { + switch p := p.Type.(type) { + case PolicyTypePublicKey: + sp.Signatures[sigi].EncodeTo(e) + sigi++ + case PolicyTypeHash: + e.WriteBytes(sp.Preimages[prei]) + prei++ + case PolicyTypeThreshold: + for i := range p.Of { + rec(p.Of[i]) + } + default: + // nothing to do + } + } + rec(sp.Policy) +} + // EncodeTo implements types.EncoderTo. func (se StateElement) EncodeTo(e *Encoder) { se.ID.EncodeTo(e) @@ -492,11 +516,7 @@ func (se StateElement) EncodeTo(e *Encoder) { // EncodeTo implements types.EncoderTo. func (in V2SiacoinInput) EncodeTo(e *Encoder) { in.Parent.EncodeTo(e) - in.SpendPolicy.EncodeTo(e) - e.WritePrefix(len(in.Signatures)) - for _, sig := range in.Signatures { - sig.EncodeTo(e) - } + in.SatisfiedPolicy.EncodeTo(e) } // EncodeTo implements types.EncoderTo. @@ -516,11 +536,7 @@ func (sce SiacoinElement) EncodeTo(e *Encoder) { func (in V2SiafundInput) EncodeTo(e *Encoder) { in.Parent.EncodeTo(e) in.ClaimAddress.EncodeTo(e) - in.SpendPolicy.EncodeTo(e) - e.WritePrefix(len(in.Signatures)) - for _, sig := range in.Signatures { - sig.EncodeTo(e) - } + in.SatisfiedPolicy.EncodeTo(e) } // EncodeTo implements types.EncoderTo. 
@@ -1029,6 +1045,30 @@ func (p *SpendPolicy) DecodeFrom(d *Decoder) { d.SetErr(err) } +// DecodeFrom implements types.DecoderFrom. +func (sp *SatisfiedPolicy) DecodeFrom(d *Decoder) { + sp.Policy.DecodeFrom(d) + + var rec func(SpendPolicy) + rec = func(p SpendPolicy) { + switch p := p.Type.(type) { + case PolicyTypePublicKey: + var s Signature + s.DecodeFrom(d) + sp.Signatures = append(sp.Signatures, s) + case PolicyTypeHash: + sp.Preimages = append(sp.Preimages, d.ReadBytes()) + case PolicyTypeThreshold: + for i := range p.Of { + rec(p.Of[i]) + } + default: + // nothing to do + } + } + rec(sp.Policy) +} + // DecodeFrom implements types.DecoderFrom. func (se *StateElement) DecodeFrom(d *Decoder) { se.ID.DecodeFrom(d) @@ -1042,11 +1082,7 @@ func (se *StateElement) DecodeFrom(d *Decoder) { // DecodeFrom implements types.DecoderFrom. func (in *V2SiacoinInput) DecodeFrom(d *Decoder) { in.Parent.DecodeFrom(d) - in.SpendPolicy.DecodeFrom(d) - in.Signatures = make([]Signature, d.ReadPrefix()) - for i := range in.Signatures { - in.Signatures[i].DecodeFrom(d) - } + in.SatisfiedPolicy.DecodeFrom(d) } // DecodeFrom implements types.DecoderFrom. @@ -1066,11 +1102,7 @@ func (sce *SiacoinElement) DecodeFrom(d *Decoder) { func (in *V2SiafundInput) DecodeFrom(d *Decoder) { in.Parent.DecodeFrom(d) in.ClaimAddress.DecodeFrom(d) - in.SpendPolicy.DecodeFrom(d) - in.Signatures = make([]Signature, d.ReadPrefix()) - for i := range in.Signatures { - in.Signatures[i].DecodeFrom(d) - } + in.SatisfiedPolicy.DecodeFrom(d) } // DecodeFrom implements types.DecoderFrom. diff --git a/types/policy.go b/types/policy.go index 8f672e05..b626aafd 100644 --- a/types/policy.go +++ b/types/policy.go @@ -2,12 +2,15 @@ package types import ( "bytes" + "crypto/sha256" "encoding/hex" + "encoding/json" "errors" "fmt" "io" "strconv" "strings" + "time" ) // A SpendPolicy describes the conditions under which an input may be spent. 
@@ -24,6 +27,15 @@ func PolicyAbove(height uint64) SpendPolicy { return SpendPolicy{PolicyTypeAbove(height)} } +// PolicyTypeAfter requires the input to be spent after a given timestamp. +type PolicyTypeAfter time.Time + +// PolicyAfter returns a policy that requires the input to be spent after a +// given timestamp. +func PolicyAfter(t time.Time) SpendPolicy { + return SpendPolicy{PolicyTypeAfter(t)} +} + // PolicyTypePublicKey requires the input to be signed by a given key. type PolicyTypePublicKey PublicKey @@ -33,6 +45,15 @@ func PolicyPublicKey(pk PublicKey) SpendPolicy { return SpendPolicy{PolicyTypePublicKey(pk)} } +// PolicyTypeHash requires the input to reveal a SHA256 hash preimage. +type PolicyTypeHash Hash256 + +// PolicyHash returns a policy that requires the input to reveal a SHA256 hash +// preimage. +func PolicyHash(h Hash256) SpendPolicy { + return SpendPolicy{PolicyTypeHash(h)} +} + // PolicyTypeThreshold requires at least N sub-policies to be satisfied. type PolicyTypeThreshold struct { N uint8 @@ -69,7 +90,9 @@ func AnyoneCanSpend() SpendPolicy { type PolicyTypeUnlockConditions UnlockConditions func (PolicyTypeAbove) isPolicy() {} +func (PolicyTypeAfter) isPolicy() {} func (PolicyTypePublicKey) isPolicy() {} +func (PolicyTypeHash) isPolicy() {} func (PolicyTypeThreshold) isPolicy() {} func (PolicyTypeOpaque) isPolicy() {} func (PolicyTypeUnlockConditions) isPolicy() {} @@ -97,14 +120,21 @@ func (p SpendPolicy) Address() Address { } // Verify verifies that p is satisfied by the supplied inputs. 
-func (p SpendPolicy) Verify(height uint64, sigHash Hash256, sigs []Signature) error { +func (p SpendPolicy) Verify(height uint64, medianTimestamp time.Time, sigHash Hash256, sigs []Signature, preimages [][]byte) error { nextSig := func() (sig Signature, ok bool) { if ok = len(sigs) > 0; ok { sig, sigs = sigs[0], sigs[1:] } return } + nextPreimage := func() (preimage []byte, ok bool) { + if ok = len(preimages) > 0; ok { + preimage, preimages = preimages[0], preimages[1:] + } + return + } errInvalidSignature := errors.New("invalid signature") + errInvalidPreimage := errors.New("invalid preimage") var verify func(SpendPolicy) error verify = func(p SpendPolicy) error { switch p := p.Type.(type) { @@ -113,17 +143,26 @@ func (p SpendPolicy) Verify(height uint64, sigHash Hash256, sigs []Signature) er return nil } return fmt.Errorf("height not above %v", uint64(p)) + case PolicyTypeAfter: + if medianTimestamp.After(time.Time(p)) { + return nil + } + return fmt.Errorf("median timestamp not after %v", time.Time(p)) case PolicyTypePublicKey: - sig, ok := nextSig() - if ok && PublicKey(p).VerifyHash(sigHash, sig) { + if sig, ok := nextSig(); ok && PublicKey(p).VerifyHash(sigHash, sig) { return nil } return errInvalidSignature + case PolicyTypeHash: + if preimage, ok := nextPreimage(); ok && p == sha256.Sum256(preimage) { + return nil + } + return errInvalidPreimage case PolicyTypeThreshold: for i := 0; i < len(p.Of) && p.N > 0 && len(p.Of[i:]) >= int(p.N); i++ { if _, ok := p.Of[i].Type.(PolicyTypeUnlockConditions); ok { return errors.New("unlock conditions cannot be sub-policies") - } else if err := verify(p.Of[i]); err == errInvalidSignature { + } else if err := verify(p.Of[i]); err == errInvalidSignature || err == errInvalidPreimage { return err // fatal; should have been opaque } else if err == nil { p.N-- @@ -156,7 +195,14 @@ func (p SpendPolicy) Verify(height uint64, sigHash Hash256, sigs []Signature) er panic("invalid policy type") // developer error } } - return 
verify(p) + if err := verify(p); err != nil { + return err + } else if len(sigs) > 0 { + return errors.New("superfluous signature(s)") + } else if len(preimages) > 0 { + return errors.New("superfluous preimage(s)") + } + return nil } // String implements fmt.Stringer. @@ -172,11 +218,21 @@ func (p SpendPolicy) String() string { sb.WriteString(strconv.FormatUint(uint64(p), 10)) sb.WriteByte(')') + case PolicyTypeAfter: + sb.WriteString("after(") + sb.WriteString(strconv.FormatInt(time.Time(p).Unix(), 10)) + sb.WriteByte(')') + case PolicyTypePublicKey: sb.WriteString("pk(") writeHex(p[:]) sb.WriteByte(')') + case PolicyTypeHash: + sb.WriteString("h(") + writeHex(p[:]) + sb.WriteByte(')') + case PolicyTypeThreshold: sb.WriteString("thresh(") sb.WriteString(strconv.FormatUint(uint64(p.N), 10)) @@ -251,15 +307,24 @@ func ParseSpendPolicy(s string) (SpendPolicy, error) { u, err = strconv.ParseUint(t, 10, bitSize) return } + parseTime := func() time.Time { + t := nextToken() + if err != nil { + return time.Time{} + } + var unix int64 + unix, err = strconv.ParseInt(t, 10, 64) + return time.Unix(unix, 0) + } parsePubkey := func() (pk PublicKey) { t := nextToken() if err != nil { return } else if len(t) != 66 { - err = fmt.Errorf("invalid pubkey length (%d)", len(t)) + err = fmt.Errorf("invalid hex string length (%d)", len(t)) return } else if t[:2] != "0x" { - err = fmt.Errorf("invalid pubkey prefix %q", t[:2]) + err = fmt.Errorf("invalid hex string prefix %q", t[:2]) return } _, err = hex.Decode(pk[:], []byte(t[2:])) @@ -273,8 +338,12 @@ func ParseSpendPolicy(s string) (SpendPolicy, error) { switch typ { case "above": return PolicyAbove(parseInt(64)) + case "after": + return PolicyAfter(parseTime()) case "pk": return PolicyPublicKey(parsePubkey()) + case "h": + return PolicyHash(Hash256(parsePubkey())) case "thresh": n := parseInt(8) consume(',') @@ -346,3 +415,44 @@ func (p SpendPolicy) MarshalJSON() ([]byte, error) { func (p *SpendPolicy) UnmarshalJSON(b []byte) (err 
error) { return p.UnmarshalText(bytes.Trim(b, `"`)) } + +// A SatisfiedPolicy pairs a policy with the signatures and preimages that +// satisfy it. +type SatisfiedPolicy struct { + Policy SpendPolicy + Signatures []Signature + Preimages [][]byte +} + +// MarshalJSON implements json.Marshaler. +func (sp SatisfiedPolicy) MarshalJSON() ([]byte, error) { + pre := make([]string, len(sp.Preimages)) + for i := range pre { + pre[i] = hex.EncodeToString(sp.Preimages[i]) + } + return json.Marshal(struct { + Policy SpendPolicy `json:"policy"` + Signatures []Signature `json:"signatures,omitempty"` + Preimages []string `json:"preimages,omitempty"` + }{sp.Policy, sp.Signatures, pre}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (sp *SatisfiedPolicy) UnmarshalJSON(b []byte) error { + var pre []string + err := json.Unmarshal(b, &struct { + Policy *SpendPolicy + Signatures *[]Signature + Preimages *[]string + }{&sp.Policy, &sp.Signatures, &pre}) + if err != nil { + return err + } + sp.Preimages = make([][]byte, len(pre)) + for i := range sp.Preimages { + if sp.Preimages[i], err = hex.DecodeString(pre[i]); err != nil { + return err + } + } + return nil +} diff --git a/types/policy_test.go b/types/policy_test.go index 8b4b8ccc..746e49f2 100644 --- a/types/policy_test.go +++ b/types/policy_test.go @@ -3,6 +3,7 @@ package types import ( "bytes" "testing" + "time" ) func roundtrip(from EncoderTo, to DecoderFrom) { @@ -197,7 +198,7 @@ func TestPolicyVerify(t *testing.T) { true, }, } { - if err := test.p.Verify(test.height, sigHash, test.sigs); err != nil && test.valid { + if err := test.p.Verify(test.height, time.Time{}, sigHash, test.sigs, nil); err != nil && test.valid { t.Fatal(err) } else if err == nil && !test.valid { t.Fatal("expected error") diff --git a/types/types.go b/types/types.go index 3fee2c0a..c34e2ea1 100644 --- a/types/types.go +++ b/types/types.go @@ -483,9 +483,8 @@ func (fc V2FileContract) MissedHostOutput() SiacoinOutput { // A V2SiacoinInput spends an 
unspent SiacoinElement in the state accumulator by // revealing its public key and signing the transaction. type V2SiacoinInput struct { - Parent SiacoinElement `json:"parent"` - SpendPolicy SpendPolicy `json:"spendPolicy"` - Signatures []Signature `json:"signatures"` + Parent SiacoinElement `json:"parent"` + SatisfiedPolicy SatisfiedPolicy `json:"satisfiedPolicy"` } // A V2SiafundInput spends an unspent SiafundElement in the state accumulator by @@ -493,10 +492,9 @@ type V2SiacoinInput struct { // ClaimAddress, specifying the recipient of the siacoins that were earned by // the SiafundElement. type V2SiafundInput struct { - Parent SiafundElement `json:"parent"` - ClaimAddress Address `json:"claimAddress"` - SpendPolicy SpendPolicy `json:"spendPolicy"` - Signatures []Signature `json:"signatures"` + Parent SiafundElement `json:"parent"` + ClaimAddress Address `json:"claimAddress"` + SatisfiedPolicy SatisfiedPolicy `json:"satisfiedPolicy"` } // A V2FileContractRevision updates the state of an existing file contract. @@ -769,13 +767,21 @@ func (txn *V2Transaction) DeepCopy() V2Transaction { c.SiacoinInputs = append([]V2SiacoinInput(nil), c.SiacoinInputs...) for i := range c.SiacoinInputs { c.SiacoinInputs[i].Parent.MerkleProof = append([]Hash256(nil), c.SiacoinInputs[i].Parent.MerkleProof...) - c.SiacoinInputs[i].Signatures = append([]Signature(nil), c.SiacoinInputs[i].Signatures...) + c.SiacoinInputs[i].SatisfiedPolicy.Signatures = append([]Signature(nil), c.SiacoinInputs[i].SatisfiedPolicy.Signatures...) + c.SiacoinInputs[i].SatisfiedPolicy.Preimages = append([][]byte(nil), c.SiacoinInputs[i].SatisfiedPolicy.Preimages...) + for j := range c.SiacoinInputs[i].SatisfiedPolicy.Preimages { + c.SiacoinInputs[i].SatisfiedPolicy.Preimages[j] = append([]byte(nil), c.SiacoinInputs[i].SatisfiedPolicy.Preimages[j]...) + } } c.SiacoinOutputs = append([]SiacoinOutput(nil), c.SiacoinOutputs...) c.SiafundInputs = append([]V2SiafundInput(nil), c.SiafundInputs...) 
for i := range c.SiafundInputs { c.SiafundInputs[i].Parent.MerkleProof = append([]Hash256(nil), c.SiafundInputs[i].Parent.MerkleProof...) - c.SiafundInputs[i].Signatures = append([]Signature(nil), c.SiafundInputs[i].Signatures...) + c.SiafundInputs[i].SatisfiedPolicy.Signatures = append([]Signature(nil), c.SiafundInputs[i].SatisfiedPolicy.Signatures...) + c.SiafundInputs[i].SatisfiedPolicy.Preimages = append([][]byte(nil), c.SiafundInputs[i].SatisfiedPolicy.Preimages...) + for j := range c.SiafundInputs[i].SatisfiedPolicy.Preimages { + c.SiafundInputs[i].SatisfiedPolicy.Preimages[j] = append([]byte(nil), c.SiafundInputs[i].SatisfiedPolicy.Preimages[j]...) + } } c.SiafundOutputs = append([]SiafundOutput(nil), c.SiafundOutputs...) c.FileContracts = append([]V2FileContract(nil), c.FileContracts...) @@ -954,11 +960,11 @@ func (uk UnlockKey) MarshalText() ([]byte, error) { func (uk *UnlockKey) UnmarshalText(b []byte) error { parts := bytes.Split(b, []byte(":")) if len(parts) != 2 { - return fmt.Errorf("decoding :: failed: wrong number of separators") + return fmt.Errorf("decoding : failed: wrong number of separators") } else if err := uk.Algorithm.UnmarshalText(parts[0]); err != nil { - return fmt.Errorf("decoding :: failed: %w", err) + return fmt.Errorf("decoding : failed: %w", err) } else if uk.Key, err = hex.DecodeString(string(parts[1])); err != nil { - return fmt.Errorf("decoding :: failed: %w", err) + return fmt.Errorf("decoding : failed: %w", err) } return nil } From c034b5bff828b8fcd6494c0e2e97740a25a6eac9 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 5 Sep 2023 18:35:08 -0400 Subject: [PATCH 38/53] chain: Add v2 txpool methods --- chain/manager.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/chain/manager.go b/chain/manager.go index dc36190b..b39340eb 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -796,6 +796,28 @@ func (m *Manager) PoolTransactions() []types.Transaction { return append([]types.Transaction(nil), 
m.txpool.txns...) } +// V2PoolTransaction returns the v2 transaction with the specified ID, if it is +// currently in the pool. +func (m *Manager) V2PoolTransaction(id types.TransactionID) (types.V2Transaction, bool) { + m.mu.Lock() + defer m.mu.Unlock() + m.revalidatePool() + i, ok := m.txpool.indices[id] + if !ok { + return types.V2Transaction{}, false + } + return m.txpool.v2txns[i], ok +} + +// V2PoolTransactions returns the v2 transactions currently in the txpool. Any +// prefix of the returned slice constitutes a valid transaction set. +func (m *Manager) V2PoolTransactions() []types.V2Transaction { + m.mu.Lock() + defer m.mu.Unlock() + m.revalidatePool() + return append([]types.V2Transaction(nil), m.txpool.v2txns...) +} + // RecommendedFee returns the recommended fee (per weight unit) to ensure a high // probability of inclusion in the next block. func (m *Manager) RecommendedFee() types.Currency { From b74ebc7db3da7d0df725d6445edb432ac9a4cc83 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 5 Sep 2023 18:37:41 -0400 Subject: [PATCH 39/53] types,consensus: Expose txn output IDs For some reason I thought these could be kept internal to consensus, but in practice you need them whenever you want to spend an ephemeral output. One quirk is that some of the methods are "pointlessly" defined on V2Transaction, despite not using the transaction data -- they only use the ID. Why not make these methods of TransactionID? Because then both Transaction and TransactionID would have a SiacoinOutputID method, and you'd Just Have To Know which one to use in which circumstance. Better to forcibly associate v1 IDs with v1 txns and v2 IDs with v2 txns. 
--- consensus/update.go | 35 ++++++------------ types/types.go | 90 ++++++++++++++++++++++++++++++++++++++------- 2 files changed, 89 insertions(+), 36 deletions(-) diff --git a/consensus/update.go b/consensus/update.go index cafbb26d..64c55b7e 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -453,24 +453,13 @@ func (ms *MidState) ApplyTransaction(txn types.Transaction, ts V1TransactionSupp // ApplyV2Transaction applies a v2 transaction to the MidState. func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { txid := txn.ID() - var elems uint64 - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - nextElement := func() types.StateElement { - h.Reset() - types.SpecifierElementID.EncodeTo(h.E) - txid.EncodeTo(h.E) - h.E.WriteUint64(elems) - elems++ - return types.StateElement{ID: h.Sum()} - } for _, sci := range txn.SiacoinInputs { ms.spendSiacoinElement(sci.Parent, txid) } - for _, sco := range txn.SiacoinOutputs { + for i, sco := range txn.SiacoinOutputs { ms.addSiacoinElement(types.SiacoinElement{ - StateElement: nextElement(), + StateElement: types.StateElement{ID: types.Hash256(txn.SiacoinOutputID(txid, i))}, SiacoinOutput: sco, }) } @@ -478,21 +467,21 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { ms.spendSiafundElement(sfi.Parent, txid) claimPortion := ms.siafundPool.Sub(sfi.Parent.ClaimStart).Div64(ms.base.SiafundCount()).Mul64(sfi.Parent.SiafundOutput.Value) ms.addSiacoinElement(types.SiacoinElement{ - StateElement: nextElement(), + StateElement: types.StateElement{ID: types.Hash256(types.SiafundOutputID(sfi.Parent.ID).ClaimOutputID())}, SiacoinOutput: types.SiacoinOutput{Value: claimPortion, Address: sfi.ClaimAddress}, MaturityHeight: ms.base.MaturityHeight(), }) } - for _, sfo := range txn.SiafundOutputs { + for i, sfo := range txn.SiafundOutputs { ms.addSiafundElement(types.SiafundElement{ - StateElement: nextElement(), + StateElement: types.StateElement{ID: types.Hash256(txn.SiafundOutputID(txid, 
i))}, SiafundOutput: sfo, ClaimStart: ms.siafundPool, }) } - for _, fc := range txn.FileContracts { + for i, fc := range txn.FileContracts { ms.addV2FileContractElement(types.V2FileContractElement{ - StateElement: nextElement(), + StateElement: types.StateElement{ID: types.Hash256(txn.V2FileContractID(txid, i))}, V2FileContract: fc, }) } @@ -511,7 +500,7 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { renter.Value = renter.Value.Sub(r.RenterRollover) host.Value = host.Value.Sub(r.HostRollover) ms.addV2FileContractElement(types.V2FileContractElement{ - StateElement: nextElement(), + StateElement: types.StateElement{ID: types.Hash256(types.FileContractID(fce.ID).V2RenewalID())}, V2FileContract: r.InitialRevision, }) case *types.V2StorageProof: @@ -522,19 +511,19 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { renter, host = fc.RenterOutput, fc.MissedHostOutput() } ms.addSiacoinElement(types.SiacoinElement{ - StateElement: nextElement(), + StateElement: types.StateElement{ID: types.Hash256(types.FileContractID(fce.ID).V2RenterOutputID())}, SiacoinOutput: renter, MaturityHeight: ms.base.MaturityHeight(), }) ms.addSiacoinElement(types.SiacoinElement{ - StateElement: nextElement(), + StateElement: types.StateElement{ID: types.Hash256(types.FileContractID(fce.ID).V2HostOutputID())}, SiacoinOutput: host, MaturityHeight: ms.base.MaturityHeight(), }) } - for _, a := range txn.Attestations { + for i, a := range txn.Attestations { ms.addAttestationElement(types.AttestationElement{ - StateElement: nextElement(), + StateElement: types.StateElement{ID: txn.AttestationID(txid, i)}, Attestation: a, }) } diff --git a/types/types.go b/types/types.go index c34e2ea1..815c5ba2 100644 --- a/types/types.go +++ b/types/types.go @@ -45,9 +45,9 @@ var ( SpecifierClaimOutput = NewSpecifier("claim output") SpecifierFileContract = NewSpecifier("file contract") SpecifierStorageProof = NewSpecifier("storage proof") + SpecifierAttestation = 
NewSpecifier("attestation") SpecifierFoundation = NewSpecifier("foundation") SpecifierEntropy = NewSpecifier("entropy") - SpecifierElementID = NewSpecifier("element id") ) // A Hash256 is a generic 256-bit cryptographic hash. @@ -320,6 +320,38 @@ func (fcid FileContractID) MissedOutputID(i int) SiacoinOutputID { return SiacoinOutputID(h.Sum()) } +// V2RenterOutputID returns the ID of the renter output for a v2 contract. +func (fcid FileContractID) V2RenterOutputID() SiacoinOutputID { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierSiacoinOutput.EncodeTo(h.E) + fcid.EncodeTo(h.E) + h.E.WriteUint64(0) + return SiacoinOutputID(h.Sum()) +} + +// V2HostOutputID returns the ID of the host output for a v2 contract. +func (fcid FileContractID) V2HostOutputID() SiacoinOutputID { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierSiacoinOutput.EncodeTo(h.E) + fcid.EncodeTo(h.E) + h.E.WriteUint64(1) + return SiacoinOutputID(h.Sum()) +} + +// V2RenewalID returns the ID of the renewal of a v2 contract. +func (fcid FileContractID) V2RenewalID() FileContractID { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierFileContract.EncodeTo(h.E) + fcid.EncodeTo(h.E) + return FileContractID(h.Sum()) +} + // A FileContractRevision updates the state of an existing file contract. type FileContractRevision struct { ParentID FileContractID `json:"parentID"` @@ -725,18 +757,56 @@ func (txn *V2Transaction) ID() TransactionID { return TransactionID(h.Sum()) } -// EphemeralSiacoinOutput returns a SiacoinElement for the siacoin output at -// index i. -func (txn *V2Transaction) EphemeralSiacoinOutput(i int) SiacoinElement { +// SiacoinOutputID returns the ID for the siacoin output at index i. 
+func (*V2Transaction) SiacoinOutputID(txid TransactionID, i int) SiacoinOutputID { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) h.Reset() SpecifierSiacoinOutput.EncodeTo(h.E) - txn.ID().EncodeTo(h.E) + txid.EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return SiacoinOutputID(h.Sum()) +} + +// SiafundOutputID returns the ID for the siafund output at index i. +func (*V2Transaction) SiafundOutputID(txid TransactionID, i int) SiafundOutputID { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierSiafundOutput.EncodeTo(h.E) + txid.EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return SiafundOutputID(h.Sum()) +} + +// V2FileContractID returns the ID for the v2 file contract at index i. +func (*V2Transaction) V2FileContractID(txid TransactionID, i int) FileContractID { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierFileContract.EncodeTo(h.E) + txid.EncodeTo(h.E) + h.E.WriteUint64(uint64(i)) + return FileContractID(h.Sum()) +} + +// AttestationID returns the ID for the attestation at index i. +func (*V2Transaction) AttestationID(txid TransactionID, i int) Hash256 { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + SpecifierAttestation.EncodeTo(h.E) + txid.EncodeTo(h.E) h.E.WriteUint64(uint64(i)) + return h.Sum() +} + +// EphemeralSiacoinOutput returns a SiacoinElement for the siacoin output at +// index i. +func (txn *V2Transaction) EphemeralSiacoinOutput(i int) SiacoinElement { return SiacoinElement{ StateElement: StateElement{ - ID: h.Sum(), + ID: Hash256(txn.SiacoinOutputID(txn.ID(), i)), LeafIndex: EphemeralLeafIndex, }, SiacoinOutput: txn.SiacoinOutputs[i], @@ -746,15 +816,9 @@ func (txn *V2Transaction) EphemeralSiacoinOutput(i int) SiacoinElement { // EphemeralSiafundOutput returns a SiafundElement for the siafund output at // index i. 
func (txn *V2Transaction) EphemeralSiafundOutput(i int) SiafundElement { - h := hasherPool.Get().(*Hasher) - defer hasherPool.Put(h) - h.Reset() - SpecifierSiafundOutput.EncodeTo(h.E) - txn.ID().EncodeTo(h.E) - h.E.WriteUint64(uint64(i)) return SiafundElement{ StateElement: StateElement{ - ID: h.Sum(), + ID: Hash256(txn.SiafundOutputID(txn.ID(), i)), LeafIndex: EphemeralLeafIndex, }, SiafundOutput: txn.SiafundOutputs[i], From ad263434ebac28f97715a768175d2db599ea72a8 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 5 Sep 2023 18:44:55 -0400 Subject: [PATCH 40/53] consensus: Track contract resolutiosn in MidState --- chain/db.go | 4 ++-- chain/manager.go | 18 ++++++++++++++---- consensus/update.go | 37 ++++++++++++++++++++++++------------ consensus/validation.go | 12 ++++++++---- consensus/validation_test.go | 2 +- types/encoding.go | 4 ++-- 6 files changed, 52 insertions(+), 25 deletions(-) diff --git a/chain/db.go b/chain/db.go index b4a53b99..4643b7a4 100644 --- a/chain/db.go +++ b/chain/db.go @@ -383,7 +383,7 @@ func (db *DBStore) applyElements(cau consensus.ApplyUpdate) { } db.putElementProof(sfe.StateElement) }) - cau.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool) { + cau.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool) { if resolved { db.deleteFileContractElement(types.FileContractID(fce.ID)) db.deleteFileContractExpiration(types.FileContractID(fce.ID), fce.FileContract.WindowEnd) @@ -402,7 +402,7 @@ func (db *DBStore) applyElements(cau consensus.ApplyUpdate) { } func (db *DBStore) revertElements(cru consensus.RevertUpdate) { - cru.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool) { + cru.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool) { if resolved { // contract no longer resolved; restore it 
db.putFileContractElement(fce) diff --git a/chain/manager.go b/chain/manager.go index b39340eb..44fe5d9e 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -668,8 +668,13 @@ func (m *Manager) applyPoolUpdate(cau consensus.ApplyUpdate) { newElements[sfe.ID] = sfe.StateElement } }) - cau.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, spent bool) { - if !spent { + cau.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool) { + if !resolved { + newElements[fce.ID] = fce.StateElement + } + }) + cau.ForEachV2FileContractElement(func(fce types.V2FileContractElement, rev *types.V2FileContractElement, res types.V2FileContractResolutionType) { + if res != nil { newElements[fce.ID] = fce.StateElement } }) @@ -729,8 +734,13 @@ func (m *Manager) revertPoolUpdate(cru consensus.RevertUpdate) { uncreated[sfe.ID] = sfe.StateElement } }) - cru.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, spent bool) { - if !spent { + cru.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool) { + if !resolved { + uncreated[fce.ID] = fce.StateElement + } + }) + cru.ForEachV2FileContractElement(func(fce types.V2FileContractElement, rev *types.V2FileContractElement, res types.V2FileContractResolutionType) { + if res != nil { uncreated[fce.ID] = fce.StateElement } }) diff --git a/consensus/update.go b/consensus/update.go index 64c55b7e..c085895c 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -349,7 +349,8 @@ func (ms *MidState) reviseFileContractElement(fce types.FileContractElement, rev } } -func (ms *MidState) resolveFileContractElement(fce types.FileContractElement, txid types.TransactionID) { +func (ms *MidState) resolveFileContractElement(fce types.FileContractElement, valid bool, txid types.TransactionID) { + ms.res[fce.ID] = valid ms.spends[fce.ID] = txid 
fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) ms.fces = append(ms.fces, fce) @@ -379,7 +380,8 @@ func (ms *MidState) reviseV2FileContractElement(fce types.V2FileContractElement, } } -func (ms *MidState) resolveV2FileContractElement(fce types.V2FileContractElement, txid types.TransactionID) { +func (ms *MidState) resolveV2FileContractElement(fce types.V2FileContractElement, res types.V2FileContractResolutionType, txid types.TransactionID) { + ms.v2res[fce.ID] = res ms.spends[fce.ID] = txid fce.MerkleProof = append([]types.Hash256(nil), fce.MerkleProof...) ms.v2fces = append(ms.v2fces, fce) @@ -429,7 +431,7 @@ func (ms *MidState) ApplyTransaction(txn types.Transaction, ts V1TransactionSupp } for _, sp := range txn.StorageProofs { fce := ms.mustFileContractElement(ts, sp.ParentID) - ms.resolveFileContractElement(fce, txid) + ms.resolveFileContractElement(fce, true, txid) for i, sco := range fce.FileContract.ValidProofOutputs { ms.addSiacoinElement(types.SiacoinElement{ StateElement: types.StateElement{ID: types.Hash256(sp.ParentID.ValidOutputID(i))}, @@ -489,7 +491,7 @@ func (ms *MidState) ApplyV2Transaction(txn types.V2Transaction) { ms.reviseV2FileContractElement(fcr.Parent, fcr.Revision) } for _, fcr := range txn.FileContractResolutions { - ms.resolveV2FileContractElement(fcr.Parent, txid) + ms.resolveV2FileContractElement(fcr.Parent, fcr.Resolution, txid) fce := fcr.Parent fc := fce.V2FileContract @@ -560,7 +562,7 @@ func (ms *MidState) ApplyBlock(b types.Block, bs V1BlockSupplement) { if ms.isSpent(fce.ID) { continue } - ms.resolveFileContractElement(fce, types.TransactionID(bid)) + ms.resolveFileContractElement(fce, false, types.TransactionID(bid)) for i, sco := range fce.FileContract.MissedProofOutputs { ms.addSiacoinElement(types.SiacoinElement{ StateElement: types.StateElement{ID: types.Hash256(types.FileContractID(fce.ID).MissedOutputID(i))}, @@ -625,17 +627,18 @@ func (au ApplyUpdate) ForEachSiafundElement(fn func(sfe 
types.SiafundElement, sp // ForEachFileContractElement calls fn on each file contract element related to // au. If the contract was revised, rev is non-nil. -func (au ApplyUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool)) { +func (au ApplyUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool)) { for _, fce := range au.ms.fces { - fn(fce, au.ms.revs[fce.ID], au.ms.isSpent(fce.ID)) + fn(fce, au.ms.revs[fce.ID], au.ms.isSpent(fce.ID), au.ms.res[fce.ID]) } } // ForEachV2FileContractElement calls fn on each V2 file contract element -// related to au. If the contract was revised, rev is non-nil. -func (au ApplyUpdate) ForEachV2FileContractElement(fn func(fce types.V2FileContractElement, rev *types.V2FileContractElement, resolved bool)) { +// related to au. If the contract was revised, rev is non-nil. If the contract +// was resolved, res is non-nil. +func (au ApplyUpdate) ForEachV2FileContractElement(fn func(fce types.V2FileContractElement, rev *types.V2FileContractElement, res types.V2FileContractResolutionType)) { for _, fce := range au.ms.v2fces { - fn(fce, au.ms.v2revs[fce.ID], au.ms.isSpent(fce.ID)) + fn(fce, au.ms.v2revs[fce.ID], au.ms.v2res[fce.ID]) } } @@ -697,10 +700,20 @@ func (ru RevertUpdate) ForEachSiafundElement(fn func(sfe types.SiafundElement, s // ForEachFileContractElement calls fn on each file contract element related to // ru. If the contract was revised, rev is non-nil. 
-func (ru RevertUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved bool)) { +func (ru RevertUpdate) ForEachFileContractElement(fn func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool)) { for i := range ru.ms.fces { fce := ru.ms.fces[len(ru.ms.fces)-i-1] - fn(fce, ru.ms.revs[fce.ID], ru.ms.isSpent(fce.ID)) + fn(fce, ru.ms.revs[fce.ID], ru.ms.isSpent(fce.ID), ru.ms.res[fce.ID]) + } +} + +// ForEachV2FileContractElement calls fn on each V2 file contract element +// related to ru. If the contract was revised, rev is non-nil. If the contract +// was resolved, res is non-nil. +func (ru RevertUpdate) ForEachV2FileContractElement(fn func(fce types.V2FileContractElement, rev *types.V2FileContractElement, res types.V2FileContractResolutionType)) { + for i := range ru.ms.v2fces { + fce := ru.ms.v2fces[len(ru.ms.v2fces)-i-1] + fn(fce, ru.ms.v2revs[fce.ID], ru.ms.v2res[fce.ID]) } } diff --git a/consensus/validation.go b/consensus/validation.go index 17649319..212b5d85 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -109,7 +109,9 @@ type MidState struct { ephemeral map[types.Hash256]int // indices into element slices spends map[types.Hash256]types.TransactionID revs map[types.Hash256]*types.FileContractElement + res map[types.Hash256]bool v2revs map[types.Hash256]*types.V2FileContractElement + v2res map[types.Hash256]types.V2FileContractResolutionType siafundPool types.Currency foundationPrimary types.Address foundationFailsafe types.Address @@ -190,7 +192,9 @@ func NewMidState(s State) *MidState { ephemeral: make(map[types.Hash256]int), spends: make(map[types.Hash256]types.TransactionID), revs: make(map[types.Hash256]*types.FileContractElement), + res: make(map[types.Hash256]bool), v2revs: make(map[types.Hash256]*types.V2FileContractElement), + v2res: make(map[types.Hash256]types.V2FileContractResolutionType), siafundPool: s.SiafundPool, foundationPrimary: 
s.FoundationPrimaryAddress, foundationFailsafe: s.FoundationFailsafeAddress, @@ -639,8 +643,8 @@ func validateV2CurrencyValues(ms *MidState, txn types.V2Transaction) error { for _, fc := range txn.FileContractRevisions { addContract(fc.Revision) } - for i, res := range txn.FileContractResolutions { - switch r := res.Resolution.(type) { + for i, fcr := range txn.FileContractResolutions { + switch r := fcr.Resolution.(type) { case *types.V2FileContractRenewal: if r.InitialRevision.RenterOutput.Value.IsZero() && r.InitialRevision.HostOutput.Value.IsZero() { return fmt.Errorf("file contract renewal %v creates contract with zero value", i) @@ -697,8 +701,8 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { for _, fc := range txn.FileContracts { outputSum = outputSum.Add(fc.RenterOutput.Value).Add(fc.HostOutput.Value).Add(ms.base.V2FileContractTax(fc)) } - for _, res := range txn.FileContractResolutions { - if r, ok := res.Resolution.(*types.V2FileContractRenewal); ok { + for _, fcr := range txn.FileContractResolutions { + if r, ok := fcr.Resolution.(*types.V2FileContractRenewal); ok { // a renewal creates a new contract, optionally "rolling over" funds // from the old contract inputSum = inputSum.Add(r.RenterRollover) diff --git a/consensus/validation_test.go b/consensus/validation_test.go index ecad00ef..9fa494e9 100644 --- a/consensus/validation_test.go +++ b/consensus/validation_test.go @@ -507,7 +507,7 @@ func TestValidateV2Block(t *testing.T) { sfes = append(sfes, sfe) }) var fces []types.V2FileContractElement - cau.ForEachV2FileContractElement(func(fce types.V2FileContractElement, rev *types.V2FileContractElement, resolved bool) { + cau.ForEachV2FileContractElement(func(fce types.V2FileContractElement, rev *types.V2FileContractElement, res types.V2FileContractResolutionType) { fces = append(fces, fce) }) diff --git a/types/encoding.go b/types/encoding.go index 3f10957f..5fcd9801 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ 
-693,8 +693,8 @@ func (txn V2Transaction) EncodeTo(e *Encoder) { } if fields&(1<<6) != 0 { e.WritePrefix(len(txn.FileContractResolutions)) - for _, res := range txn.FileContractResolutions { - res.EncodeTo(e) + for _, fcr := range txn.FileContractResolutions { + fcr.EncodeTo(e) } } if fields&(1<<7) != 0 { From 83ec544bad2855fe92665184de32fff3c605c771 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 5 Sep 2023 18:48:41 -0400 Subject: [PATCH 41/53] consensus: Move MidState to state.go --- consensus/state.go | 93 ++++++++++++++++++++++++++++++++++++++ consensus/validation.go | 98 ----------------------------------------- 2 files changed, 93 insertions(+), 98 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 16df7ffd..d1625eef 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -633,6 +633,99 @@ func (s State) AttestationSigHash(a types.Attestation) types.Hash256 { return h.Sum() } +// A MidState represents the state of the chain within a block. +type MidState struct { + base State + ephemeral map[types.Hash256]int // indices into element slices + spends map[types.Hash256]types.TransactionID + revs map[types.Hash256]*types.FileContractElement + res map[types.Hash256]bool + v2revs map[types.Hash256]*types.V2FileContractElement + v2res map[types.Hash256]types.V2FileContractResolutionType + siafundPool types.Currency + foundationPrimary types.Address + foundationFailsafe types.Address + + // elements updated/added by block + sces []types.SiacoinElement + sfes []types.SiafundElement + fces []types.FileContractElement + v2fces []types.V2FileContractElement + aes []types.AttestationElement + cie types.ChainIndexElement +} + +func (ms *MidState) siacoinElement(ts V1TransactionSupplement, id types.SiacoinOutputID) (types.SiacoinElement, bool) { + if i, ok := ms.ephemeral[types.Hash256(id)]; ok { + return ms.sces[i], true + } + return ts.siacoinElement(id) +} + +func (ms *MidState) siafundElement(ts V1TransactionSupplement, id 
types.SiafundOutputID) (types.SiafundElement, bool) { + if i, ok := ms.ephemeral[types.Hash256(id)]; ok { + return ms.sfes[i], true + } + return ts.siafundElement(id) +} + +func (ms *MidState) fileContractElement(ts V1TransactionSupplement, id types.FileContractID) (types.FileContractElement, bool) { + if i, ok := ms.ephemeral[types.Hash256(id)]; ok { + return ms.fces[i], true + } + return ts.fileContractElement(id) +} + +func (ms *MidState) mustSiacoinElement(ts V1TransactionSupplement, id types.SiacoinOutputID) types.SiacoinElement { + sce, ok := ms.siacoinElement(ts, id) + if !ok { + panic("missing SiacoinElement") + } + return sce +} + +func (ms *MidState) mustSiafundElement(ts V1TransactionSupplement, id types.SiafundOutputID) types.SiafundElement { + sfe, ok := ms.siafundElement(ts, id) + if !ok { + panic("missing SiafundElement") + } + return sfe +} + +func (ms *MidState) mustFileContractElement(ts V1TransactionSupplement, id types.FileContractID) types.FileContractElement { + fce, ok := ms.fileContractElement(ts, id) + if !ok { + panic("missing FileContractElement") + } + return fce +} + +func (ms *MidState) spent(id types.Hash256) (types.TransactionID, bool) { + txid, ok := ms.spends[id] + return txid, ok +} + +func (ms *MidState) isSpent(id types.Hash256) bool { + _, ok := ms.spends[id] + return ok +} + +// NewMidState constructs a MidState initialized to the provided base state. 
+func NewMidState(s State) *MidState { + return &MidState{ + base: s, + ephemeral: make(map[types.Hash256]int), + spends: make(map[types.Hash256]types.TransactionID), + revs: make(map[types.Hash256]*types.FileContractElement), + res: make(map[types.Hash256]bool), + v2revs: make(map[types.Hash256]*types.V2FileContractElement), + v2res: make(map[types.Hash256]types.V2FileContractResolutionType), + siafundPool: s.SiafundPool, + foundationPrimary: s.FoundationPrimaryAddress, + foundationFailsafe: s.FoundationFailsafeAddress, + } +} + // A V1TransactionSupplement contains elements that are associated with a v1 // transaction, but not included in the transaction. For example, v1 // transactions reference the ID of each SiacoinOutput they spend, but do not diff --git a/consensus/validation.go b/consensus/validation.go index 212b5d85..460b9d0f 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -103,104 +103,6 @@ func ValidateOrphan(s State, b types.Block) error { return nil } -// A MidState represents the state of the blockchain within a block. -type MidState struct { - base State - ephemeral map[types.Hash256]int // indices into element slices - spends map[types.Hash256]types.TransactionID - revs map[types.Hash256]*types.FileContractElement - res map[types.Hash256]bool - v2revs map[types.Hash256]*types.V2FileContractElement - v2res map[types.Hash256]types.V2FileContractResolutionType - siafundPool types.Currency - foundationPrimary types.Address - foundationFailsafe types.Address - - // elements that have been updated or added by the block - sces []types.SiacoinElement - sfes []types.SiafundElement - fces []types.FileContractElement - v2fces []types.V2FileContractElement - aes []types.AttestationElement - cie types.ChainIndexElement -} - -// Index returns the index of the MidState's base state. 
-func (ms *MidState) Index() types.ChainIndex { - return ms.base.Index -} - -func (ms *MidState) siacoinElement(ts V1TransactionSupplement, id types.SiacoinOutputID) (types.SiacoinElement, bool) { - if i, ok := ms.ephemeral[types.Hash256(id)]; ok { - return ms.sces[i], true - } - return ts.siacoinElement(id) -} - -func (ms *MidState) siafundElement(ts V1TransactionSupplement, id types.SiafundOutputID) (types.SiafundElement, bool) { - if i, ok := ms.ephemeral[types.Hash256(id)]; ok { - return ms.sfes[i], true - } - return ts.siafundElement(id) -} - -func (ms *MidState) fileContractElement(ts V1TransactionSupplement, id types.FileContractID) (types.FileContractElement, bool) { - if i, ok := ms.ephemeral[types.Hash256(id)]; ok { - return ms.fces[i], true - } - return ts.fileContractElement(id) -} - -func (ms *MidState) mustSiacoinElement(ts V1TransactionSupplement, id types.SiacoinOutputID) types.SiacoinElement { - sce, ok := ms.siacoinElement(ts, id) - if !ok { - panic("missing SiacoinElement") - } - return sce -} - -func (ms *MidState) mustSiafundElement(ts V1TransactionSupplement, id types.SiafundOutputID) types.SiafundElement { - sfe, ok := ms.siafundElement(ts, id) - if !ok { - panic("missing SiafundElement") - } - return sfe -} - -func (ms *MidState) mustFileContractElement(ts V1TransactionSupplement, id types.FileContractID) types.FileContractElement { - fce, ok := ms.fileContractElement(ts, id) - if !ok { - panic("missing FileContractElement") - } - return fce -} - -func (ms *MidState) spent(id types.Hash256) (types.TransactionID, bool) { - txid, ok := ms.spends[id] - return txid, ok -} - -func (ms *MidState) isSpent(id types.Hash256) bool { - _, ok := ms.spends[id] - return ok -} - -// NewMidState constructs a MidState initialized to the provided base state. 
-func NewMidState(s State) *MidState { - return &MidState{ - base: s, - ephemeral: make(map[types.Hash256]int), - spends: make(map[types.Hash256]types.TransactionID), - revs: make(map[types.Hash256]*types.FileContractElement), - res: make(map[types.Hash256]bool), - v2revs: make(map[types.Hash256]*types.V2FileContractElement), - v2res: make(map[types.Hash256]types.V2FileContractResolutionType), - siafundPool: s.SiafundPool, - foundationPrimary: s.FoundationPrimaryAddress, - foundationFailsafe: s.FoundationFailsafeAddress, - } -} - func validateCurrencyOverflow(ms *MidState, txn types.Transaction) error { // Check that the sum of all currency values in the transaction will not // overflow our 128-bit representation. This allows us to safely add From 446cae66c5d0a702baf3d8ee247343466383e67a Mon Sep 17 00:00:00 2001 From: lukechampine Date: Mon, 11 Sep 2023 10:55:06 -0400 Subject: [PATCH 42/53] types: Add missing SpendPolicy types to Encoder --- types/encoding.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/types/encoding.go b/types/encoding.go index 5fcd9801..33d6f996 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -442,7 +442,9 @@ func (p SpendPolicy) encodePolicy(e *Encoder) { const ( opInvalid = iota opAbove + opAfter opPublicKey + opHash opThreshold opOpaque opUnlockConditions @@ -451,9 +453,15 @@ func (p SpendPolicy) encodePolicy(e *Encoder) { case PolicyTypeAbove: e.WriteUint8(opAbove) e.WriteUint64(uint64(p)) + case PolicyTypeAfter: + e.WriteUint8(opAfter) + e.WriteTime(time.Time(p)) case PolicyTypePublicKey: e.WriteUint8(opPublicKey) PublicKey(p).EncodeTo(e) + case PolicyTypeHash: + e.WriteUint8(opHash) + Hash256(p).EncodeTo(e) case PolicyTypeThreshold: e.WriteUint8(opThreshold) e.WriteUint8(p.N) @@ -994,7 +1002,9 @@ func (p *SpendPolicy) DecodeFrom(d *Decoder) { const ( opInvalid = iota opAbove + opAfter opPublicKey + opHash opThreshold opOpaque opUnlockConditions @@ -1006,10 +1016,16 @@ func (p *SpendPolicy) DecodeFrom(d 
*Decoder) { switch op := d.ReadUint8(); op { case opAbove: return PolicyAbove(d.ReadUint64()), nil + case opAfter: + return PolicyAfter(d.ReadTime()), nil case opPublicKey: var pk PublicKey pk.DecodeFrom(d) return PolicyPublicKey(pk), nil + case opHash: + var h Hash256 + h.DecodeFrom(d) + return PolicyHash(h), nil case opThreshold: n := d.ReadUint8() of := make([]SpendPolicy, d.ReadUint8()) From 96643ee9298876e928094c964c852338fe24854c Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 12 Sep 2023 14:39:45 -0400 Subject: [PATCH 43/53] types,consensus: Fix broken tests --- consensus/validation.go | 36 ++++++++++++++++++---- consensus/validation_test.go | 58 ++++++++++++++++-------------------- types/hash.go | 2 +- types/policy_test.go | 37 ++++++++++++++++++----- 4 files changed, 86 insertions(+), 47 deletions(-) diff --git a/consensus/validation.go b/consensus/validation.go index 460b9d0f..f49495ad 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -415,28 +415,38 @@ func validateArbitraryData(ms *MidState, txn types.Transaction) error { func validateSignatures(ms *MidState, txn types.Transaction) error { // build a map of all outstanding signatures // - // NOTE: siad checks for double-spends here, but this is redundant + // NOTE: we also check for intra-transaction double-spends here type sigMapEntry struct { need uint64 keys []types.UnlockKey used []bool } sigMap := make(map[types.Hash256]*sigMapEntry) - addEntry := func(id types.Hash256, uc types.UnlockConditions) { + addEntry := func(id types.Hash256, uc types.UnlockConditions) bool { + if _, ok := sigMap[id]; ok { + return false + } sigMap[id] = &sigMapEntry{ need: uc.SignaturesRequired, keys: uc.PublicKeys, used: make([]bool, len(uc.PublicKeys)), } + return true } for _, sci := range txn.SiacoinInputs { - addEntry(types.Hash256(sci.ParentID), sci.UnlockConditions) + if !addEntry(types.Hash256(sci.ParentID), sci.UnlockConditions) { + return fmt.Errorf("transaction spends siacoin input %v 
more than once", sci.ParentID) + } } for _, sfi := range txn.SiafundInputs { - addEntry(types.Hash256(sfi.ParentID), sfi.UnlockConditions) + if !addEntry(types.Hash256(sfi.ParentID), sfi.UnlockConditions) { + return fmt.Errorf("transaction spends siafund input %v more than once", sfi.ParentID) + } } for _, fcr := range txn.FileContractRevisions { - addEntry(types.Hash256(fcr.ParentID), fcr.UnlockConditions) + if !addEntry(types.Hash256(fcr.ParentID), fcr.UnlockConditions) { + return fmt.Errorf("transaction revises file contract %v more than once", fcr.ParentID) + } } for i, sig := range txn.Signatures { @@ -567,10 +577,14 @@ func validateV2CurrencyValues(ms *MidState, txn types.V2Transaction) error { func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { sigHash := ms.base.InputSigHash(txn) + spent := make(map[types.Hash256]int) for i, sci := range txn.SiacoinInputs { if txid, ok := ms.spent(sci.Parent.ID); ok { return fmt.Errorf("siacoin input %v double-spends parent output (previously spent in %v)", i, txid) + } else if j, ok := spent[sci.Parent.ID]; ok { + return fmt.Errorf("siacoin input %v double-spends parent output (previously spent by input %v)", i, j) } + spent[sci.Parent.ID] = i // check accumulator if sci.Parent.LeafIndex == types.EphemeralLeafIndex { @@ -624,10 +638,14 @@ func validateV2Siacoins(ms *MidState, txn types.V2Transaction) error { func validateV2Siafunds(ms *MidState, txn types.V2Transaction) error { sigHash := ms.base.InputSigHash(txn) + spent := make(map[types.Hash256]int) for i, sfi := range txn.SiafundInputs { if txid, ok := ms.spent(sfi.Parent.ID); ok { return fmt.Errorf("siafund input %v double-spends parent output (previously spent in %v)", i, txid) + } else if j, ok := spent[sfi.Parent.ID]; ok { + return fmt.Errorf("siafund input %v double-spends parent output (previously spent by input %v)", i, j) } + spent[sfi.Parent.ID] = i // check accumulator if sfi.Parent.LeafIndex == types.EphemeralLeafIndex { @@ -674,9 +692,15 
@@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { return errors.New("transaction both resolves a file contract and creates new outputs") } + revised := make(map[types.Hash256]int) + resolved := make(map[types.Hash256]int) validateParent := func(fce types.V2FileContractElement) error { if txid, ok := ms.spent(fce.ID); ok { return fmt.Errorf("has already been resolved in transaction %v", txid) + } else if i, ok := revised[fce.ID]; ok { + return fmt.Errorf("has already been revised by contract revision %v", i) + } else if i, ok := resolved[fce.ID]; ok { + return fmt.Errorf("has already been resolved by contract resolution %v", i) } else if !ms.base.Elements.containsUnresolvedV2FileContractElement(fce) { if ms.base.Elements.containsResolvedV2FileContractElement(fce) { return errors.New("has already been resolved in a previous block") @@ -753,6 +777,7 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { } else if err := validateRevision(cur, rev); err != nil { return fmt.Errorf("file contract revision %v %s", i, err) } + revised[fcr.Parent.ID] = i } for i, fcr := range txn.FileContractResolutions { @@ -814,6 +839,7 @@ func validateV2FileContracts(ms *MidState, txn types.V2Transaction) error { return fmt.Errorf("file contract expiration %v cannot be submitted until after expiration height (%v) ", i, fc.ExpirationHeight) } } + resolved[fcr.Parent.ID] = i } return nil diff --git a/consensus/validation_test.go b/consensus/validation_test.go index 9fa494e9..702e25d9 100644 --- a/consensus/validation_test.go +++ b/consensus/validation_test.go @@ -399,6 +399,14 @@ func TestValidateBlock(t *testing.T) { }) }, }, + { + "double-spent input", + func(b *types.Block) { + txn := &b.Transactions[0] + txn.SiacoinInputs = append(txn.SiacoinInputs, txn.SiacoinInputs[0]) + txn.SiacoinOutputs[0].Value = txn.SiacoinOutputs[0].Value.Add(types.Siacoins(100)) + }, + }, { "conflicting revisions in same transaction", func(b *types.Block) { 
@@ -452,15 +460,11 @@ func TestValidateV2Block(t *testing.T) { txn.FileContractRevisions[i].Revision.RenterSignature = renterPrivateKey.SignHash(cs.ContractSigHash(txn.FileContractRevisions[i].Revision)) txn.FileContractRevisions[i].Revision.HostSignature = hostPrivateKey.SignHash(cs.ContractSigHash(txn.FileContractRevisions[i].Revision)) } - sp := types.SatisfiedPolicy{ - Policy: giftPolicy, - Signatures: []types.Signature{giftPrivateKey.SignHash(cs.InputSigHash(*txn))}, - } for i := range txn.SiacoinInputs { - txn.SiacoinInputs[i].SatisfiedPolicy = sp + txn.SiacoinInputs[i].SatisfiedPolicy.Signatures = []types.Signature{giftPrivateKey.SignHash(cs.InputSigHash(*txn))} } for i := range txn.SiafundInputs { - txn.SiafundInputs[i].SatisfiedPolicy = sp + txn.SiafundInputs[i].SatisfiedPolicy.Signatures = []types.Signature{giftPrivateKey.SignHash(cs.InputSigHash(*txn))} } } @@ -523,8 +527,6 @@ func TestValidateV2Block(t *testing.T) { rev1 := v2GiftFC rev1.RevisionNumber++ - rev2 := rev1 - rev2.RevisionNumber++ minerFee := types.Siacoins(1) b := types.Block{ ParentID: genesisBlock.ID(), @@ -533,11 +535,13 @@ func TestValidateV2Block(t *testing.T) { Height: 1, Transactions: []types.V2Transaction{{ SiacoinInputs: []types.V2SiacoinInput{{ - Parent: sces[0], + Parent: sces[0], + SatisfiedPolicy: types.SatisfiedPolicy{Policy: giftPolicy}, }}, SiafundInputs: []types.V2SiafundInput{{ - Parent: sfes[0], - ClaimAddress: types.VoidAddress, + Parent: sfes[0], + ClaimAddress: types.VoidAddress, + SatisfiedPolicy: types.SatisfiedPolicy{Policy: giftPolicy}, }}, SiacoinOutputs: []types.SiacoinOutput{ {Value: giftAmountSC.Sub(minerFee).Sub(difference), Address: giftAddress}, @@ -548,14 +552,7 @@ func TestValidateV2Block(t *testing.T) { }, FileContracts: []types.V2FileContract{fc}, FileContractRevisions: []types.V2FileContractRevision{ - { - Parent: fces[0], - Revision: rev1, - }, - { - Parent: fces[0], - Revision: rev2, - }, + {Parent: fces[0], Revision: rev1}, }, MinerFee: minerFee, }}, 
@@ -716,20 +713,6 @@ func TestValidateV2Block(t *testing.T) { txn.SiafundInputs[0].SatisfiedPolicy.Policy = types.AnyoneCanSpend() }, }, - { - "siacoin input claiming invalid policy", - func(b *types.Block) { - txn := &b.V2.Transactions[0] - txn.SiacoinInputs[0].SatisfiedPolicy.Signatures[0][0] ^= 1 - }, - }, - { - "siafund input claiming invalid policy", - func(b *types.Block) { - txn := &b.V2.Transactions[0] - txn.SiafundInputs[0].SatisfiedPolicy.Signatures[0][0] ^= 1 - }, - }, { "invalid FoundationAddressUpdate", func(b *types.Block) { @@ -804,6 +787,15 @@ func TestValidateV2Block(t *testing.T) { txn.FileContracts[0].TotalCollateral = txn.FileContracts[0].TotalCollateral.Sub(types.Siacoins(1)) }, }, + { + "conflicting revisions in same transaction", + func(b *types.Block) { + txn := &b.V2.Transactions[0] + newRevision := txn.FileContractRevisions[0] + newRevision.Revision.RevisionNumber++ + txn.FileContractRevisions = append(txn.FileContractRevisions, newRevision) + }, + }, } for _, test := range tests { corruptBlock := deepCopyBlock(validBlock) diff --git a/types/hash.go b/types/hash.go index 5f69e2a2..258725c2 100644 --- a/types/hash.go +++ b/types/hash.go @@ -58,7 +58,7 @@ func StandardAddress(pk PublicKey) Address { buf := make([]byte, 12+1+1+len(pk)) copy(buf, "sia/address|") buf[12] = 1 // version - buf[13] = 2 // opPublicKey + buf[13] = 3 // opPublicKey copy(buf[14:], pk[:]) return Address(blake2b.Sum256(buf)) } diff --git a/types/policy_test.go b/types/policy_test.go index 746e49f2..cbc25e5c 100644 --- a/types/policy_test.go +++ b/types/policy_test.go @@ -24,48 +24,56 @@ func TestPolicyVerify(t *testing.T) { sigHash := Hash256{1, 2, 3} for _, test := range []struct { + desc string p SpendPolicy height uint64 sigs []Signature valid bool }{ { + "above 0", PolicyAbove(0), 0, nil, true, }, { + "below 1", PolicyAbove(1), 0, nil, false, }, { + "above 1", PolicyAbove(1), 1, nil, true, }, { + "no signature", PolicyPublicKey(pk), 1, nil, false, }, { + "invalid 
signature", PolicyPublicKey(pk), 1, []Signature{key.SignHash(Hash256{})}, false, }, { + "valid signature", PolicyPublicKey(pk), 1, []Signature{key.SignHash(sigHash)}, true, }, { + "valid signature, invalid height", PolicyThreshold(2, []SpendPolicy{ PolicyAbove(10), PolicyPublicKey(pk), @@ -75,6 +83,7 @@ func TestPolicyVerify(t *testing.T) { false, }, { + "valid height, invalid signature", PolicyThreshold(2, []SpendPolicy{ PolicyAbove(10), PolicyPublicKey(pk), @@ -84,6 +93,7 @@ func TestPolicyVerify(t *testing.T) { false, }, { + "valid height, valid signature", PolicyThreshold(2, []SpendPolicy{ PolicyAbove(10), PolicyPublicKey(pk), @@ -93,33 +103,37 @@ func TestPolicyVerify(t *testing.T) { true, }, { + "lower threshold, valid height", PolicyThreshold(1, []SpendPolicy{ PolicyAbove(10), - PolicyPublicKey(pk), + PolicyOpaque(PolicyPublicKey(pk)), }), 11, - []Signature{key.SignHash(sigHash)}, + nil, true, }, { + "lower threshold, valid signature", PolicyThreshold(1, []SpendPolicy{ - PolicyAbove(10), - PolicyOpaque(PolicyPublicKey(pk)), + PolicyOpaque(PolicyAbove(10)), + PolicyPublicKey(pk), }), 11, []Signature{key.SignHash(sigHash)}, true, }, { + "exceed threshold", PolicyThreshold(1, []SpendPolicy{ - PolicyOpaque(PolicyAbove(10)), + PolicyAbove(10), PolicyPublicKey(pk), }), 11, []Signature{key.SignHash(sigHash)}, - true, + false, }, { + "lower threshold, neither valid", PolicyThreshold(1, []SpendPolicy{ PolicyOpaque(PolicyAbove(10)), PolicyOpaque(PolicyPublicKey(pk)), @@ -129,6 +143,7 @@ func TestPolicyVerify(t *testing.T) { false, }, { + "unlock conditions within threshold", PolicyThreshold(1, []SpendPolicy{ {PolicyTypeUnlockConditions{ PublicKeys: []UnlockKey{pk.UnlockKey()}, @@ -140,6 +155,7 @@ func TestPolicyVerify(t *testing.T) { false, }, { + "unlock conditions, invalid height", SpendPolicy{PolicyTypeUnlockConditions{ Timelock: 10, }}, @@ -148,6 +164,7 @@ func TestPolicyVerify(t *testing.T) { false, }, { + "unlock conditions, insufficient signatures", 
SpendPolicy{PolicyTypeUnlockConditions{ SignaturesRequired: 1000, }}, @@ -156,6 +173,7 @@ func TestPolicyVerify(t *testing.T) { false, }, { + "unlock conditions, wrong signature algorithm", SpendPolicy{PolicyTypeUnlockConditions{ PublicKeys: []UnlockKey{{ Algorithm: SpecifierEntropy, @@ -168,6 +186,7 @@ func TestPolicyVerify(t *testing.T) { false, }, { + "unlock conditions, wrong pubkey", SpendPolicy{PolicyTypeUnlockConditions{ PublicKeys: []UnlockKey{{ Algorithm: SpecifierEd25519, @@ -180,6 +199,7 @@ func TestPolicyVerify(t *testing.T) { false, }, { + "unlock conditions, insufficient signatures", SpendPolicy{PolicyTypeUnlockConditions{ PublicKeys: []UnlockKey{pk.UnlockKey()}, SignaturesRequired: 2, @@ -189,6 +209,7 @@ func TestPolicyVerify(t *testing.T) { false, }, { + "unlock conditions, valid", SpendPolicy{PolicyTypeUnlockConditions{ PublicKeys: []UnlockKey{pk.UnlockKey()}, SignaturesRequired: 1, @@ -199,7 +220,7 @@ func TestPolicyVerify(t *testing.T) { }, } { if err := test.p.Verify(test.height, time.Time{}, sigHash, test.sigs, nil); err != nil && test.valid { - t.Fatal(err) + t.Fatalf("%v: %v", test.desc, err) } else if err == nil && !test.valid { t.Fatal("expected error") } @@ -226,7 +247,7 @@ func TestPolicyGolden(t *testing.T) { PolicyPublicKey(PublicKey{4, 5, 6}), }), }) - if p.Address().String() != "addr:2fb1e5d351aea601e5b507f1f5e021a6aff363951850983f0d930361d17f8ba507f19a409e21" { + if p.Address().String() != "addr:111d2995afa8bf162180a647b9f1eb6a275fe8818e836b69b351871d5caf9c590ed25aec0616" { t.Fatal("wrong address:", p, p.Address()) } } From 930cd6eeca46fc2b6005ffa2cbb79fa74078e760 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Tue, 12 Sep 2023 14:41:51 -0400 Subject: [PATCH 44/53] gateway: Implement v2 gateway protocol --- chain/manager.go | 72 +++++-- consensus/merkle.go | 22 +- consensus/state.go | 36 ++-- consensus/update.go | 26 ++- consensus/validation.go | 2 +- consensus/validation_test.go | 4 +- gateway/encoding.go | 266 
++++++++++++++++++++--- gateway/gateway.go | 293 +++++++++++++++++++++++++ gateway/peer.go | 406 ++++++++++++++++------------------- types/types.go | 24 ++- 10 files changed, 839 insertions(+), 312 deletions(-) create mode 100644 gateway/gateway.go diff --git a/chain/manager.go b/chain/manager.go index 44fe5d9e..1dbfffa5 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -27,7 +27,7 @@ type Checkpoint struct { func (c Checkpoint) EncodeTo(e *types.Encoder) { e.WriteUint8(2) // block (and supplement) version types.V2Block(c.Block).EncodeTo(e) - e.WriteUint8(1) // state version + e.WriteUint8(2) // state version c.State.EncodeTo(e) e.WriteBool(c.Supplement != nil) if c.Supplement != nil { @@ -42,7 +42,7 @@ func (c *Checkpoint) DecodeFrom(d *types.Decoder) { d.SetErr(fmt.Errorf("incompatible block version (%d)", v)) } (*types.V2Block)(&c.Block).DecodeFrom(d) - if v := d.ReadUint8(); v != 1 { + if v := d.ReadUint8(); v != 2 { d.SetErr(fmt.Errorf("incompatible state version (%d)", v)) } c.State.DecodeFrom(d) @@ -149,6 +149,14 @@ func (m *Manager) Tip() types.ChainIndex { return m.TipState().Index } +// SyncCheckpoint returns the block at the specified index, along with its +// parent state. +func (m *Manager) SyncCheckpoint(index types.ChainIndex) (types.Block, consensus.State, bool) { + c, ok := m.store.Checkpoint(index.ID) + pc, ok2 := m.store.Checkpoint(c.Block.ParentID) + return c.Block, pc.State, ok && ok2 +} + // Block returns the block with the specified ID. func (m *Manager) Block(id types.BlockID) (types.Block, bool) { c, ok := m.store.Checkpoint(id) @@ -190,13 +198,12 @@ func (m *Manager) History() ([32]types.BlockID, error) { return history, nil } -// BlocksForHistory fills the provided slice with consecutive blocks from the -// best chain, starting from the "attach point" -- the first ID in the history -// that is present in the best chain (or, if no match is found, genesis). 
-// -// The returned slice may have fewer than len(blocks) elements if the end of the -// best chain is reached. -func (m *Manager) BlocksForHistory(blocks []types.Block, history []types.BlockID) ([]types.Block, error) { +// BlocksForHistory returns up to max consecutive blocks from the best chain, +// starting from the "attach point" -- the first ID in the history that is +// present in the best chain (or, if no match is found, genesis). It also +// returns the number of blocks between the end of the returned slice and the +// current tip. +func (m *Manager) BlocksForHistory(history []types.BlockID, max uint64) ([]types.Block, uint64, error) { m.mu.Lock() defer m.mu.Unlock() var attachHeight uint64 @@ -208,16 +215,19 @@ func (m *Manager) BlocksForHistory(blocks []types.Block, history []types.BlockID break } } + if max > m.tipState.Index.Height-attachHeight { + max = m.tipState.Index.Height - attachHeight + } + blocks := make([]types.Block, max) for i := range blocks { - if index, ok := m.store.BestIndex(attachHeight + uint64(i) + 1); !ok { - return blocks[:i], nil - } else if c, ok := m.store.Checkpoint(index.ID); !ok { - return nil, fmt.Errorf("missing block %v", index) - } else { - blocks[i] = c.Block + index, _ := m.store.BestIndex(attachHeight + uint64(i) + 1) + c, ok := m.store.Checkpoint(index.ID) + if !ok { + return nil, 0, fmt.Errorf("missing block %v", index) } + blocks[i] = c.Block } - return blocks, nil + return blocks, m.tipState.Index.Height - (attachHeight + max), nil } // AddBlocks adds a sequence of blocks to a tracked chain. If the blocks are @@ -828,6 +838,36 @@ func (m *Manager) V2PoolTransactions() []types.V2Transaction { return append([]types.V2Transaction(nil), m.txpool.v2txns...) } +// TransactionsForPartialBlock returns the transactions in the txpool with the +// specified hashes. 
+func (m *Manager) TransactionsForPartialBlock(missing []types.Hash256) (txns []types.Transaction, v2txns []types.V2Transaction) { + m.mu.Lock() + defer m.mu.Unlock() + m.revalidatePool() + want := make(map[types.Hash256]bool) + for _, h := range missing { + want[h] = true + } + // TODO: might want to cache these + for _, txn := range m.txpool.txns { + if h := txn.FullHash(); want[h] { + txns = append(txns, txn) + if delete(want, h); len(want) == 0 { + return + } + } + } + for _, txn := range m.txpool.v2txns { + if h := txn.FullHash(); want[h] { + v2txns = append(v2txns, txn) + if delete(want, h); len(want) == 0 { + return + } + } + } + return +} + // RecommendedFee returns the recommended fee (per weight unit) to ensure a high // probability of inclusion in the next block. func (m *Manager) RecommendedFee() types.Currency { diff --git a/consensus/merkle.go b/consensus/merkle.go index bae23f61..20a30bb9 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -394,7 +394,7 @@ func updateLeaves(leaves []elementLeaf) [64][]elementLeaf { // applyBlock applies the supplied leaves to the accumulator, modifying it and // producing an update. -func (acc *ElementAccumulator) applyBlock(updated, added []elementLeaf) (eau ElementApplyUpdate) { +func (acc *ElementAccumulator) applyBlock(updated, added []elementLeaf) (eau elementApplyUpdate) { eau.updated = updateLeaves(updated) for height, es := range eau.updated { if len(es) > 0 { @@ -412,7 +412,7 @@ func (acc *ElementAccumulator) applyBlock(updated, added []elementLeaf) (eau Ele // under acc, which must be the accumulator prior to the application of those // elements. All of the elements will be marked unspent. The accumulator itself // is not modified. 
-func (acc *ElementAccumulator) revertBlock(updated []elementLeaf) (eru ElementRevertUpdate) { +func (acc *ElementAccumulator) revertBlock(updated []elementLeaf) (eru elementRevertUpdate) { eru.updated = updateLeaves(updated) eru.numLeaves = acc.NumLeaves return @@ -443,17 +443,12 @@ func updateProof(e *types.StateElement, updated *[64][]elementLeaf) { } } -// An ElementApplyUpdate reflects the changes to an ElementAccumulator resulting -// from the application of a block. -type ElementApplyUpdate struct { +type elementApplyUpdate struct { updated [64][]elementLeaf treeGrowth [64][]types.Hash256 } -// UpdateElementProof updates the Merkle proof of the supplied element to -// incorporate the changes made to the accumulator. The element's proof must be -// up-to-date; if it is not, UpdateElementProof may panic. -func (eau *ElementApplyUpdate) UpdateElementProof(e *types.StateElement) { +func (eau *elementApplyUpdate) updateElementProof(e *types.StateElement) { if e.LeafIndex == types.EphemeralLeafIndex { panic("cannot update an ephemeral element") } @@ -461,17 +456,12 @@ func (eau *ElementApplyUpdate) UpdateElementProof(e *types.StateElement) { e.MerkleProof = append(e.MerkleProof, eau.treeGrowth[len(e.MerkleProof)]...) } -// An ElementRevertUpdate reflects the changes to an ElementAccumulator -// resulting from the removal of a block. -type ElementRevertUpdate struct { +type elementRevertUpdate struct { updated [64][]elementLeaf numLeaves uint64 } -// UpdateElementProof updates the Merkle proof of the supplied element to -// incorporate the changes made to the accumulator. The element's proof must be -// up-to-date; if it is not, UpdateElementProof may panic. 
-func (eru *ElementRevertUpdate) UpdateElementProof(e *types.StateElement) { +func (eru *elementRevertUpdate) updateElementProof(e *types.StateElement) { if e.LeafIndex == types.EphemeralLeafIndex { panic("cannot update an ephemeral element") } else if e.LeafIndex >= eru.numLeaves { diff --git a/consensus/state.go b/consensus/state.go index d1625eef..8369b56a 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -495,31 +495,27 @@ func (s State) PartialSigHash(txn types.Transaction, cf types.CoveredFields) typ return h.Sum() } -// Commitment computes the commitment hash for a child block. -func (s State) Commitment(minerAddr types.Address, txns []types.Transaction, v2txns []types.V2Transaction) types.Hash256 { - h := hasherPool.Get().(*types.Hasher) - defer hasherPool.Put(h) - h.Reset() - - // hash the state - s.EncodeTo(h.E) - stateHash := h.Sum() - - // hash the transactions +// TransactionsCommitment returns the commitment hash covering the transactions +// that comprise a child block. +func (s *State) TransactionsCommitment(txns []types.Transaction, v2txns []types.V2Transaction) types.Hash256 { var acc blake2b.Accumulator for _, txn := range txns { - h.Reset() - txn.EncodeTo(h.E) - acc.AddLeaf(h.Sum()) + acc.AddLeaf(txn.FullHash()) } for _, txn := range v2txns { - h.Reset() - txn.EncodeTo(h.E) - acc.AddLeaf(h.Sum()) + acc.AddLeaf(txn.FullHash()) } - txnsHash := types.Hash256(acc.Root()) + return acc.Root() +} - // concatenate the hashes and the miner address +// Commitment computes the commitment hash for a child block with the given +// transactions and miner address. 
+func (s State) Commitment(txnsHash types.Hash256, minerAddr types.Address) types.Hash256 { + h := hasherPool.Get().(*types.Hasher) + defer hasherPool.Put(h) + h.Reset() + s.EncodeTo(h.E) + stateHash := h.Sum() h.Reset() h.WriteDistinguisher("commitment") h.E.WriteUint8(s.v2ReplayPrefix()) @@ -536,7 +532,7 @@ func (s State) InputSigHash(txn types.V2Transaction) types.Hash256 { h := hasherPool.Get().(*types.Hasher) defer hasherPool.Put(h) h.Reset() - h.WriteDistinguisher("id/transaction") + h.WriteDistinguisher("sig/input") h.E.WriteUint8(s.v2ReplayPrefix()) h.E.WritePrefix(len(txn.SiacoinInputs)) for _, in := range txn.SiacoinInputs { diff --git a/consensus/update.go b/consensus/update.go index c085895c..ef7ed4d7 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -607,8 +607,15 @@ func (ms *MidState) forEachElementLeaf(fn func(elementLeaf)) { // An ApplyUpdate represents the effects of applying a block to a state. type ApplyUpdate struct { - ElementApplyUpdate - ms *MidState + ms *MidState + eau elementApplyUpdate +} + +// UpdateElementProof updates the Merkle proof of the supplied element to +// incorporate the changes made to the accumulator. The element's proof must be +// up-to-date; if it is not, UpdateElementProof may panic. +func (au ApplyUpdate) UpdateElementProof(e *types.StateElement) { + au.eau.updateElementProof(e) } // ForEachSiacoinElement calls fn on each siacoin element related to au. @@ -673,13 +680,20 @@ func ApplyBlock(s State, b types.Block, bs V1BlockSupplement, targetTimestamp ti }) eau := s.Elements.applyBlock(updated, added) s = ApplyOrphan(s, b, targetTimestamp) - return s, ApplyUpdate{eau, ms} + return s, ApplyUpdate{ms, eau} } // A RevertUpdate represents the effects of reverting to a prior state. 
type RevertUpdate struct { - ElementRevertUpdate - ms *MidState + ms *MidState + eru elementRevertUpdate +} + +// UpdateElementProof updates the Merkle proof of the supplied element to +// incorporate the changes made to the accumulator. The element's proof must be +// up-to-date; if it is not, UpdateElementProof may panic. +func (ru RevertUpdate) UpdateElementProof(e *types.StateElement) { + ru.eru.updateElementProof(e) } // ForEachSiacoinElement calls fn on each siacoin element related to ru. @@ -734,5 +748,5 @@ func RevertBlock(s State, b types.Block, bs V1BlockSupplement) RevertUpdate { } }) eru := s.Elements.revertBlock(updated) - return RevertUpdate{eru, ms} + return RevertUpdate{ms, eru} } diff --git a/consensus/validation.go b/consensus/validation.go index f49495ad..babb7279 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -96,7 +96,7 @@ func ValidateOrphan(s State, b types.Block) error { if b.V2 != nil { if b.V2.Height != s.Index.Height+1 { return errors.New("block height does not increment parent height") - } else if b.V2.Commitment != s.Commitment(b.MinerPayouts[0].Address, b.Transactions, b.V2.Transactions) { + } else if b.V2.Commitment != s.Commitment(s.TransactionsCommitment(b.Transactions, b.V2Transactions()), b.MinerPayouts[0].Address) { return errors.New("commitment hash does not match header") } } diff --git a/consensus/validation_test.go b/consensus/validation_test.go index 702e25d9..df4aca41 100644 --- a/consensus/validation_test.go +++ b/consensus/validation_test.go @@ -563,7 +563,7 @@ func TestValidateV2Block(t *testing.T) { }}, } signTxn(cs, &b.V2.Transactions[0]) - b.V2.Commitment = cs.Commitment(b.MinerPayouts[0].Address, b.Transactions, b.V2.Transactions) + b.V2.Commitment = cs.Commitment(cs.TransactionsCommitment(b.Transactions, b.V2Transactions()), b.MinerPayouts[0].Address) // initial block should be valid validBlock := deepCopyBlock(b) @@ -802,7 +802,7 @@ func TestValidateV2Block(t *testing.T) { 
test.corrupt(&corruptBlock) signTxn(cs, &corruptBlock.V2.Transactions[0]) if len(corruptBlock.MinerPayouts) > 0 { - corruptBlock.V2.Commitment = cs.Commitment(corruptBlock.MinerPayouts[0].Address, corruptBlock.Transactions, corruptBlock.V2.Transactions) + corruptBlock.V2.Commitment = cs.Commitment(cs.TransactionsCommitment(corruptBlock.Transactions, corruptBlock.V2Transactions()), corruptBlock.MinerPayouts[0].Address) } findBlockNonce(cs, &corruptBlock) diff --git a/gateway/encoding.go b/gateway/encoding.go index a33d4dea..d4950240 100644 --- a/gateway/encoding.go +++ b/gateway/encoding.go @@ -6,10 +6,11 @@ import ( "fmt" "io" + "go.sia.tech/core/consensus" "go.sia.tech/core/types" ) -func withEncoder(w io.Writer, fn func(*types.Encoder)) error { +func withV1Encoder(w io.Writer, fn func(*types.Encoder)) error { var buf bytes.Buffer e := types.NewEncoder(&buf) e.WritePrefix(0) // placeholder @@ -21,13 +22,25 @@ func withEncoder(w io.Writer, fn func(*types.Encoder)) error { return err } -func withDecoder(r io.Reader, maxLen int, fn func(*types.Decoder)) error { +func withV1Decoder(r io.Reader, maxLen int, fn func(*types.Decoder)) error { d := types.NewDecoder(io.LimitedReader{R: r, N: int64(8 + maxLen)}) d.ReadPrefix() // ignored fn(d) return d.Err() } +func withV2Encoder(w io.Writer, fn func(*types.Encoder)) error { + e := types.NewEncoder(w) + fn(e) + return e.Flush() +} + +func withV2Decoder(r io.Reader, maxLen int, fn func(*types.Decoder)) error { + d := types.NewDecoder(io.LimitedReader{R: r, N: int64(maxLen)}) + fn(d) + return d.Err() +} + func (h *Header) encodeTo(e *types.Encoder) { h.GenesisID.EncodeTo(e) e.Write(h.UniqueID[:]) @@ -55,21 +68,65 @@ func (h *BlockHeader) decodeFrom(d *types.Decoder) { } func (h *V2BlockHeader) encodeTo(e *types.Encoder) { - e.WriteUint64(h.Height) - h.ParentID.EncodeTo(e) + h.Parent.EncodeTo(e) e.WriteUint64(h.Nonce) e.WriteTime(h.Timestamp) + h.TransactionsRoot.EncodeTo(e) h.MinerAddress.EncodeTo(e) - h.Commitment.EncodeTo(e) 
} func (h *V2BlockHeader) decodeFrom(d *types.Decoder) { - h.Height = d.ReadUint64() - h.ParentID.DecodeFrom(d) + h.Parent.DecodeFrom(d) h.Nonce = d.ReadUint64() h.Timestamp = d.ReadTime() + h.TransactionsRoot.DecodeFrom(d) h.MinerAddress.DecodeFrom(d) - h.Commitment.DecodeFrom(d) +} + +func (ot *OutlineTransaction) encodeTo(e *types.Encoder) { + ot.Hash.EncodeTo(e) + if ot.Transaction != nil { + e.WriteBool(true) + ot.Transaction.EncodeTo(e) + } else { + e.WriteBool(false) + ot.V2Transaction.EncodeTo(e) + } +} + +func (ot *OutlineTransaction) decodeFrom(d *types.Decoder) { + ot.Hash.DecodeFrom(d) + if d.ReadBool() { + ot.Transaction = new(types.Transaction) + ot.Transaction.DecodeFrom(d) + } else { + ot.V2Transaction = new(types.V2Transaction) + ot.V2Transaction.DecodeFrom(d) + } +} + +func (pb *V2BlockOutline) encodeTo(e *types.Encoder) { + e.WriteUint64(pb.Height) + pb.ParentID.EncodeTo(e) + e.WriteUint64(pb.Nonce) + e.WriteTime(pb.Timestamp) + pb.MinerAddress.EncodeTo(e) + e.WritePrefix(len(pb.Transactions)) + for i := range pb.Transactions { + pb.Transactions[i].encodeTo(e) + } +} + +func (pb *V2BlockOutline) decodeFrom(d *types.Decoder) { + pb.Height = d.ReadUint64() + pb.ParentID.DecodeFrom(d) + pb.Nonce = d.ReadUint64() + pb.Timestamp = d.ReadTime() + pb.MinerAddress.DecodeFrom(d) + pb.Transactions = make([]OutlineTransaction, d.ReadPrefix()) + for i := range pb.Transactions { + pb.Transactions[i].decodeFrom(d) + } } type object interface { @@ -187,16 +244,6 @@ func (r *RPCRelayHeader) encodeRequest(e *types.Encoder) { r.Header.encodeTo(e) func (r *RPCRelayHeader) decodeRequest(d *types.Decoder) { r.Header.decodeFrom(d) } func (r *RPCRelayHeader) maxRequestLen() int { return 32 + 8 + 8 + 32 } -// RPCRelayV2Header relays a v2 header. 
-type RPCRelayV2Header struct { - Header V2BlockHeader - emptyResponse -} - -func (r *RPCRelayV2Header) encodeRequest(e *types.Encoder) { r.Header.encodeTo(e) } -func (r *RPCRelayV2Header) decodeRequest(d *types.Decoder) { r.Header.decodeFrom(d) } -func (r *RPCRelayV2Header) maxRequestLen() int { return 32 + 8 + 8 + 32 } - // RPCRelayTransactionSet relays a transaction set. type RPCRelayTransactionSet struct { Transactions []types.Transaction @@ -217,6 +264,134 @@ func (r *RPCRelayTransactionSet) decodeRequest(d *types.Decoder) { } func (r *RPCRelayTransactionSet) maxRequestLen() int { return 5e6 } +// RPCSendV2Blocks requests a set of blocks. +type RPCSendV2Blocks struct { + History []types.BlockID + Max uint64 + Blocks []types.Block + Remaining uint64 +} + +func (r *RPCSendV2Blocks) encodeRequest(e *types.Encoder) { + for i := range r.History { + r.History[i].EncodeTo(e) + } + e.WriteUint64(r.Max) +} +func (r *RPCSendV2Blocks) decodeRequest(d *types.Decoder) { + r.History = make([]types.BlockID, d.ReadPrefix()) + for i := range r.History { + r.History[i].DecodeFrom(d) + } + r.Max = d.ReadUint64() +} +func (r *RPCSendV2Blocks) maxRequestLen() int { return 8 + 32*32 + 8 } + +func (r *RPCSendV2Blocks) encodeResponse(e *types.Encoder) { + e.WritePrefix(len(r.Blocks)) + for i := range r.Blocks { + types.V2Block(r.Blocks[i]).EncodeTo(e) + } + e.WriteUint64(r.Remaining) +} +func (r *RPCSendV2Blocks) decodeResponse(d *types.Decoder) { + r.Blocks = make([]types.Block, d.ReadPrefix()) + for i := range r.Blocks { + (*types.V2Block)(&r.Blocks[i]).DecodeFrom(d) + } + r.Remaining = d.ReadUint64() +} +func (r *RPCSendV2Blocks) maxResponseLen() int { return int(r.Max) * 5e6 } + +// RPCSendTransactions requests a subset of a block's transactions. 
+type RPCSendTransactions struct { + Index types.ChainIndex + Hashes []types.Hash256 + + Transactions []types.Transaction + V2Transactions []types.V2Transaction +} + +func (r *RPCSendTransactions) encodeRequest(e *types.Encoder) { + r.Index.EncodeTo(e) + e.WritePrefix(len(r.Hashes)) + for i := range r.Hashes { + r.Hashes[i].EncodeTo(e) + } +} +func (r *RPCSendTransactions) decodeRequest(d *types.Decoder) { + r.Index.DecodeFrom(d) + r.Hashes = make([]types.Hash256, d.ReadPrefix()) + for i := range r.Hashes { + r.Hashes[i].DecodeFrom(d) + } +} +func (r *RPCSendTransactions) maxRequestLen() int { return 8 + 32 + 8 + 100*32 } + +func (r *RPCSendTransactions) encodeResponse(e *types.Encoder) { + e.WritePrefix(len(r.Transactions)) + for i := range r.Transactions { + r.Transactions[i].EncodeTo(e) + } + e.WritePrefix(len(r.V2Transactions)) + for i := range r.V2Transactions { + r.V2Transactions[i].EncodeTo(e) + } +} +func (r *RPCSendTransactions) decodeResponse(d *types.Decoder) { + r.Transactions = make([]types.Transaction, d.ReadPrefix()) + for i := range r.Transactions { + r.Transactions[i].DecodeFrom(d) + } + r.V2Transactions = make([]types.V2Transaction, d.ReadPrefix()) + for i := range r.V2Transactions { + r.V2Transactions[i].DecodeFrom(d) + } +} +func (r *RPCSendTransactions) maxResponseLen() int { return 5e6 } + +// RPCSendCheckpoint requests a checkpoint. 
+type RPCSendCheckpoint struct { + Index types.ChainIndex + + Block types.Block + State consensus.State +} + +func (r *RPCSendCheckpoint) encodeRequest(e *types.Encoder) { r.Index.EncodeTo(e) } +func (r *RPCSendCheckpoint) decodeRequest(d *types.Decoder) { r.Index.DecodeFrom(d) } +func (r *RPCSendCheckpoint) maxRequestLen() int { return 8 + 32 } + +func (r *RPCSendCheckpoint) encodeResponse(e *types.Encoder) { + (types.V2Block)(r.Block).EncodeTo(e) + r.State.EncodeTo(e) +} +func (r *RPCSendCheckpoint) decodeResponse(d *types.Decoder) { + (*types.V2Block)(&r.Block).DecodeFrom(d) + r.State.DecodeFrom(d) +} +func (r *RPCSendCheckpoint) maxResponseLen() int { return 5e6 + 4e3 } + +// RPCRelayV2Header relays a v2 block header. +type RPCRelayV2Header struct { + Header V2BlockHeader + emptyResponse +} + +func (r *RPCRelayV2Header) encodeRequest(e *types.Encoder) { r.Header.encodeTo(e) } +func (r *RPCRelayV2Header) decodeRequest(d *types.Decoder) { r.Header.decodeFrom(d) } +func (r *RPCRelayV2Header) maxRequestLen() int { return 8 + 32 + 8 + 8 + 32 + 32 } + +// RPCRelayV2BlockOutline relays a v2 block outline. +type RPCRelayV2BlockOutline struct { + Block V2BlockOutline + emptyResponse +} + +func (r *RPCRelayV2BlockOutline) encodeRequest(e *types.Encoder) { r.Block.encodeTo(e) } +func (r *RPCRelayV2BlockOutline) decodeRequest(d *types.Decoder) { r.Block.decodeFrom(d) } +func (r *RPCRelayV2BlockOutline) maxRequestLen() int { return 5e6 } + // RPCRelayV2TransactionSet relays a v2 transaction set. 
type RPCRelayV2TransactionSet struct { Transactions []types.V2Transaction @@ -237,26 +412,29 @@ func (r *RPCRelayV2TransactionSet) decodeRequest(d *types.Decoder) { } func (r *RPCRelayV2TransactionSet) maxRequestLen() int { return 5e6 } -type rpcID types.Specifier +type v1RPCID types.Specifier -func (id *rpcID) encodeTo(e *types.Encoder) { e.Write(id[:8]) } -func (id *rpcID) decodeFrom(d *types.Decoder) { d.Read(id[:8]) } - -func newID(str string) (id rpcID) { - copy(id[:8], str) - return -} +func (id *v1RPCID) encodeTo(e *types.Encoder) { e.Write(id[:8]) } +func (id *v1RPCID) decodeFrom(d *types.Decoder) { d.Read(id[:8]) } var ( - idShareNodes = newID("ShareNodes") - idDiscoverIP = newID("DiscoverIP") - idSendBlocks = newID("SendBlocks") - idSendBlk = newID("SendBlk") - idRelayHeader = newID("RelayHeader") - idRelayTransactionSet = newID("RelayTransactionSet") + // v1 + idShareNodes = types.NewSpecifier("ShareNodes") + idDiscoverIP = types.NewSpecifier("DiscoverIP") + idSendBlocks = types.NewSpecifier("SendBlocks") + idSendBlk = types.NewSpecifier("SendBlk") + idRelayHeader = types.NewSpecifier("RelayHeader") + idRelayTransactionSet = types.NewSpecifier("RelayTransactionSet") + // v2 + idSendV2Blocks = types.NewSpecifier("SendV2Blocks") + idSendTransactions = types.NewSpecifier("SendTransactions") + idSendCheckpoint = types.NewSpecifier("SendCheckpoint") + idRelayV2Header = types.NewSpecifier("RelayV2Header") + idRelayV2BlockOutline = types.NewSpecifier("RelayV2BlockOutline") + idRelayV2TransactionSet = types.NewSpecifier("RelayV2TransactionSet") ) -func idForObject(o object) rpcID { +func idForObject(o object) types.Specifier { switch o.(type) { case *RPCShareNodes: return idShareNodes @@ -270,12 +448,24 @@ func idForObject(o object) rpcID { return idRelayHeader case *RPCRelayTransactionSet: return idRelayTransactionSet + case *RPCSendV2Blocks: + return idSendV2Blocks + case *RPCSendTransactions: + return idSendTransactions + case *RPCSendCheckpoint: + return 
idSendCheckpoint
+	case *RPCRelayV2Header:
+		return idRelayV2Header
+	case *RPCRelayV2BlockOutline:
+		return idRelayV2BlockOutline
+	case *RPCRelayV2TransactionSet:
+		return idRelayV2TransactionSet
 	default:
 		panic(fmt.Sprintf("unhandled object type %T", o))
 	}
 }
 
-func objectForID(id rpcID) object {
+func objectForID(id types.Specifier) object {
 	switch id {
 	case idShareNodes:
 		return new(RPCShareNodes)
@@ -289,6 +479,18 @@ func objectForID(id rpcID) object {
 		return new(RPCRelayHeader)
 	case idRelayTransactionSet:
 		return new(RPCRelayTransactionSet)
+	case idSendV2Blocks:
+		return new(RPCSendV2Blocks)
+	case idSendTransactions:
+		return new(RPCSendTransactions)
+	case idSendCheckpoint:
+		return new(RPCSendCheckpoint)
+	case idRelayV2Header:
+		return new(RPCRelayV2Header)
+	case idRelayV2BlockOutline:
+		return new(RPCRelayV2BlockOutline)
+	case idRelayV2TransactionSet:
+		return new(RPCRelayV2TransactionSet)
 	default:
 		return nil
 	}
diff --git a/gateway/gateway.go b/gateway/gateway.go
new file mode 100644
index 00000000..8edc1806
--- /dev/null
+++ b/gateway/gateway.go
@@ -0,0 +1,293 @@
+package gateway
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"time"
+
+	"go.sia.tech/core/consensus"
+	"go.sia.tech/core/internal/blake2b"
+	"go.sia.tech/core/internal/smux"
+	"go.sia.tech/core/types"
+	"go.sia.tech/mux"
+	"lukechampine.com/frand"
+)
+
+// A UniqueID is a randomly-generated nonce that helps prevent self-connections
+// and double-connections.
+type UniqueID [8]byte
+
+// GenerateUniqueID returns a random UniqueID.
+func GenerateUniqueID() (id UniqueID) {
+	frand.Read(id[:])
+	return
+}
+
+// A Header contains various peer metadata which is exchanged during the gateway
+// handshake.
+type Header struct { + GenesisID types.BlockID + UniqueID UniqueID + NetAddress string +} + +func validateHeader(ours, theirs Header) error { + if theirs.GenesisID != ours.GenesisID { + return errors.New("peer has different genesis block") + } else if theirs.UniqueID == ours.UniqueID { + return errors.New("peer has same unique ID as us") + } + return nil +} + +func writeHeader(conn net.Conn, ourHeader Header) error { + var accept string + if err := withV1Encoder(conn, ourHeader.encodeTo); err != nil { + return fmt.Errorf("could not write our header: %w", err) + } else if err := withV1Decoder(conn, 128, func(d *types.Decoder) { accept = d.ReadString() }); err != nil { + return fmt.Errorf("could not read peer header acceptance: %w", err) + } else if accept != "accept" { + return fmt.Errorf("peer rejected our header: %v", accept) + } + return nil +} + +func readHeader(conn net.Conn, ourHeader Header, dialAddr *string) error { + var peerHeader Header + if err := withV1Decoder(conn, 32+8+128, peerHeader.decodeFrom); err != nil { + return fmt.Errorf("could not read peer's header: %w", err) + } else if err := validateHeader(ourHeader, peerHeader); err != nil { + withV1Encoder(conn, func(e *types.Encoder) { e.WriteString(err.Error()) }) + return fmt.Errorf("unacceptable header: %w", err) + } else if err := withV1Encoder(conn, func(e *types.Encoder) { e.WriteString("accept") }); err != nil { + return fmt.Errorf("could not write accept: %w", err) + } else if host, _, err := net.SplitHostPort(conn.RemoteAddr().String()); err != nil { + return fmt.Errorf("invalid remote addr (%q): %w", conn.RemoteAddr(), err) + } else if _, port, err := net.SplitHostPort(peerHeader.NetAddress); err != nil { + return fmt.Errorf("peer provided invalid net address (%q): %w", peerHeader.NetAddress, err) + } else { + *dialAddr = net.JoinHostPort(host, port) + } + return nil +} + +// A BlockHeader contains a Block's non-transaction data. 
+type BlockHeader struct { + ParentID types.BlockID + Nonce uint64 + Timestamp time.Time + MerkleRoot types.Hash256 +} + +// ID returns a hash that uniquely identifies the block. +func (h BlockHeader) ID() types.BlockID { + buf := make([]byte, 32+8+8+32) + copy(buf[:32], h.ParentID[:]) + binary.LittleEndian.PutUint64(buf[32:], h.Nonce) + binary.LittleEndian.PutUint64(buf[40:], uint64(h.Timestamp.Unix())) + copy(buf[48:], h.MerkleRoot[:]) + return types.BlockID(types.HashBytes(buf)) +} + +// A V2BlockHeader contains a V2Block's non-transaction data. +type V2BlockHeader struct { + Parent types.ChainIndex + Nonce uint64 + Timestamp time.Time + TransactionsRoot types.Hash256 + MinerAddress types.Address +} + +// ID returns a hash that uniquely identifies the block. +func (h V2BlockHeader) ID(cs consensus.State) types.BlockID { + return (&types.Block{ + Nonce: h.Nonce, + Timestamp: h.Timestamp, + V2: &types.V2BlockData{Commitment: cs.Commitment(h.TransactionsRoot, h.MinerAddress)}, + }).ID() +} + +// An OutlineTransaction identifies a transaction by its full hash. The actual +// transaction data may or may not be present. +type OutlineTransaction struct { + Hash types.Hash256 + Transaction *types.Transaction + V2Transaction *types.V2Transaction +} + +// A V2BlockOutline represents a Block with one or more transactions omitted. +// The original block can be reconstructed by matching the transaction hashes +// to transactions present in the txpool, or requesting them from peers. +type V2BlockOutline struct { + Height uint64 + ParentID types.BlockID + Nonce uint64 + Timestamp time.Time + MinerAddress types.Address + Transactions []OutlineTransaction +} + +func (pb V2BlockOutline) commitment(cs consensus.State) types.Hash256 { + var acc blake2b.Accumulator + for _, txn := range pb.Transactions { + acc.AddLeaf(txn.Hash) + } + return cs.Commitment(acc.Root(), pb.MinerAddress) +} + +// ID returns a hash that uniquely identifies the block. 
+func (pb V2BlockOutline) ID(cs consensus.State) types.BlockID { + return (&types.Block{ + Nonce: pb.Nonce, + Timestamp: pb.Timestamp, + V2: &types.V2BlockData{Commitment: pb.commitment(cs)}, + }).ID() +} + +// Missing returns the hashes of transactions that are missing from the block. +func (pb V2BlockOutline) Missing() (missing []types.Hash256) { + for _, txn := range pb.Transactions { + if txn.Transaction == nil && txn.V2Transaction == nil { + missing = append(missing, txn.Hash) + } + } + return +} + +// Complete attempts to reconstruct the original block using the supplied +// transactions. If the block cannot be fully reconstructed, it returns the +// hashes of the missing transactions. +func (pb *V2BlockOutline) Complete(cs consensus.State, txns []types.Transaction, v2txns []types.V2Transaction) (types.Block, []types.Hash256) { + var v1hashes map[types.Hash256]types.Transaction + var v2hashes map[types.Hash256]types.V2Transaction + completeTxn := func(ptxn *OutlineTransaction) { + if ptxn.Transaction != nil || ptxn.V2Transaction != nil { + return + } + if v1hashes == nil { + v1hashes = make(map[types.Hash256]types.Transaction, len(txns)) + for _, txn := range txns { + v1hashes[txn.FullHash()] = txn + } + } + if txn, ok := v1hashes[ptxn.Hash]; ok { + ptxn.Transaction = &txn + return + } + if v2hashes == nil { + v2hashes = make(map[types.Hash256]types.V2Transaction, len(txns)) + for _, txn := range v2txns { + v2hashes[txn.FullHash()] = txn + } + } + if txn, ok := v2hashes[ptxn.Hash]; ok { + ptxn.V2Transaction = &txn + return + } + } + + b := types.Block{ + ParentID: pb.ParentID, + Nonce: pb.Nonce, + Timestamp: pb.Timestamp, + MinerPayouts: []types.SiacoinOutput{{Address: pb.MinerAddress, Value: cs.BlockReward()}}, + V2: &types.V2BlockData{ + Height: pb.Height, + Commitment: pb.commitment(cs), + }, + } + for i := range pb.Transactions { + ptxn := &pb.Transactions[i] + completeTxn(ptxn) + if ptxn.Transaction != nil { + b.Transactions = append(b.Transactions, 
*ptxn.Transaction) + for _, fee := range ptxn.Transaction.MinerFees { + b.MinerPayouts[0].Value = b.MinerPayouts[0].Value.Add(fee) + } + } else if ptxn.V2Transaction != nil { + b.V2.Transactions = append(b.V2.Transactions, *ptxn.V2Transaction) + b.MinerPayouts[0].Value = b.MinerPayouts[0].Value.Add(ptxn.V2Transaction.MinerFee) + } + } + return b, pb.Missing() +} + +// RemoveTransactions removes the specified transactions from the block. +func (pb *V2BlockOutline) RemoveTransactions(txns []types.Transaction, v2txns []types.V2Transaction) { + remove := make(map[types.Hash256]bool) + for _, txn := range txns { + remove[txn.FullHash()] = true + } + for _, txn := range v2txns { + remove[txn.FullHash()] = true + } + for i := range pb.Transactions { + if remove[pb.Transactions[i].Hash] { + pb.Transactions[i].Transaction = nil + pb.Transactions[i].V2Transaction = nil + } + } +} + +// Dial initiates the gateway handshake with a peer. +func Dial(conn net.Conn, ourHeader Header) (*Peer, error) { + p := &Peer{ + ConnAddr: conn.RemoteAddr().String(), + Inbound: false, + } + + // exchange versions + const ourVersion = "2.0.0" + if err := withV1Encoder(conn, func(e *types.Encoder) { e.WriteString(ourVersion) }); err != nil { + return nil, fmt.Errorf("could not write our version: %w", err) + } else if err := withV1Decoder(conn, 128, func(d *types.Decoder) { p.Version = d.ReadString() }); err != nil { + return nil, fmt.Errorf("could not read peer version: %w", err) + } + // exchange headers + if err := writeHeader(conn, ourHeader); err != nil { + return nil, fmt.Errorf("could not write our header: %w", err) + } else if err := readHeader(conn, ourHeader, &p.Addr); err != nil { + return nil, fmt.Errorf("could not read peer's header: %w", err) + } + // establish mux + var err error + if strings.HasPrefix(p.Version, "1.") { + p.smux, err = smux.Client(conn, nil) + } else { + p.mux, err = mux.DialAnonymous(conn) + } + return p, err +} + +// Accept reciprocates the gateway handshake with 
a peer. +func Accept(conn net.Conn, ourHeader Header) (*Peer, error) { + p := &Peer{ + ConnAddr: conn.RemoteAddr().String(), + Inbound: true, + } + + // exchange versions + const ourVersion = "2.0.0" + if err := withV1Decoder(conn, 128, func(d *types.Decoder) { p.Version = d.ReadString() }); err != nil { + return nil, fmt.Errorf("could not read peer version: %w", err) + } else if err := withV1Encoder(conn, func(e *types.Encoder) { e.WriteString(ourVersion) }); err != nil { + return nil, fmt.Errorf("could not write our version: %w", err) + } + // exchange headers + if err := readHeader(conn, ourHeader, &p.Addr); err != nil { + return nil, fmt.Errorf("could not read peer's header: %w", err) + } else if err := writeHeader(conn, ourHeader); err != nil { + return nil, fmt.Errorf("could not write our header: %w", err) + } + // establish mux + var err error + if strings.HasPrefix(p.Version, "1.") { + p.smux, err = smux.Server(conn, nil) + } else { + p.mux, err = mux.AcceptAnonymous(conn) + } + return p, err +} diff --git a/gateway/peer.go b/gateway/peer.go index ff7edab1..96915a0b 100644 --- a/gateway/peer.go +++ b/gateway/peer.go @@ -1,90 +1,27 @@ package gateway import ( - "encoding/binary" "errors" "fmt" + "io" "net" "sync" "time" + "go.sia.tech/core/consensus" "go.sia.tech/core/internal/smux" "go.sia.tech/core/types" - "lukechampine.com/frand" + "go.sia.tech/mux" ) -// A UniqueID is a randomly-generated nonce that helps prevent self-connections -// and double-connections. -type UniqueID [8]byte - -// GenerateUniqueID returns a random UniqueID. -func GenerateUniqueID() (id UniqueID) { - frand.Read(id[:]) - return -} - -// A Header contains various peer metadata which is exchanged during the gateway -// handshake. 
-type Header struct { - GenesisID types.BlockID - UniqueID UniqueID - NetAddress string -} - -func validateHeader(ours, theirs Header) error { - if theirs.GenesisID != ours.GenesisID { - return errors.New("peer has different genesis block") - } else if theirs.UniqueID == ours.UniqueID { - return errors.New("peer has same unique ID as us") - } - return nil -} - -// A BlockHeader contains a Block's non-transaction data. -type BlockHeader struct { - ParentID types.BlockID - Nonce uint64 - Timestamp time.Time - MerkleRoot types.Hash256 -} - -// ID returns a hash that uniquely identifies the block. -func (h BlockHeader) ID() types.BlockID { - buf := make([]byte, 32+8+8+32) - copy(buf[:32], h.ParentID[:]) - binary.LittleEndian.PutUint64(buf[32:], h.Nonce) - binary.LittleEndian.PutUint64(buf[40:], uint64(h.Timestamp.Unix())) - copy(buf[48:], h.MerkleRoot[:]) - return types.BlockID(types.HashBytes(buf)) -} - -// A V2BlockHeader contains a Block's non-transaction data. -type V2BlockHeader struct { - Height uint64 - ParentID types.BlockID - Nonce uint64 - Timestamp time.Time - MinerAddress types.Address - Commitment types.Hash256 -} - -// ID returns a hash that uniquely identifies the block. -func (h V2BlockHeader) ID() types.BlockID { - buf := make([]byte, 32+8+8+32) - copy(buf[:32], "sia/id/block|") - binary.LittleEndian.PutUint64(buf[32:], h.Nonce) - binary.LittleEndian.PutUint64(buf[40:], uint64(h.Timestamp.Unix())) - copy(buf[48:], h.Commitment[:]) - return types.BlockID(types.HashBytes(buf)) -} - // A Peer is a connected gateway peer. type Peer struct { + Version string Addr string ConnAddr string Inbound bool - Version string - mux *smux.Session + smux *smux.Session // for v1 + mux *mux.Mux // for v2 mu sync.Mutex err error } @@ -97,6 +34,9 @@ func (p *Peer) String() string { return "->" + p.ConnAddr } +// SupportsV2 returns true if the peer supports v2 RPCs. 
+func (p *Peer) SupportsV2() bool { return p.mux != nil } + // Err returns the error that caused the peer to disconnect, if any. func (p *Peer) Err() error { p.mu.Lock() @@ -110,7 +50,11 @@ func (p *Peer) SetErr(err error) error { defer p.mu.Unlock() if p.err == nil { p.err = err - p.mux.Close() + if p.smux != nil { + p.smux.Close() + } else { + p.mux.Close() + } } return p.err } @@ -121,109 +65,194 @@ func (p *Peer) Close() error { return nil } +func (p *Peer) openStream() (net.Conn, error) { + if p.smux != nil { + return p.smux.OpenStream() + } + return p.mux.DialStream(), nil +} + +func (p *Peer) acceptStream() (net.Conn, error) { + if p.smux != nil { + return p.smux.AcceptStream() + } + return p.mux.AcceptStream() +} + +func (p *Peer) withEncoder(w io.Writer, fn func(*types.Encoder)) error { + if p.smux != nil { + return withV1Encoder(w, fn) + } + return withV2Encoder(w, fn) +} + +func (p *Peer) withDecoder(r io.Reader, maxLen int, fn func(*types.Decoder)) error { + if p.smux != nil { + return withV1Decoder(r, maxLen, fn) + } + return withV2Decoder(r, maxLen, fn) +} + // An RPCHandler handles RPCs received from a peer. type RPCHandler interface { + // v1 PeersForShare() []string Block(id types.BlockID) (types.Block, error) - BlocksForHistory(history [32]types.BlockID) ([]types.Block, bool, error) + BlocksForHistory(history []types.BlockID, max uint64) ([]types.Block, uint64, error) RelayHeader(h BlockHeader, origin *Peer) RelayTransactionSet(txns []types.Transaction, origin *Peer) - RelayV2Header(h V2BlockHeader, origin *Peer) + // v2 + Transactions(index types.ChainIndex, txns []types.Hash256) ([]types.Transaction, []types.V2Transaction, error) + Checkpoint(index types.ChainIndex) (types.Block, consensus.State, error) + RelayV2BlockHeader(h V2BlockHeader, origin *Peer) + RelayV2BlockOutline(b V2BlockOutline, origin *Peer) RelayV2TransactionSet(txns []types.V2Transaction, origin *Peer) } // HandleRPC handles an RPC received from the peer. 
func (p *Peer) HandleRPC(id types.Specifier, stream net.Conn, h RPCHandler) error { - switch r := objectForID(rpcID(id)).(type) { + switch r := objectForID(id).(type) { case *RPCShareNodes: r.Peers = h.PeersForShare() - if err := withEncoder(stream, r.encodeResponse); err != nil { + if err := p.withEncoder(stream, r.encodeResponse); err != nil { return err } return nil case *RPCDiscoverIP: r.IP, _, _ = net.SplitHostPort(p.Addr) - if err := withEncoder(stream, r.encodeResponse); err != nil { + if err := p.withEncoder(stream, r.encodeResponse); err != nil { return err } return nil case *RPCRelayHeader: - if err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { + if err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { return err } h.RelayHeader(r.Header, p) return nil - case *RPCRelayV2Header: - if err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { - return err - } - h.RelayV2Header(r.Header, p) - return nil case *RPCRelayTransactionSet: - if err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { + if err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { return err } h.RelayTransactionSet(r.Transactions, p) return nil - case *RPCRelayV2TransactionSet: - if err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { - return err - } - h.RelayV2TransactionSet(r.Transactions, p) - return nil case *RPCSendBlk: - err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest) + err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest) if err != nil { return err } r.Block, err = h.Block(r.ID) if err != nil { return err - } else if err := withEncoder(stream, r.encodeResponse); err != nil { + } else if err := p.withEncoder(stream, r.encodeResponse); err != nil { return err } return nil case *RPCSendBlocks: - err := withDecoder(stream, r.maxRequestLen(), r.decodeRequest) + err := p.withDecoder(stream, r.maxRequestLen(), 
r.decodeRequest) if err != nil { return err } for { - r.Blocks, r.MoreAvailable, err = h.BlocksForHistory(r.History) + var rem uint64 + r.Blocks, rem, err = h.BlocksForHistory(r.History[:], 10) + r.MoreAvailable = rem > 0 if err != nil { return err - } else if err := withEncoder(stream, r.encodeBlocksResponse); err != nil { + } else if err := p.withEncoder(stream, r.encodeBlocksResponse); err != nil { return err - } else if err := withEncoder(stream, r.encodeMoreAvailableResponse); err != nil { + } else if err := p.withEncoder(stream, r.encodeMoreAvailableResponse); err != nil { return err } else if !r.MoreAvailable { return nil } r.History[0] = r.Blocks[len(r.Blocks)-1].ID() } + case *RPCSendTransactions: + err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest) + if err != nil { + return err + } + r.Transactions, r.V2Transactions, err = h.Transactions(r.Index, r.Hashes) + if err != nil { + return err + } else if err := p.withEncoder(stream, r.encodeResponse); err != nil { + return err + } + return nil + case *RPCSendCheckpoint: + err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest) + if err != nil { + return err + } + r.Block, r.State, err = h.Checkpoint(r.Index) + if err != nil { + return err + } else if err := p.withEncoder(stream, r.encodeResponse); err != nil { + return err + } + return nil + case *RPCRelayV2Header: + if err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { + return err + } + h.RelayV2BlockHeader(r.Header, p) + return nil + case *RPCRelayV2BlockOutline: + if err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { + return err + } + h.RelayV2BlockOutline(r.Block, p) + return nil + case *RPCRelayV2TransactionSet: + if err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { + return err + } + h.RelayV2TransactionSet(r.Transactions, p) + return nil + case *RPCSendV2Blocks: + err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest) + if err != nil { 
+ return err + } + if r.Max > 100 { + r.Max = 100 + } + r.Blocks, r.Remaining, err = h.BlocksForHistory(r.History, r.Max) + if err != nil { + return err + } else if err := p.withEncoder(stream, r.encodeResponse); err != nil { + return err + } + return nil default: return fmt.Errorf("unrecognized RPC: %q", id) } } func (p *Peer) callRPC(r object, timeout time.Duration) error { - s, err := p.mux.OpenStream() + s, err := p.openStream() if err != nil { return fmt.Errorf("couldn't open stream: %w", err) } defer s.Close() s.SetDeadline(time.Now().Add(timeout)) id := idForObject(r) - if err := withEncoder(s, id.encodeTo); err != nil { + if p.smux != nil { + err = p.withEncoder(s, (*v1RPCID)(&id).encodeTo) + } else { + err = p.withEncoder(s, id.EncodeTo) + } + if err != nil { return fmt.Errorf("couldn't write RPC ID: %w", err) } if r.maxRequestLen() > 0 { - if err := withEncoder(s, r.encodeRequest); err != nil { + if err := p.withEncoder(s, r.encodeRequest); err != nil { return fmt.Errorf("couldn't write request: %w", err) } } if r.maxResponseLen() > 0 { - if err := withDecoder(s, r.maxResponseLen(), r.decodeResponse); err != nil { + if err := p.withDecoder(s, r.maxResponseLen(), r.decodeResponse); err != nil { return fmt.Errorf("couldn't read response: %w", err) } } @@ -264,28 +293,28 @@ func (p *Peer) RelayTransactionSet(txns []types.Transaction, timeout time.Durati // SendBlocks downloads blocks from p, starting from the most recent element of // history known to p. The blocks are sent in batches, and fn is called on each // batch. 
-func (p *Peer) SendBlocks(history [32]types.BlockID, fn func([]types.Block) error) error { - s, err := p.mux.OpenStream() +func (p *Peer) SendBlocks(history [32]types.BlockID, timeout time.Duration, fn func([]types.Block) error) error { + s, err := p.openStream() if err != nil { return fmt.Errorf("couldn't open stream: %w", err) } defer s.Close() - s.SetDeadline(time.Now().Add(10 * time.Second)) + s.SetDeadline(time.Now().Add(timeout)) r := &RPCSendBlocks{History: history} - id := idForObject(r) - if err := withEncoder(s, id.encodeTo); err != nil { + id := v1RPCID(idForObject(r)) + if err := p.withEncoder(s, id.encodeTo); err != nil { return fmt.Errorf("couldn't write RPC ID: %w", err) - } else if err := withEncoder(s, r.encodeRequest); err != nil { + } else if err := p.withEncoder(s, r.encodeRequest); err != nil { return fmt.Errorf("couldn't write request: %w", err) } r.MoreAvailable = true for r.MoreAvailable { - s.SetDeadline(time.Now().Add(120 * time.Second)) - if err := withDecoder(s, r.maxBlocksResponseLen(), r.decodeBlocksResponse); err != nil { + s.SetDeadline(time.Now().Add(timeout)) + if err := p.withDecoder(s, r.maxBlocksResponseLen(), r.decodeBlocksResponse); err != nil { return fmt.Errorf("couldn't read response: %w", err) - } else if err := withDecoder(s, r.maxMoreAvailableResponseLen(), r.decodeMoreAvailableResponse); err != nil { + } else if err := p.withDecoder(s, r.maxMoreAvailableResponseLen(), r.decodeMoreAvailableResponse); err != nil { return fmt.Errorf("couldn't read response: %w", err) } else if err := fn(r.Blocks); err != nil { return err @@ -294,124 +323,71 @@ func (p *Peer) SendBlocks(history [32]types.BlockID, fn func([]types.Block) erro return nil } -// AcceptRPC accepts an RPC initiated by the peer. 
-func (p *Peer) AcceptRPC() (types.Specifier, net.Conn, error) { - s, err := p.mux.AcceptStream() - if err != nil { - return types.Specifier{}, nil, err - } - s.SetDeadline(time.Now().Add(5 * time.Second)) - var id types.Specifier - if err := withDecoder(s, 8, (*rpcID)(&id).decodeFrom); err != nil { - s.Close() - return types.Specifier{}, nil, err - } - s.SetDeadline(time.Time{}) - return id, s, nil +// SendTransactions requests a subset of a block's transactions from the peer. +func (p *Peer) SendTransactions(index types.ChainIndex, txnHashes []types.Hash256, timeout time.Duration) ([]types.Transaction, []types.V2Transaction, error) { + r := RPCSendTransactions{Index: index, Hashes: txnHashes} + err := p.callRPC(&r, timeout) + return r.Transactions, r.V2Transactions, err } -// DialPeer initiates the gateway handshake with a peer. -func DialPeer(conn net.Conn, ourHeader Header) (_ *Peer, err error) { - // exchange versions - ourVersion := "1.5.5" - var theirVersion string - if err := withEncoder(conn, func(e *types.Encoder) { e.WriteString(ourVersion) }); err != nil { - return nil, fmt.Errorf("could not write our version: %w", err) - } else if err := withDecoder(conn, 128, func(d *types.Decoder) { theirVersion = d.ReadString() }); err != nil { - return nil, fmt.Errorf("could not read peer version: %w", err) +// SendCheckpoint requests a checkpoint from the peer. The checkpoint is +// validated. 
+func (p *Peer) SendCheckpoint(index types.ChainIndex, timeout time.Duration) (types.Block, consensus.State, error) {
+	r := RPCSendCheckpoint{Index: index}
+	err := p.callRPC(&r, timeout)
+	if err == nil {
+		if r.Block.V2 == nil || len(r.Block.MinerPayouts) != 1 {
+			err = errors.New("checkpoint is not a v2 block")
+		} else if (types.ChainIndex{Height: index.Height, ID: r.Block.ID()}) != index {
+			err = errors.New("checkpoint has wrong index")
+		} else if r.Block.V2.Commitment != r.State.Commitment(r.State.TransactionsCommitment(r.Block.Transactions, r.Block.V2Transactions()), r.Block.MinerPayouts[0].Address) {
+			err = errors.New("checkpoint has wrong commitment")
+		}
 	}
-	// NOTE: we assume that the peer will be compatible, so we don't bother
-	// validating the version
+	return r.Block, r.State, err
+}
 
-	// exchange headers
-	var accept string
-	var peerHeader Header
-	var dialAddr string
-	if err := withEncoder(conn, ourHeader.encodeTo); err != nil {
-		return nil, fmt.Errorf("could not write our header: %w", err)
-	} else if err := withDecoder(conn, 128, func(d *types.Decoder) { accept = d.ReadString() }); err != nil {
-		return nil, fmt.Errorf("could not read peer version: %w", err)
-	} else if accept != "accept" {
-		return nil, fmt.Errorf("peer rejected our header: %v", accept)
-	} else if err := withDecoder(conn, 32+8+128, peerHeader.decodeFrom); err != nil {
-		return nil, fmt.Errorf("could not read peer's header: %w", err)
-	} else if err := validateHeader(ourHeader, peerHeader); err != nil {
-		withEncoder(conn, func(e *types.Encoder) { e.WriteString(err.Error()) })
-		return nil, fmt.Errorf("unacceptable header: %w", err)
-	} else if err := withEncoder(conn, func(e *types.Encoder) { e.WriteString("accept") }); err != nil {
-		return nil, fmt.Errorf("could not write accept: %w", err)
-	} else if host, _, err := net.SplitHostPort(conn.RemoteAddr().String()); err != nil {
-		return nil, fmt.Errorf("invalid remote addr (%q): %w", conn.RemoteAddr(), err)
-	} else if _, port, err := 
net.SplitHostPort(peerHeader.NetAddress); err != nil { - return nil, fmt.Errorf("peer provided invalid net address (%q): %w", peerHeader.NetAddress, err) - } else { - dialAddr = net.JoinHostPort(host, port) - } +// RelayV2Header relays a v2 block header to the peer. +func (p *Peer) RelayV2Header(h V2BlockHeader, timeout time.Duration) error { + return p.callRPC(&RPCRelayV2Header{Header: h}, timeout) +} - // establish mux session - m, err := smux.Client(conn, nil) - if err != nil { - return nil, err - } +// RelayV2BlockOutline relays a v2 block outline to the peer. +func (p *Peer) RelayV2BlockOutline(b V2BlockOutline, timeout time.Duration) error { + return p.callRPC(&RPCRelayV2BlockOutline{Block: b}, timeout) +} - return &Peer{ - Addr: dialAddr, - ConnAddr: conn.RemoteAddr().String(), - Inbound: false, - Version: theirVersion, - mux: m, - }, nil +// RelayV2TransactionSet relays a v2 transaction set to the peer. +func (p *Peer) RelayV2TransactionSet(txns []types.V2Transaction, timeout time.Duration) error { + return p.callRPC(&RPCRelayV2TransactionSet{Transactions: txns}, timeout) } -// AcceptPeer reciprocates the gateway handshake with a peer. -func AcceptPeer(conn net.Conn, ourHeader Header) (_ *Peer, err error) { - // exchange versions - ourVersion := "1.5.5" - var theirVersion string - if err := withDecoder(conn, 128, func(d *types.Decoder) { theirVersion = d.ReadString() }); err != nil { - return nil, fmt.Errorf("could not read peer version: %w", err) - } else if err := withEncoder(conn, func(e *types.Encoder) { e.WriteString(ourVersion) }); err != nil { - return nil, fmt.Errorf("could not write our version: %w", err) - } - // NOTE: we assume that the peer will be compatible, so we don't bother - // validating the version +// SendV2Blocks requests up to n blocks from p, starting from the most recent +// element of history known to p. The peer also returns the number of remaining +// blocks left to sync. 
+func (p *Peer) SendV2Blocks(history []types.BlockID, max uint64, timeout time.Duration) ([]types.Block, uint64, error) { + r := RPCSendV2Blocks{History: history, Max: max} + err := p.callRPC(&r, timeout) + return r.Blocks, r.Remaining, err +} - // exchange headers - var accept string - var peerHeader Header - var dialAddr string - if err := withDecoder(conn, 32+8+128, peerHeader.decodeFrom); err != nil { - return nil, fmt.Errorf("could not read peer's header: %w", err) - } else if err := validateHeader(ourHeader, peerHeader); err != nil { - withEncoder(conn, func(e *types.Encoder) { e.WriteString(err.Error()) }) - return nil, fmt.Errorf("unacceptable header: %w", err) - } else if err := withEncoder(conn, func(e *types.Encoder) { e.WriteString("accept") }); err != nil { - return nil, fmt.Errorf("could not write accept: %w", err) - } else if err := withEncoder(conn, ourHeader.encodeTo); err != nil { - return nil, fmt.Errorf("could not write our header: %w", err) - } else if err := withDecoder(conn, 128, func(d *types.Decoder) { accept = d.ReadString() }); err != nil { - return nil, fmt.Errorf("could not read peer version: %w", err) - } else if accept != "accept" { - return nil, fmt.Errorf("peer rejected our header: %v", accept) - } else if host, _, err := net.SplitHostPort(conn.RemoteAddr().String()); err != nil { - return nil, fmt.Errorf("invalid remote addr (%q): %w", conn.RemoteAddr(), err) - } else if _, port, err := net.SplitHostPort(peerHeader.NetAddress); err != nil { - return nil, fmt.Errorf("peer provided invalid net address (%q): %w", peerHeader.NetAddress, err) +// AcceptRPC accepts an RPC initiated by the peer. 
+func (p *Peer) AcceptRPC() (types.Specifier, net.Conn, error) { + s, err := p.acceptStream() + if err != nil { + return types.Specifier{}, nil, err + } + s.SetDeadline(time.Now().Add(5 * time.Second)) + var id types.Specifier + if p.smux != nil { + err = p.withDecoder(s, 8, (*v1RPCID)(&id).decodeFrom) } else { - dialAddr = net.JoinHostPort(host, port) + err = p.withDecoder(s, 16, id.DecodeFrom) } - - // establish mux session - m, err := smux.Server(conn, nil) if err != nil { - return nil, err + s.Close() + return types.Specifier{}, nil, err } - - return &Peer{ - Addr: dialAddr, - ConnAddr: conn.RemoteAddr().String(), - Inbound: true, - Version: theirVersion, - mux: m, - }, nil + s.SetDeadline(time.Time{}) + return id, s, nil } diff --git a/types/types.go b/types/types.go index 815c5ba2..7cfdde77 100644 --- a/types/types.go +++ b/types/types.go @@ -423,7 +423,7 @@ type Transaction struct { // transaction's effects, but not incidental data such as signatures. This // ensures that the ID will remain stable (i.e. non-malleable). // -// To hash all of the data in a transaction, use the EncodeTo method. +// To hash all of the data in a transaction, use the FullHash method. func (txn *Transaction) ID() TransactionID { h := hasherPool.Get().(*Hasher) defer hasherPool.Put(h) @@ -432,6 +432,16 @@ func (txn *Transaction) ID() TransactionID { return TransactionID(h.Sum()) } +// FullHash returns the hash of the transaction's binary encoding. This hash is +// only used in specific circumstances; generally, ID should be used instead. +func (txn *Transaction) FullHash() Hash256 { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + txn.EncodeTo(h.E) + return h.Sum() +} + // SiacoinOutputID returns the ID of the siacoin output at index i. 
func (txn *Transaction) SiacoinOutputID(i int) SiacoinOutputID { h := hasherPool.Get().(*Hasher) @@ -700,7 +710,7 @@ type V2Transaction struct { // transaction's effects, but not incidental data such as signatures or Merkle // proofs. This ensures that the ID will remain stable (i.e. non-malleable). // -// To hash all of the data in a transaction, use the EncodeTo method. +// To hash all of the data in a transaction, use the FullHash method. func (txn *V2Transaction) ID() TransactionID { // NOTE: In general, it is not possible to change a transaction's ID without // causing it to become invalid, but an exception exists for non-standard @@ -757,6 +767,16 @@ func (txn *V2Transaction) ID() TransactionID { return TransactionID(h.Sum()) } +// FullHash returns the hash of the transaction's binary encoding. This hash is +// only used in specific circumstances; generally, ID should be used instead. +func (txn *V2Transaction) FullHash() Hash256 { + h := hasherPool.Get().(*Hasher) + defer hasherPool.Put(h) + h.Reset() + txn.EncodeTo(h.E) + return h.Sum() +} + // SiacoinOutputID returns the ID for the siacoin output at index i. 
func (*V2Transaction) SiacoinOutputID(txid TransactionID, i int) SiacoinOutputID { h := hasherPool.Get().(*Hasher) From 99af07aaf4af2057628f8fcc93fe702946475f4d Mon Sep 17 00:00:00 2001 From: lukechampine Date: Wed, 13 Sep 2023 18:43:49 -0400 Subject: [PATCH 45/53] chain: Store checkpoint for genesis state --- chain/db.go | 1 + chain/manager.go | 5 ----- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/chain/db.go b/chain/db.go index 4643b7a4..f30587c6 100644 --- a/chain/db.go +++ b/chain/db.go @@ -613,6 +613,7 @@ func NewDBStore(db DB, n *consensus.Network, genesisBlock types.Block) (_ *DBSto // store genesis checkpoint and apply its effects genesisState := n.GenesisState() + dbs.putCheckpoint(Checkpoint{types.Block{}, genesisState, &consensus.V1BlockSupplement{}}) bs := consensus.V1BlockSupplement{Transactions: make([]consensus.V1TransactionSupplement, len(genesisBlock.Transactions))} cs, cau := consensus.ApplyBlock(genesisState, genesisBlock, bs, time.Time{}) dbs.putCheckpoint(Checkpoint{genesisBlock, cs, &bs}) diff --git a/chain/manager.go b/chain/manager.go index 1dbfffa5..df39b82c 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -621,11 +621,6 @@ func (m *Manager) computeMedianFee() types.Currency { index, ok1 := m.store.BestIndex(m.tipState.Index.Height - i) c, ok2 := m.store.Checkpoint(index.ID) pc, ok3 := m.store.Checkpoint(c.Block.ParentID) - if !ok3 && m.tipState.Index.Height == 0 { - // bit of a hack to make the genesis block work - pc.State = c.State.Network.GenesisState() - ok3 = true - } if ok1 && ok2 && ok3 { prevFees = append(prevFees, calculateBlockMedianFee(pc.State, c.Block)) } From 56592d7c449e866df1bc9e02b7e1036df6e6fa59 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Wed, 13 Sep 2023 18:44:30 -0400 Subject: [PATCH 46/53] consensus: Guard against misuse of UpdateElementProof --- consensus/merkle.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/consensus/merkle.go b/consensus/merkle.go 
index 20a30bb9..c650316b 100644 --- a/consensus/merkle.go +++ b/consensus/merkle.go @@ -401,10 +401,12 @@ func (acc *ElementAccumulator) applyBlock(updated, added []elementLeaf) (eau ele acc.Trees[height] = es[0].proofRoot() } } + eau.oldNumLeaves = acc.NumLeaves eau.treeGrowth = acc.addLeaves(added) for _, e := range updated { e.MerkleProof = append(e.MerkleProof, eau.treeGrowth[len(e.MerkleProof)]...) } + eau.numLeaves = acc.NumLeaves return eau } @@ -444,16 +446,22 @@ func updateProof(e *types.StateElement, updated *[64][]elementLeaf) { } type elementApplyUpdate struct { - updated [64][]elementLeaf - treeGrowth [64][]types.Hash256 + updated [64][]elementLeaf + treeGrowth [64][]types.Hash256 + oldNumLeaves uint64 + numLeaves uint64 } func (eau *elementApplyUpdate) updateElementProof(e *types.StateElement) { if e.LeafIndex == types.EphemeralLeafIndex { panic("cannot update an ephemeral element") + } else if e.LeafIndex >= eau.oldNumLeaves { + return // newly-added element } updateProof(e, &eau.updated) - e.MerkleProof = append(e.MerkleProof, eau.treeGrowth[len(e.MerkleProof)]...) + if mh := mergeHeight(eau.numLeaves, e.LeafIndex); mh != len(e.MerkleProof) { + e.MerkleProof = append(e.MerkleProof, eau.treeGrowth[len(e.MerkleProof)]...) 
+ } } type elementRevertUpdate struct { From 4793c8ef667f8dd1023d057fd9a33f03335b812a Mon Sep 17 00:00:00 2001 From: lukechampine Date: Wed, 13 Sep 2023 18:44:55 -0400 Subject: [PATCH 47/53] types: Handle overflow in (Currency).String --- types/currency.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/types/currency.go b/types/currency.go index 9f92c9ee..34bf039a 100644 --- a/types/currency.go +++ b/types/currency.go @@ -255,11 +255,13 @@ func (c Currency) String() string { } // iterate until we find a unit greater than c + // + // NOTE: MaxCurrency is ~340.3 TS mag := pico unit := "" for _, unit = range []string{"pS", "nS", "uS", "mS", "SC", "KS", "MS", "GS", "TS"} { - j := mag.Mul64(1000) - if c.Cmp(j) < 0 || unit == "TS" { + j, overflow := mag.Mul64WithOverflow(1000) + if overflow || c.Cmp(j) < 0 || unit == "TS" { break } mag = j From c5e9fb9868c4127c215bf93dd039abec7d1f4bf2 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Wed, 13 Sep 2023 18:45:34 -0400 Subject: [PATCH 48/53] types: Fix SatisfiedPolicy encoding for UnlockConditions --- types/encoding.go | 8 ++++++++ types/policy_test.go | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/types/encoding.go b/types/encoding.go index 33d6f996..7832d109 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -504,6 +504,10 @@ func (sp SatisfiedPolicy) EncodeTo(e *Encoder) { for i := range p.Of { rec(p.Of[i]) } + case PolicyTypeUnlockConditions: + for i := range p.PublicKeys { + rec(PolicyPublicKey(*(*PublicKey)(p.PublicKeys[i].Key))) + } default: // nothing to do } @@ -1078,6 +1082,10 @@ func (sp *SatisfiedPolicy) DecodeFrom(d *Decoder) { for i := range p.Of { rec(p.Of[i]) } + case PolicyTypeUnlockConditions: + for i := range p.PublicKeys { + rec(PolicyPublicKey(*(*PublicKey)(p.PublicKeys[i].Key))) + } default: // nothing to do } diff --git a/types/policy_test.go b/types/policy_test.go index cbc25e5c..327856e7 100644 --- a/types/policy_test.go +++ 
b/types/policy_test.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "fmt" "testing" "time" ) @@ -306,4 +307,25 @@ func TestPolicyRoundtrip(t *testing.T) { t.Fatal("policy did not survive roundtrip") } } + + // also check satisfied policy + sp := SatisfiedPolicy{ + Policy: SpendPolicy{PolicyTypeUnlockConditions{ + PublicKeys: []UnlockKey{ + PublicKey{1, 2, 3}.UnlockKey(), + PublicKey{4, 5, 6}.UnlockKey(), + PublicKey{7, 8, 9}.UnlockKey(), + }, + }}, + Signatures: []Signature{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + }, + } + var sp2 SatisfiedPolicy + roundtrip(sp, &sp2) + if fmt.Sprint(sp) != fmt.Sprint(sp2) { + t.Fatal("satisfied policy did not survive roundtrip:", sp, sp2) + } } From 9c3237701f39edf718ab2c819a3833eabad7868f Mon Sep 17 00:00:00 2001 From: lukechampine Date: Thu, 14 Sep 2023 21:59:42 -0400 Subject: [PATCH 49/53] consensus: Only encode valid PrevTimestamps --- consensus/state.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 8369b56a..04d5f704 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -107,7 +107,7 @@ type State struct { // EncodeTo implements types.EncoderTo. func (s State) EncodeTo(e *types.Encoder) { s.Index.EncodeTo(e) - for _, ts := range s.PrevTimestamps { + for _, ts := range s.PrevTimestamps[:s.numTimestamps()] { e.WriteTime(ts) } s.Depth.EncodeTo(e) @@ -128,7 +128,7 @@ func (s State) EncodeTo(e *types.Encoder) { // DecodeFrom implements types.DecoderFrom. 
func (s *State) DecodeFrom(d *types.Decoder) { s.Index.DecodeFrom(d) - for i := range s.PrevTimestamps { + for i := range s.PrevTimestamps[:s.numTimestamps()] { s.PrevTimestamps[i] = d.ReadTime() } s.Depth.DecodeFrom(d) From 313f01f603ee472a866843015bc9a1fe13e641ee Mon Sep 17 00:00:00 2001 From: lukechampine Date: Thu, 14 Sep 2023 22:01:16 -0400 Subject: [PATCH 50/53] consensus: Add encoding methods for Work --- consensus/update.go | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/consensus/update.go b/consensus/update.go index ef7ed4d7..a2ed0621 100644 --- a/consensus/update.go +++ b/consensus/update.go @@ -3,6 +3,7 @@ package consensus import ( "bytes" "encoding/binary" + "errors" "math/big" "math/bits" "time" @@ -27,6 +28,39 @@ func (w Work) EncodeTo(e *types.Encoder) { e.Write(w.n[:]) } // DecodeFrom implements types.DecoderFrom. func (w *Work) DecodeFrom(d *types.Decoder) { d.Read(w.n[:]) } +// String implements fmt.Stringer. +func (w Work) String() string { return new(big.Int).SetBytes(w.n[:]).String() } + +// MarshalText implements encoding.TextMarshaler. +func (w Work) MarshalText() ([]byte, error) { + return new(big.Int).SetBytes(w.n[:]).MarshalText() +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (w *Work) UnmarshalText(b []byte) error { + i := new(big.Int) + if err := i.UnmarshalText(b); err != nil { + return err + } else if i.Sign() < 0 { + return errors.New("value cannot be negative") + } else if i.BitLen() > 256 { + return errors.New("value overflows Work representation") + } + i.FillBytes(w.n[:]) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (w *Work) UnmarshalJSON(b []byte) error { + return w.UnmarshalText(bytes.Trim(b, `"`)) +} + +// MarshalJSON implements json.Marshaler. 
+func (w Work) MarshalJSON() ([]byte, error) { + js, err := new(big.Int).SetBytes(w.n[:]).MarshalJSON() + return []byte(`"` + string(js) + `"`), err +} + func (w Work) add(v Work) Work { var r Work var sum, c uint64 From 57e896a06c19f9861cd1465b7d3e8ff1fad2c7be Mon Sep 17 00:00:00 2001 From: lukechampine Date: Thu, 14 Sep 2023 22:01:50 -0400 Subject: [PATCH 51/53] consensus: Move v2 transaction restriction checks into ValidateTransaction This isn't just a cosmetic refactor: it means that the txpool will reject v2 txns before allow height. --- consensus/validation.go | 37 +++++++++++++++---------------------- 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/consensus/validation.go b/consensus/validation.go index babb7279..e3158ade 100644 --- a/consensus/validation.go +++ b/consensus/validation.go @@ -92,12 +92,11 @@ func ValidateOrphan(s State, b types.Block) error { } else if err := validateHeader(s, b.ParentID, b.Timestamp, b.Nonce, b.ID()); err != nil { return err } - if b.V2 != nil { if b.V2.Height != s.Index.Height+1 { return errors.New("block height does not increment parent height") } else if b.V2.Commitment != s.Commitment(s.TransactionsCommitment(b.Transactions, b.V2Transactions()), b.MinerPayouts[0].Address) { - return errors.New("commitment hash does not match header") + return errors.New("commitment hash mismatch") } } return nil @@ -496,7 +495,9 @@ func validateSignatures(ms *MidState, txn types.Transaction) error { // ValidateTransaction validates txn within the context of ms and store. 
func ValidateTransaction(ms *MidState, txn types.Transaction, ts V1TransactionSupplement) error { - if err := validateCurrencyOverflow(ms, txn); err != nil { + if ms.base.childHeight() >= ms.base.Network.HardforkV2.RequireHeight { + return errors.New("v1 transactions are not allowed after v2 hardfork is complete") + } else if err := validateCurrencyOverflow(ms, txn); err != nil { return err } else if err := validateMinimumValues(ms, txn); err != nil { return err @@ -871,7 +872,9 @@ func validateFoundationUpdate(ms *MidState, txn types.V2Transaction) error { // ValidateV2Transaction validates txn within the context of ms. func ValidateV2Transaction(ms *MidState, txn types.V2Transaction) error { - if err := validateV2CurrencyValues(ms, txn); err != nil { + if ms.base.childHeight() < ms.base.Network.HardforkV2.AllowHeight { + return errors.New("v2 transactions are not allowed until v2 hardfork begins") + } else if err := validateV2CurrencyValues(ms, txn); err != nil { return err } else if err := validateV2Siacoins(ms, txn); err != nil { return err @@ -897,27 +900,17 @@ func ValidateBlock(s State, b types.Block, bs V1BlockSupplement) error { return err } ms := NewMidState(s) - if len(b.Transactions) > 0 { - if s.childHeight() >= ms.base.Network.HardforkV2.RequireHeight { - return errors.New("v1 transactions are not allowed after v2 hardfork is complete") - } - for i, txn := range b.Transactions { - if err := ValidateTransaction(ms, txn, bs.Transactions[i]); err != nil { - return fmt.Errorf("transaction %v is invalid: %w", i, err) - } - ms.ApplyTransaction(txn, bs.Transactions[i]) + for i, txn := range b.Transactions { + if err := ValidateTransaction(ms, txn, bs.Transactions[i]); err != nil { + return fmt.Errorf("transaction %v is invalid: %w", i, err) } + ms.ApplyTransaction(txn, bs.Transactions[i]) } - if b.V2 != nil { - if s.childHeight() < ms.base.Network.HardforkV2.AllowHeight { - return errors.New("v2 transactions are not allowed until v2 hardfork begins") - } - 
for i, txn := range b.V2.Transactions { - if err := ValidateV2Transaction(ms, txn); err != nil { - return fmt.Errorf("v2 transaction %v is invalid: %w", i, err) - } - ms.ApplyV2Transaction(txn) + for i, txn := range b.V2Transactions() { + if err := ValidateV2Transaction(ms, txn); err != nil { + return fmt.Errorf("v2 transaction %v is invalid: %w", i, err) } + ms.ApplyV2Transaction(txn) } return nil } From 3ca4ff703dc617d31029b3dfbc9930301a3cf3a1 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Thu, 14 Sep 2023 22:13:25 -0400 Subject: [PATCH 52/53] gateway: Refactor outlining --- gateway/encoding.go | 24 +++++++++----- gateway/gateway.go | 76 +++++++++++++++++++++++++++++++-------------- gateway/peer.go | 4 +-- 3 files changed, 71 insertions(+), 33 deletions(-) diff --git a/gateway/encoding.go b/gateway/encoding.go index d4950240..fa8ab506 100644 --- a/gateway/encoding.go +++ b/gateway/encoding.go @@ -84,24 +84,32 @@ func (h *V2BlockHeader) decodeFrom(d *types.Decoder) { } func (ot *OutlineTransaction) encodeTo(e *types.Encoder) { - ot.Hash.EncodeTo(e) if ot.Transaction != nil { - e.WriteBool(true) + e.WriteUint8(0) ot.Transaction.EncodeTo(e) - } else { - e.WriteBool(false) + } else if ot.V2Transaction != nil { + e.WriteUint8(1) ot.V2Transaction.EncodeTo(e) + } else { + e.WriteUint8(2) + ot.Hash.EncodeTo(e) } } func (ot *OutlineTransaction) decodeFrom(d *types.Decoder) { - ot.Hash.DecodeFrom(d) - if d.ReadBool() { + switch t := d.ReadUint8(); t { + case 0: ot.Transaction = new(types.Transaction) ot.Transaction.DecodeFrom(d) - } else { + ot.Hash = ot.Transaction.FullHash() + case 1: ot.V2Transaction = new(types.V2Transaction) ot.V2Transaction.DecodeFrom(d) + ot.Hash = ot.V2Transaction.FullHash() + case 2: + ot.Hash.DecodeFrom(d) + default: + d.SetErr(fmt.Errorf("invalid outline transaction type (%d)", t)) } } @@ -484,6 +492,8 @@ func objectForID(id types.Specifier) object { case idSendTransactions: return new(RPCSendTransactions) case idRelayV2Header: + return 
new(RPCRelayV2Header) + case idRelayV2BlockOutline: return new(RPCRelayV2BlockOutline) case idRelayV2TransactionSet: return new(RPCRelayV2TransactionSet) diff --git a/gateway/gateway.go b/gateway/gateway.go index 8edc1806..0f2f2855 100644 --- a/gateway/gateway.go +++ b/gateway/gateway.go @@ -130,26 +130,26 @@ type V2BlockOutline struct { Transactions []OutlineTransaction } -func (pb V2BlockOutline) commitment(cs consensus.State) types.Hash256 { +func (bo V2BlockOutline) commitment(cs consensus.State) types.Hash256 { var acc blake2b.Accumulator - for _, txn := range pb.Transactions { + for _, txn := range bo.Transactions { acc.AddLeaf(txn.Hash) } - return cs.Commitment(acc.Root(), pb.MinerAddress) + return cs.Commitment(acc.Root(), bo.MinerAddress) } // ID returns a hash that uniquely identifies the block. -func (pb V2BlockOutline) ID(cs consensus.State) types.BlockID { +func (bo V2BlockOutline) ID(cs consensus.State) types.BlockID { return (&types.Block{ - Nonce: pb.Nonce, - Timestamp: pb.Timestamp, - V2: &types.V2BlockData{Commitment: pb.commitment(cs)}, + Nonce: bo.Nonce, + Timestamp: bo.Timestamp, + V2: &types.V2BlockData{Commitment: bo.commitment(cs)}, }).ID() } // Missing returns the hashes of transactions that are missing from the block. -func (pb V2BlockOutline) Missing() (missing []types.Hash256) { - for _, txn := range pb.Transactions { +func (bo V2BlockOutline) Missing() (missing []types.Hash256) { + for _, txn := range bo.Transactions { if txn.Transaction == nil && txn.V2Transaction == nil { missing = append(missing, txn.Hash) } @@ -160,7 +160,7 @@ func (pb V2BlockOutline) Missing() (missing []types.Hash256) { // Complete attempts to reconstruct the original block using the supplied // transactions. If the block cannot be fully reconstructed, it returns the // hashes of the missing transactions. 
-func (pb *V2BlockOutline) Complete(cs consensus.State, txns []types.Transaction, v2txns []types.V2Transaction) (types.Block, []types.Hash256) { +func (bo *V2BlockOutline) Complete(cs consensus.State, txns []types.Transaction, v2txns []types.V2Transaction) (types.Block, []types.Hash256) { var v1hashes map[types.Hash256]types.Transaction var v2hashes map[types.Hash256]types.V2Transaction completeTxn := func(ptxn *OutlineTransaction) { @@ -190,17 +190,17 @@ func (pb *V2BlockOutline) Complete(cs consensus.State, txns []types.Transaction, } b := types.Block{ - ParentID: pb.ParentID, - Nonce: pb.Nonce, - Timestamp: pb.Timestamp, - MinerPayouts: []types.SiacoinOutput{{Address: pb.MinerAddress, Value: cs.BlockReward()}}, + ParentID: bo.ParentID, + Nonce: bo.Nonce, + Timestamp: bo.Timestamp, + MinerPayouts: []types.SiacoinOutput{{Address: bo.MinerAddress, Value: cs.BlockReward()}}, V2: &types.V2BlockData{ - Height: pb.Height, - Commitment: pb.commitment(cs), + Height: bo.Height, + Commitment: bo.commitment(cs), }, } - for i := range pb.Transactions { - ptxn := &pb.Transactions[i] + for i := range bo.Transactions { + ptxn := &bo.Transactions[i] completeTxn(ptxn) if ptxn.Transaction != nil { b.Transactions = append(b.Transactions, *ptxn.Transaction) @@ -212,11 +212,11 @@ func (pb *V2BlockOutline) Complete(cs consensus.State, txns []types.Transaction, b.MinerPayouts[0].Value = b.MinerPayouts[0].Value.Add(ptxn.V2Transaction.MinerFee) } } - return b, pb.Missing() + return b, bo.Missing() } // RemoveTransactions removes the specified transactions from the block. 
-func (pb *V2BlockOutline) RemoveTransactions(txns []types.Transaction, v2txns []types.V2Transaction) { +func (bo *V2BlockOutline) RemoveTransactions(txns []types.Transaction, v2txns []types.V2Transaction) { remove := make(map[types.Hash256]bool) for _, txn := range txns { remove[txn.FullHash()] = true @@ -224,14 +224,42 @@ func (pb *V2BlockOutline) RemoveTransactions(txns []types.Transaction, v2txns [] for _, txn := range v2txns { remove[txn.FullHash()] = true } - for i := range pb.Transactions { - if remove[pb.Transactions[i].Hash] { - pb.Transactions[i].Transaction = nil - pb.Transactions[i].V2Transaction = nil + for i := range bo.Transactions { + if remove[bo.Transactions[i].Hash] { + bo.Transactions[i].Transaction = nil + bo.Transactions[i].V2Transaction = nil } } } +// OutlineBlock returns a block outline for b that omits the specified +// transactions. +func OutlineBlock(b types.Block, txns []types.Transaction, v2txns []types.V2Transaction) V2BlockOutline { + var otxns []OutlineTransaction + for _, txn := range b.Transactions { + otxns = append(otxns, OutlineTransaction{ + Hash: txn.FullHash(), + Transaction: &txn, + }) + } + for _, txn := range b.V2Transactions() { + otxns = append(otxns, OutlineTransaction{ + Hash: txn.FullHash(), + V2Transaction: &txn, + }) + } + bo := V2BlockOutline{ + Height: b.V2.Height, + ParentID: b.ParentID, + Nonce: b.Nonce, + Timestamp: b.Timestamp, + MinerAddress: b.MinerPayouts[0].Address, + Transactions: otxns, + } + bo.RemoveTransactions(txns, v2txns) + return bo +} + // Dial initiates the gateway handshake with a peer. 
func Dial(conn net.Conn, ourHeader Header) (*Peer, error) { p := &Peer{ diff --git a/gateway/peer.go b/gateway/peer.go index 96915a0b..33cf7ac5 100644 --- a/gateway/peer.go +++ b/gateway/peer.go @@ -104,7 +104,7 @@ type RPCHandler interface { // v2 Transactions(index types.ChainIndex, txns []types.Hash256) ([]types.Transaction, []types.V2Transaction, error) Checkpoint(index types.ChainIndex) (types.Block, consensus.State, error) - RelayV2BlockHeader(h V2BlockHeader, origin *Peer) + RelayV2Header(h V2BlockHeader, origin *Peer) RelayV2BlockOutline(b V2BlockOutline, origin *Peer) RelayV2TransactionSet(txns []types.V2Transaction, origin *Peer) } @@ -196,7 +196,7 @@ func (p *Peer) HandleRPC(id types.Specifier, stream net.Conn, h RPCHandler) erro if err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { return err } - h.RelayV2BlockHeader(r.Header, p) + h.RelayV2Header(r.Header, p) return nil case *RPCRelayV2BlockOutline: if err := p.withDecoder(stream, r.maxRequestLen(), r.decodeRequest); err != nil { From a7b05a376dbd02239dc9720d9fc773d7d0a52e14 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sat, 16 Sep 2023 12:49:38 -0400 Subject: [PATCH 53/53] chain: Replace Checkpoint with separately-stored block/state This is more efficient for most queries, and it sidesteps the special case of the genesis state (which was not the result of applying a block). 
--- chain/db.go | 131 ++++++++++++++++------ chain/manager.go | 204 +++++++++++++++-------------------- chain/manager_test.go | 8 +- consensus/update_test.go | 107 ++++++++++++++++-- consensus/validation_test.go | 8 +- 5 files changed, 292 insertions(+), 166 deletions(-) diff --git a/chain/db.go b/chain/db.go index f30587c6..532d32b8 100644 --- a/chain/db.go +++ b/chain/db.go @@ -12,6 +12,48 @@ import ( "go.sia.tech/core/types" ) +type supplementedBlock struct { + Block types.Block + Supplement *consensus.V1BlockSupplement +} + +func (sb supplementedBlock) EncodeTo(e *types.Encoder) { + e.WriteUint8(2) + (types.V2Block)(sb.Block).EncodeTo(e) + e.WriteBool(sb.Supplement != nil) + if sb.Supplement != nil { + sb.Supplement.EncodeTo(e) + } +} + +func (sb *supplementedBlock) DecodeFrom(d *types.Decoder) { + if v := d.ReadUint8(); v != 2 { + d.SetErr(fmt.Errorf("incompatible version (%d)", v)) + } + (*types.V2Block)(&sb.Block).DecodeFrom(d) + if d.ReadBool() { + sb.Supplement = new(consensus.V1BlockSupplement) + sb.Supplement.DecodeFrom(d) + } +} + +type versionedState struct { + State consensus.State +} + +func (vs versionedState) EncodeTo(e *types.Encoder) { + e.WriteUint8(2) + vs.State.EncodeTo(e) + +} + +func (vs *versionedState) DecodeFrom(d *types.Decoder) { + if v := d.ReadUint8(); v != 2 { + d.SetErr(fmt.Errorf("incompatible version (%d)", v)) + } + vs.State.DecodeFrom(d) +} + // A DB is a generic key-value database. type DB interface { Bucket(name []byte) DBBucket @@ -189,7 +231,8 @@ func (b *dbBucket) delete(key []byte) { var ( bVersion = []byte("Version") bMainChain = []byte("MainChain") - bCheckpoints = []byte("Checkpoints") + bStates = []byte("States") + bBlocks = []byte("Blocks") bFileContractElements = []byte("FileContracts") bSiacoinElements = []byte("SiacoinElements") bSiafundElements = []byte("SiafundElements") @@ -201,7 +244,7 @@ var ( // DBStore implements Store using a key-value database. 
type DBStore struct { db DB - n *consensus.Network // for getCheckpoint + n *consensus.Network // for getState enc types.Encoder unflushed int @@ -236,8 +279,26 @@ func (db *DBStore) putHeight(height uint64) { db.bucket(bMainChain).putRaw(keyHeight, db.encHeight(height)) } -func (db *DBStore) putCheckpoint(c Checkpoint) { - db.bucket(bCheckpoints).put(c.State.Index.ID[:], c) +func (db *DBStore) getState(id types.BlockID) (consensus.State, bool) { + var vs versionedState + ok := db.bucket(bStates).get(id[:], &vs) + vs.State.Network = db.n + return vs.State, ok +} + +func (db *DBStore) putState(cs consensus.State) { + db.bucket(bStates).put(cs.Index.ID[:], versionedState{cs}) +} + +func (db *DBStore) getBlock(id types.BlockID) (b types.Block, bs *consensus.V1BlockSupplement, _ bool) { + var sb supplementedBlock + ok := db.bucket(bBlocks).get(id[:], &sb) + return sb.Block, sb.Supplement, ok +} + +func (db *DBStore) putBlock(b types.Block, bs *consensus.V1BlockSupplement) { + id := b.ID() + db.bucket(bBlocks).put(id[:], supplementedBlock{b, bs}) } func (db *DBStore) encLeaf(index uint64, height int) []byte { @@ -466,8 +527,8 @@ func (db *DBStore) BestIndex(height uint64) (index types.ChainIndex, ok bool) { func (db *DBStore) SupplementTipTransaction(txn types.Transaction) (ts consensus.V1TransactionSupplement) { // get tip state, for proof-trimming index, _ := db.BestIndex(db.getHeight()) - c, _ := db.Checkpoint(index.ID) - numLeaves := c.State.Elements.NumLeaves + cs, _ := db.State(index.ID) + numLeaves := cs.Elements.NumLeaves for _, sci := range txn.SiacoinInputs { if sce, ok := db.getSiacoinElement(sci.ParentID, numLeaves); ok { @@ -499,8 +560,8 @@ func (db *DBStore) SupplementTipTransaction(txn types.Transaction) (ts consensus func (db *DBStore) SupplementTipBlock(b types.Block) (bs consensus.V1BlockSupplement) { // get tip state, for proof-trimming index, _ := db.BestIndex(db.getHeight()) - c, _ := db.Checkpoint(index.ID) - numLeaves := c.State.Elements.NumLeaves 
+ cs, _ := db.State(index.ID) + numLeaves := cs.Elements.NumLeaves bs = consensus.V1BlockSupplement{ Transactions: make([]consensus.V1TransactionSupplement, len(b.Transactions)), @@ -519,16 +580,24 @@ func (db *DBStore) SupplementTipBlock(b types.Block) (bs consensus.V1BlockSupple return bs } -// AddCheckpoint implements Store. -func (db *DBStore) AddCheckpoint(c Checkpoint) { - db.bucket(bCheckpoints).put(c.State.Index.ID[:], c) +// State implements Store. +func (db *DBStore) State(id types.BlockID) (consensus.State, bool) { + return db.getState(id) } -// Checkpoint implements Store. -func (db *DBStore) Checkpoint(id types.BlockID) (c Checkpoint, ok bool) { - ok = db.bucket(bCheckpoints).get(id[:], &c) - c.State.Network = db.n - return +// AddState implements Store. +func (db *DBStore) AddState(cs consensus.State) { + db.putState(cs) +} + +// Block implements Store. +func (db *DBStore) Block(id types.BlockID) (types.Block, *consensus.V1BlockSupplement, bool) { + return db.getBlock(id) +} + +// AddBlock implements Store. +func (db *DBStore) AddBlock(b types.Block, bs *consensus.V1BlockSupplement) { + db.putBlock(b, bs) } func (db *DBStore) shouldFlush() bool { @@ -572,9 +641,9 @@ func (db *DBStore) Close() error { return db.db.Flush() } -// NewDBStore creates a new DBStore using the provided database. The current -// checkpoint is also returned. -func NewDBStore(db DB, n *consensus.Network, genesisBlock types.Block) (_ *DBStore, _ Checkpoint, err error) { +// NewDBStore creates a new DBStore using the provided database. The tip state +// is also returned. 
+func NewDBStore(db DB, n *consensus.Network, genesisBlock types.Block) (_ *DBStore, _ consensus.State, err error) { // during initialization, we should return an error instead of panicking defer func() { if r := recover(); r != nil { @@ -585,7 +654,7 @@ func NewDBStore(db DB, n *consensus.Network, genesisBlock types.Block) (_ *DBSto // don't accidentally overwrite a siad database if db.Bucket([]byte("ChangeLog")) != nil { - return nil, Checkpoint{}, errors.New("detected siad database, refusing to proceed") + return nil, consensus.State{}, errors.New("detected siad database, refusing to proceed") } dbs := &DBStore{ @@ -599,7 +668,8 @@ func NewDBStore(db DB, n *consensus.Network, genesisBlock types.Block) (_ *DBSto for _, bucket := range [][]byte{ bVersion, bMainChain, - bCheckpoints, + bStates, + bBlocks, bFileContractElements, bSiacoinElements, bSiafundElements, @@ -611,28 +681,29 @@ func NewDBStore(db DB, n *consensus.Network, genesisBlock types.Block) (_ *DBSto } dbs.bucket(bVersion).putRaw(bVersion, []byte{1}) - // store genesis checkpoint and apply its effects + // store genesis state and apply genesis block to it genesisState := n.GenesisState() - dbs.putCheckpoint(Checkpoint{types.Block{}, genesisState, &consensus.V1BlockSupplement{}}) + dbs.putState(genesisState) bs := consensus.V1BlockSupplement{Transactions: make([]consensus.V1TransactionSupplement, len(genesisBlock.Transactions))} cs, cau := consensus.ApplyBlock(genesisState, genesisBlock, bs, time.Time{}) - dbs.putCheckpoint(Checkpoint{genesisBlock, cs, &bs}) + dbs.putBlock(genesisBlock, &bs) + dbs.putState(cs) dbs.ApplyBlock(cs, cau, true) } else if dbGenesis.ID != genesisBlock.ID() { // try to detect network so we can provide a more helpful error message _, mainnetGenesis := Mainnet() _, zenGenesis := TestnetZen() if genesisBlock.ID() == mainnetGenesis.ID() && dbGenesis.ID == zenGenesis.ID() { - return nil, Checkpoint{}, errors.New("cannot use Zen testnet database on mainnet") + return nil, 
consensus.State{}, errors.New("cannot use Zen testnet database on mainnet") } else if genesisBlock.ID() == zenGenesis.ID() && dbGenesis.ID == mainnetGenesis.ID() { - return nil, Checkpoint{}, errors.New("cannot use mainnet database on Zen testnet") + return nil, consensus.State{}, errors.New("cannot use mainnet database on Zen testnet") } else { - return nil, Checkpoint{}, errors.New("database previously initialized with different genesis block") + return nil, consensus.State{}, errors.New("database previously initialized with different genesis block") } } - // load current checkpoint + // load tip state index, _ := dbs.BestIndex(dbs.getHeight()) - c, _ := dbs.Checkpoint(index.ID) - return dbs, c, err + cs, _ := dbs.State(index.ID) + return dbs, cs, err } diff --git a/chain/manager.go b/chain/manager.go index df39b82c..42a9e496 100644 --- a/chain/manager.go +++ b/chain/manager.go @@ -16,42 +16,6 @@ var ( ErrFutureBlock = errors.New("block's timestamp is too far in the future") ) -// A Checkpoint pairs a block with its resulting chain state. -type Checkpoint struct { - Block types.Block - State consensus.State - Supplement *consensus.V1BlockSupplement -} - -// EncodeTo implements types.EncoderTo. -func (c Checkpoint) EncodeTo(e *types.Encoder) { - e.WriteUint8(2) // block (and supplement) version - types.V2Block(c.Block).EncodeTo(e) - e.WriteUint8(2) // state version - c.State.EncodeTo(e) - e.WriteBool(c.Supplement != nil) - if c.Supplement != nil { - c.Supplement.EncodeTo(e) - } -} - -// DecodeFrom implements types.DecoderFrom. 
-func (c *Checkpoint) DecodeFrom(d *types.Decoder) { - v := d.ReadUint8() - if v != 2 { - d.SetErr(fmt.Errorf("incompatible block version (%d)", v)) - } - (*types.V2Block)(&c.Block).DecodeFrom(d) - if v := d.ReadUint8(); v != 2 { - d.SetErr(fmt.Errorf("incompatible state version (%d)", v)) - } - c.State.DecodeFrom(d) - if d.ReadBool() { - c.Supplement = new(consensus.V1BlockSupplement) - c.Supplement.DecodeFrom(d) - } -} - // An ApplyUpdate reflects the changes to the blockchain resulting from the // addition of a block. type ApplyUpdate struct { @@ -85,8 +49,10 @@ type Store interface { SupplementTipTransaction(txn types.Transaction) consensus.V1TransactionSupplement SupplementTipBlock(b types.Block) consensus.V1BlockSupplement - AddCheckpoint(c Checkpoint) - Checkpoint(id types.BlockID) (Checkpoint, bool) + Block(id types.BlockID) (types.Block, *consensus.V1BlockSupplement, bool) + AddBlock(b types.Block, bs *consensus.V1BlockSupplement) + State(id types.BlockID) (consensus.State, bool) + AddState(cs consensus.State) // Except when mustCommit is set, ApplyBlock and RevertBlock are free to // commit whenever they see fit. @@ -94,23 +60,39 @@ type Store interface { RevertBlock(s consensus.State, cru consensus.RevertUpdate) } +// blockAndParent returns the block with the specified ID, along with its parent +// state. +func blockAndParent(s Store, id types.BlockID) (types.Block, *consensus.V1BlockSupplement, consensus.State, bool) { + b, bs, ok := s.Block(id) + cs, ok2 := s.State(b.ParentID) + return b, bs, cs, ok && ok2 +} + +// blockAndChild returns the block with the specified ID, along with its child +// state. +func blockAndChild(s Store, id types.BlockID) (types.Block, *consensus.V1BlockSupplement, consensus.State, bool) { + b, bs, ok := s.Block(id) + cs, ok2 := s.State(id) + return b, bs, cs, ok && ok2 +} + // ancestorTimestamp returns the timestamp of the n'th ancestor of id. 
func ancestorTimestamp(s Store, id types.BlockID, n uint64) time.Time { - c, _ := s.Checkpoint(id) + b, _, cs, _ := blockAndChild(s, id) for i := uint64(1); i < n; i++ { // if we're on the best path, we can jump to the n'th block directly - if index, _ := s.BestIndex(c.State.Index.Height); index.ID == id { - height := c.State.Index.Height - (n - i) - if c.State.Index.Height < (n - i) { + if index, _ := s.BestIndex(cs.Index.Height); index.ID == id { + height := cs.Index.Height - (n - i) + if cs.Index.Height < (n - i) { height = 0 } ancestorIndex, _ := s.BestIndex(height) - c, _ = s.Checkpoint(ancestorIndex.ID) + b, _, _ = s.Block(ancestorIndex.ID) break } - c, _ = s.Checkpoint(c.Block.ParentID) + b, _, cs, _ = blockAndChild(s, b.ParentID) } - return c.Block.Timestamp + return b.Timestamp } // A Manager tracks multiple blockchains and identifies the best valid @@ -152,15 +134,14 @@ func (m *Manager) Tip() types.ChainIndex { // SyncCheckpoint returns the block at the specified index, along with its // parent state. func (m *Manager) SyncCheckpoint(index types.ChainIndex) (types.Block, consensus.State, bool) { - c, ok := m.store.Checkpoint(index.ID) - pc, ok2 := m.store.Checkpoint(c.Block.ParentID) - return c.Block, pc.State, ok && ok2 + b, _, cs, ok := blockAndParent(m.store, index.ID) + return b, cs, ok } // Block returns the block with the specified ID. 
func (m *Manager) Block(id types.BlockID) (types.Block, bool) { - c, ok := m.store.Checkpoint(id) - return c.Block, ok + b, _, ok := m.store.Block(id) + return b, ok } // BestIndex returns the index of the block at the specified height within the @@ -208,10 +189,10 @@ func (m *Manager) BlocksForHistory(history []types.BlockID, max uint64) ([]types defer m.mu.Unlock() var attachHeight uint64 for _, id := range history { - if c, ok := m.store.Checkpoint(id); !ok { + if cs, ok := m.store.State(id); !ok { continue - } else if index, ok := m.store.BestIndex(c.State.Index.Height); ok && index == c.State.Index { - attachHeight = c.State.Index.Height + } else if index, ok := m.store.BestIndex(cs.Index.Height); ok && index == cs.Index { + attachHeight = cs.Index.Height break } } @@ -221,11 +202,11 @@ func (m *Manager) BlocksForHistory(history []types.BlockID, max uint64) ([]types blocks := make([]types.Block, max) for i := range blocks { index, _ := m.store.BestIndex(attachHeight + uint64(i) + 1) - c, ok := m.store.Checkpoint(index.ID) + b, _, ok := m.store.Block(index.ID) if !ok { return nil, 0, fmt.Errorf("missing block %v", index) } - blocks[i] = c.Block + blocks[i] = b } return blocks, m.tipState.Index.Height - (attachHeight + max), nil } @@ -242,18 +223,16 @@ func (m *Manager) AddBlocks(blocks []types.Block) error { cs := m.tipState for _, b := range blocks { bid := b.ID() + var ok bool if err := m.invalidBlocks[bid]; err != nil { return fmt.Errorf("block %v is invalid: %w", types.ChainIndex{Height: cs.Index.Height + 1, ID: bid}, err) - } else if c, ok := m.store.Checkpoint(bid); ok { + } else if cs, ok = m.store.State(bid); ok { // already have this block - cs = c.State continue - } else if b.ParentID != c.State.Index.ID { - c, ok := m.store.Checkpoint(b.ParentID) - if !ok { - return fmt.Errorf("missing parent checkpoint for block %v", bid) + } else if b.ParentID != cs.Index.ID { + if cs, ok = m.store.State(b.ParentID); !ok { + return fmt.Errorf("missing parent state 
for block %v", bid) } - cs = c.State } if b.Timestamp.After(cs.MaxFutureTimestamp(time.Now())) { return ErrFutureBlock @@ -262,7 +241,8 @@ func (m *Manager) AddBlocks(blocks []types.Block) error { return fmt.Errorf("block %v is invalid: %w", types.ChainIndex{Height: cs.Index.Height + 1, ID: bid}, err) } cs = consensus.ApplyOrphan(cs, b, ancestorTimestamp(m.store, b.ParentID, cs.AncestorDepth())) - m.store.AddCheckpoint(Checkpoint{b, cs, nil}) + m.store.AddState(cs) + m.store.AddBlock(b, nil) } // if this chain is now the best chain, trigger a reorg @@ -292,18 +272,14 @@ func (m *Manager) markBadBlock(bid types.BlockID, err error) { // revertTip reverts the current tip. func (m *Manager) revertTip() error { - c, ok := m.store.Checkpoint(m.tipState.Index.ID) - if !ok { - return fmt.Errorf("missing checkpoint for index %v", m.tipState.Index) - } - pc, ok := m.store.Checkpoint(c.Block.ParentID) + b, bs, cs, ok := blockAndParent(m.store, m.tipState.Index.ID) if !ok { - return fmt.Errorf("missing checkpoint for block %v", c.Block.ParentID) + return fmt.Errorf("missing block at index %v", m.tipState.Index) } - cru := consensus.RevertBlock(pc.State, c.Block, *c.Supplement) - m.store.RevertBlock(pc.State, cru) + cru := consensus.RevertBlock(cs, b, *bs) + m.store.RevertBlock(cs, cru) - update := RevertUpdate{cru, c.Block, pc.State} + update := RevertUpdate{cru, b, cs} for _, s := range m.subscribers { if err := s.ProcessChainRevertUpdate(&update); err != nil { return fmt.Errorf("subscriber %T: %w", s, err) @@ -311,43 +287,44 @@ func (m *Manager) revertTip() error { } m.revertPoolUpdate(cru) - m.tipState = pc.State + m.tipState = cs return nil } // applyTip adds a block to the current tip. 
func (m *Manager) applyTip(index types.ChainIndex) error { var cau consensus.ApplyUpdate - c, ok := m.store.Checkpoint(index.ID) + b, bs, cs, ok := blockAndChild(m.store, index.ID) if !ok { - return fmt.Errorf("missing checkpoint for index %v", index) - } else if c.Block.ParentID != m.tipState.Index.ID { + return fmt.Errorf("missing block at index %v", index) + } else if b.ParentID != m.tipState.Index.ID { panic("applyTip called with non-attaching block") - } else if c.Supplement == nil { - bs := m.store.SupplementTipBlock(c.Block) - if err := consensus.ValidateBlock(m.tipState, c.Block, bs); err != nil { + } else if bs == nil { + bs = new(consensus.V1BlockSupplement) + *bs = m.store.SupplementTipBlock(b) + if err := consensus.ValidateBlock(m.tipState, b, *bs); err != nil { m.markBadBlock(index.ID, err) return fmt.Errorf("block %v is invalid: %w", index, err) } - c.Supplement = &bs - targetTimestamp := ancestorTimestamp(m.store, c.Block.ParentID, m.tipState.AncestorDepth()) - c.State, cau = consensus.ApplyBlock(m.tipState, c.Block, bs, targetTimestamp) - m.store.AddCheckpoint(c) + targetTimestamp := ancestorTimestamp(m.store, b.ParentID, m.tipState.AncestorDepth()) + cs, cau = consensus.ApplyBlock(m.tipState, b, *bs, targetTimestamp) + m.store.AddState(cs) + m.store.AddBlock(b, bs) } else { - targetTimestamp := ancestorTimestamp(m.store, c.Block.ParentID, m.tipState.AncestorDepth()) - _, cau = consensus.ApplyBlock(m.tipState, c.Block, *c.Supplement, targetTimestamp) + targetTimestamp := ancestorTimestamp(m.store, b.ParentID, m.tipState.AncestorDepth()) + _, cau = consensus.ApplyBlock(m.tipState, b, *bs, targetTimestamp) } // force the store to commit if we're at the tip (or close to it), or at // least every 2 seconds; this ensures that the amount of uncommitted data // never grows too large - forceCommit := time.Since(c.Block.Timestamp) < c.State.BlockInterval()*2 || time.Since(m.lastCommit) > 2*time.Second - committed := m.store.ApplyBlock(c.State, cau, 
forceCommit) + forceCommit := time.Since(b.Timestamp) < cs.BlockInterval()*2 || time.Since(m.lastCommit) > 2*time.Second + committed := m.store.ApplyBlock(cs, cau, forceCommit) if committed { m.lastCommit = time.Now() } - update := &ApplyUpdate{cau, c.Block, c.State} + update := &ApplyUpdate{cau, b, cs} for _, s := range m.subscribers { if err := s.ProcessChainApplyUpdate(update, committed); err != nil { return fmt.Errorf("subscriber %T: %w", s, err) @@ -355,7 +332,7 @@ func (m *Manager) applyTip(index types.ChainIndex) error { } m.applyPoolUpdate(cau) - m.tipState = c.State + m.tipState = cs return nil } @@ -366,9 +343,9 @@ func (m *Manager) reorgPath(a, b types.ChainIndex) (revert, apply []types.ChainI if bi, _ := m.store.BestIndex(index.Height); bi.ID == index.ID { *index, ok = m.store.BestIndex(index.Height - 1) } else { - var c Checkpoint - c, ok = m.store.Checkpoint(index.ID) - *index = types.ChainIndex{Height: index.Height - 1, ID: c.Block.ParentID} + var b types.Block + b, _, ok = m.store.Block(index.ID) + *index = types.ChainIndex{Height: index.Height - 1, ID: b.ParentID} } return ok } @@ -431,11 +408,9 @@ func (m *Manager) reorgTo(index types.ChainIndex) error { m.txpool.medianFee = nil m.txpool.parentMap = nil if len(revert) > 0 { - c, _ := m.store.Checkpoint(revert[0].ID) - m.txpool.lastReverted = c.Block.Transactions - if c.Block.V2 != nil { - m.txpool.lastRevertedV2 = c.Block.V2.Transactions - } + b, _, _ := m.store.Block(revert[0].ID) + m.txpool.lastReverted = b.Transactions + m.txpool.lastRevertedV2 = b.V2Transactions() } return nil @@ -454,36 +429,28 @@ func (m *Manager) AddSubscriber(s Subscriber, tip types.ChainIndex) error { return fmt.Errorf("couldn't determine reorg path from %v to %v: %w", tip, m.tipState.Index, err) } for _, index := range revert { - c, ok := m.store.Checkpoint(index.ID) + b, bs, cs, ok := blockAndParent(m.store, index.ID) if !ok { - return fmt.Errorf("missing revert checkpoint %v", index) - } else if c.Supplement == nil { + 
return fmt.Errorf("missing reverted block at index %v", index) + } else if bs == nil { panic("missing supplement for reverted block") } - pc, ok := m.store.Checkpoint(c.Block.ParentID) - if !ok { - return fmt.Errorf("missing revert parent checkpoint %v", c.Block.ParentID) - } - cru := consensus.RevertBlock(pc.State, c.Block, *c.Supplement) - if err := s.ProcessChainRevertUpdate(&RevertUpdate{cru, c.Block, pc.State}); err != nil { + cru := consensus.RevertBlock(cs, b, *bs) + if err := s.ProcessChainRevertUpdate(&RevertUpdate{cru, b, cs}); err != nil { return fmt.Errorf("couldn't process revert update: %w", err) } } for _, index := range apply { - c, ok := m.store.Checkpoint(index.ID) + b, bs, cs, ok := blockAndParent(m.store, index.ID) if !ok { - return fmt.Errorf("missing apply checkpoint %v", index) - } else if c.Supplement == nil { + return fmt.Errorf("missing applied block at index %v", index) + } else if bs == nil { panic("missing supplement for applied block") } - pc, ok := m.store.Checkpoint(c.Block.ParentID) - if !ok { - return fmt.Errorf("missing apply parent checkpoint %v", c.Block.ParentID) - } - _, cau := consensus.ApplyBlock(pc.State, c.Block, *c.Supplement, ancestorTimestamp(m.store, c.Block.ParentID, pc.State.AncestorDepth())) + cs, cau := consensus.ApplyBlock(cs, b, *bs, ancestorTimestamp(m.store, b.ParentID, cs.AncestorDepth())) // TODO: commit every minute for large len(apply)? 
shouldCommit := index == m.tipState.Index - if err := s.ProcessChainApplyUpdate(&ApplyUpdate{cau, c.Block, c.State}, shouldCommit); err != nil { + if err := s.ProcessChainApplyUpdate(&ApplyUpdate{cau, b, cs}, shouldCommit); err != nil { return fmt.Errorf("couldn't process apply update: %w", err) } } @@ -619,10 +586,9 @@ func (m *Manager) computeMedianFee() types.Currency { prevFees := make([]types.Currency, 0, 10) for i := uint64(0); i < 10; i++ { index, ok1 := m.store.BestIndex(m.tipState.Index.Height - i) - c, ok2 := m.store.Checkpoint(index.ID) - pc, ok3 := m.store.Checkpoint(c.Block.ParentID) - if ok1 && ok2 && ok3 { - prevFees = append(prevFees, calculateBlockMedianFee(pc.State, c.Block)) + b, _, cs, ok2 := blockAndParent(m.store, index.ID) + if ok1 && ok2 { + prevFees = append(prevFees, calculateBlockMedianFee(cs, b)) } } sort.Slice(prevFees, func(i, j int) bool { return prevFees[i].Cmp(prevFees[j]) < 0 }) diff --git a/chain/manager_test.go b/chain/manager_test.go index 497761ac..cd297491 100644 --- a/chain/manager_test.go +++ b/chain/manager_test.go @@ -39,12 +39,12 @@ func TestManager(t *testing.T) { n.InitialTarget = types.BlockID{0xFF} - store, checkpoint, err := NewDBStore(NewMemDB(), n, genesisBlock) + store, tipState, err := NewDBStore(NewMemDB(), n, genesisBlock) if err != nil { t.Fatal(err) } defer store.Close() - cm := NewManager(store, checkpoint.State) + cm := NewManager(store, tipState) var hs historySubscriber cm.AddSubscriber(&hs, cm.Tip()) @@ -115,11 +115,11 @@ func TestTxPool(t *testing.T) { } genesisBlock.Transactions = []types.Transaction{giftTxn} - store, checkpoint, err := NewDBStore(NewMemDB(), n, genesisBlock) + store, tipState, err := NewDBStore(NewMemDB(), n, genesisBlock) if err != nil { t.Fatal(err) } - cm := NewManager(store, checkpoint.State) + cm := NewManager(store, tipState) signTxn := func(txn *types.Transaction) { for _, sci := range txn.SiacoinInputs { diff --git a/consensus/update_test.go b/consensus/update_test.go index 
c21b6d40..c94387f8 100644 --- a/consensus/update_test.go +++ b/consensus/update_test.go @@ -12,20 +12,23 @@ import ( ) func ancestorTimestamp(s chain.Store, id types.BlockID, n uint64) time.Time { - c, _ := s.Checkpoint(id) + b, _, _ := s.Block(id) + cs, _ := s.State(id) for i := uint64(1); i < n; i++ { - if index, _ := s.BestIndex(c.State.Index.Height); index.ID == id { - height := c.State.Index.Height - (n - i) - if c.State.Index.Height < (n - i) { + // if we're on the best path, we can jump to the n'th block directly + if index, _ := s.BestIndex(cs.Index.Height); index.ID == id { + height := cs.Index.Height - (n - i) + if cs.Index.Height < (n - i) { height = 0 } ancestorIndex, _ := s.BestIndex(height) - c, _ = s.Checkpoint(ancestorIndex.ID) + b, _, _ = s.Block(ancestorIndex.ID) break } - c, _ = s.Checkpoint(c.Block.ParentID) + b, _, _ = s.Block(b.ParentID) + cs, _ = s.State(b.ParentID) } - return c.Block.Timestamp + return b.Timestamp } func TestApplyBlock(t *testing.T) { @@ -48,12 +51,12 @@ func TestApplyBlock(t *testing.T) { } genesisBlock.Transactions = []types.Transaction{giftTxn} - dbStore, checkpoint, err := chain.NewDBStore(chain.NewMemDB(), n, genesisBlock) + dbStore, tipState, err := chain.NewDBStore(chain.NewMemDB(), n, genesisBlock) if err != nil { t.Fatal(err) } defer dbStore.Close() - cs := checkpoint.State + cs := tipState signTxn := func(txn *types.Transaction) { appendSig := func(parentID types.Hash256) { @@ -188,3 +191,89 @@ func TestApplyBlock(t *testing.T) { checkUpdateElements(au, addedSCEs, spentSCEs, addedSFEs, spentSFEs) } } + +/* +func TestApplyV2Block(t *testing.T) { + n, genesisBlock := chain.TestnetZen() + n.InitialTarget = types.BlockID{0xFF} + n.HardforkDevAddr.Height = 0 + n.HardforkTax.Height = 0 + n.HardforkStorageProof.Height = 0 + n.HardforkOak.Height = 0 + n.HardforkASIC.Height = 0 + n.HardforkFoundation.Height = 0 + n.HardforkV2.AllowHeight = 1 + n.HardforkV2.RequireHeight = 1 + + cs := n.GenesisState() + bs := 
consensus.V1BlockSupplement{Transactions: make([]consensus.V1TransactionSupplement, len(genesisBlock.Transactions))} + cs, cau := consensus.ApplyBlock(cs, genesisBlock, bs, time.Time{}) + var sces []types.SiacoinElement + cau.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { sces = append(sces, sce) }) + for i := range sces { + if !cs.Elements.ContainsUnspentSiacoinElement(sces[i]) { + t.Fatal("missing siacoin element") + } + } + + b := types.Block{ + ParentID: cs.Index.ID, + Timestamp: types.CurrentTimestamp(), + MinerPayouts: []types.SiacoinOutput{{Address: types.VoidAddress, Value: cs.BlockReward()}}, + } + cs, cau = consensus.ApplyBlock(cs, b, consensus.V1BlockSupplement{}, time.Time{}) + for i := range sces { + cau.UpdateElementProof(&sces[i].StateElement) + } + b = types.Block{ + ParentID: cs.Index.ID, + Timestamp: types.CurrentTimestamp(), + MinerPayouts: []types.SiacoinOutput{{Address: types.VoidAddress, Value: cs.BlockReward()}}, + V2: &types.V2BlockData{ + Transactions: []types.V2Transaction{{ + SiacoinInputs: []types.V2SiacoinInput{{Parent: sces[1]}}, + MinerFee: sces[1].SiacoinOutput.Value, + }}, + }, + } + b.V2.Transactions[0].SiacoinOutputs = append(b.V2.Transactions[0].SiacoinOutputs, make([]types.SiacoinOutput, 30)...) 
+ cs, cau = consensus.ApplyBlock(cs, b, consensus.V1BlockSupplement{}, time.Time{}) + for i := range sces { + cau.UpdateElementProof(&sces[i].StateElement) + cau.UpdateElementProof(&sces[i].StateElement) + cau.UpdateElementProof(&sces[i].StateElement) + } + if !cs.Elements.ContainsUnspentSiacoinElement(sces[0]) { + t.Error("missing siacoin element", 0) + } + if !cs.Elements.ContainsSpentSiacoinElement(sces[1]) { + t.Error("missing siacoin element", 1) + } + + b = types.Block{ + ParentID: cs.Index.ID, + Timestamp: types.CurrentTimestamp(), + MinerPayouts: []types.SiacoinOutput{{Address: types.VoidAddress, Value: cs.BlockReward()}}, + V2: &types.V2BlockData{ + Transactions: []types.V2Transaction{{ + SiacoinInputs: []types.V2SiacoinInput{{Parent: sces[0]}}, + MinerFee: sces[0].SiacoinOutput.Value, + }}, + }, + } + b.V2.Transactions[0].SiacoinOutputs = append(b.V2.Transactions[0].SiacoinOutputs, make([]types.SiacoinOutput, 30)...) + cs, cau = consensus.ApplyBlock(cs, b, consensus.V1BlockSupplement{}, time.Time{}) + for i := range sces { + cau.UpdateElementProof(&sces[i].StateElement) + cau.UpdateElementProof(&sces[i].StateElement) + cau.UpdateElementProof(&sces[i].StateElement) + } + if !cs.Elements.ContainsSpentSiacoinElement(sces[0]) { + t.Error("missing siacoin element", 0) + } + if !cs.Elements.ContainsSpentSiacoinElement(sces[1]) { + t.Error("missing siacoin element", 1) + } +} + +*/ diff --git a/consensus/validation_test.go b/consensus/validation_test.go index df4aca41..39422d6e 100644 --- a/consensus/validation_test.go +++ b/consensus/validation_test.go @@ -59,11 +59,11 @@ func TestValidateBlock(t *testing.T) { } genesisBlock.Transactions = []types.Transaction{giftTxn} - dbStore, checkpoint, err := chain.NewDBStore(chain.NewMemDB(), n, genesisBlock) + dbStore, tipState, err := chain.NewDBStore(chain.NewMemDB(), n, genesisBlock) if err != nil { t.Fatal(err) } - cs := checkpoint.State + cs := tipState signTxn := func(txn *types.Transaction) { appendSig := func(key 
types.PrivateKey, pubkeyIndex uint64, parentID types.Hash256) { @@ -515,11 +515,11 @@ func TestValidateV2Block(t *testing.T) { fces = append(fces, fce) }) - dbStore, checkpoint, err := chain.NewDBStore(chain.NewMemDB(), n, genesisBlock) + dbStore, tipState, err := chain.NewDBStore(chain.NewMemDB(), n, genesisBlock) if err != nil { t.Fatal(err) } - cs := checkpoint.State + cs := tipState fc := v2GiftFC fc.TotalCollateral = fc.HostOutput.Value