diff --git a/bold b/bold index ce48f994c0..ca28c93f17 160000 --- a/bold +++ b/bold @@ -1 +1 @@ -Subproject commit ce48f994c0cead5cfb724ced42e5541a24d8c4e0 +Subproject commit ca28c93f17be872adb885f019ec9a18eb4a0114a diff --git a/go.mod b/go.mod index 18bdeaddb1..f985260ca3 100644 --- a/go.mod +++ b/go.mod @@ -223,6 +223,7 @@ require ( github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect @@ -240,6 +241,8 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/samber/lo v1.36.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/testify v1.8.4 // indirect github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect @@ -276,6 +279,7 @@ require ( google.golang.org/grpc v1.53.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect nhooyr.io/websocket v1.8.7 // indirect ) @@ -302,7 +306,7 @@ require ( github.com/google/uuid v1.3.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect - github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.2.2 github.com/huin/goupnp v1.1.0 // indirect diff --git a/go.sum b/go.sum index e28fdcc6dc..0723231d2d 100644 --- a/go.sum +++ b/go.sum @@ -225,6 +225,7 @@ github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoG github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codeclysm/extract/v3 v3.0.2 h1:sB4LcE3Php7LkhZwN0n2p8GCwZe92PEQutdbGURf5xc= github.com/codeclysm/extract/v3 v3.0.2/go.mod h1:NKsw+hqua9H+Rlwy/w/3Qgt9jDonYEgB6wJu+25eOKw= @@ -263,6 +264,7 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru v1.0.0/go.mod 
h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= @@ -378,6 +380,7 @@ github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -595,8 +598,10 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4= +github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -868,6 +873,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -1242,6 +1248,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod 
h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= @@ -1383,6 +1390,7 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1565,6 +1573,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -1618,6 +1628,7 @@ github.com/wealdtech/go-merkletree v1.0.0/go.mod h1:cdil512d/8ZC7Kx3bfrDvGMQXB25 github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa h1:EyA027ZAkuaCLoxVX4r1TZMPy1d31fM6hbfQ4OU4I5o= github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go index 1a79ff507c..923dbd26ce 100644 --- a/staker/challenge-cache/cache.go +++ b/staker/challenge-cache/cache.go @@ -18,8 +18,10 @@ Use cases: wavm-module-root-0xab/ message-num-70/ roots.txt - big-step-100/ + subchallenge-level-0-big-step-100/ roots.txt + subchallenge-level-1-big-step-100/ + roots.txt We namespace top-level block challenges by wavm module root. Then, we can retrieve the state roots for any data within a challenge or associated subchallenge based on the hierarchy above. 
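For orientation, here is a minimal, hypothetical usage sketch of the cache API as changed by this patch (a Key now carries a slice of StepHeights, and Get takes the number of roots to read). It is not part of the diff: the import aliases simply mirror those used in staker/state_provider.go, and the base directory, module root, and heights below are made-up values.

// Illustrative sketch only; assumes the challengecache API added in this patch.
package main

import (
	"fmt"
	"log"
	"os"

	protocol "github.com/OffchainLabs/bold/chain-abstraction"
	l2stateprovider "github.com/OffchainLabs/bold/layer2-state-provider"
	"github.com/ethereum/go-ethereum/common"

	challengecache "github.com/offchainlabs/nitro/staker/challenge-cache"
)

func main() {
	// Roots are stored under baseDir following the hierarchy documented above:
	// wavm-module-root-<hex>/message-num-<n>/subchallenge-level-<l>-big-step-<h>/...
	cache := challengecache.New(os.TempDir())
	key := &challengecache.Key{
		WavmModuleRoot: common.BytesToHash([]byte("wavm-root")), // hypothetical module root
		MessageHeight:  protocol.Height(70),
		StepHeights:    []l2stateprovider.Height{100},
	}
	roots := []common.Hash{
		common.BytesToHash([]byte("foo")),
		common.BytesToHash([]byte("bar")),
	}
	if err := cache.Put(key, roots); err != nil {
		log.Fatal(err)
	}
	// Read back the first two roots; a missing entry returns ErrNotFoundInCache.
	got, err := cache.Get(key, 2)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(got)) // 2
}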
@@ -36,7 +38,7 @@ import ( "path/filepath" protocol "github.com/OffchainLabs/bold/chain-abstraction" - "github.com/OffchainLabs/bold/containers/option" + l2stateprovider "github.com/OffchainLabs/bold/layer2-state-provider" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -49,11 +51,13 @@ var ( wavmModuleRootPrefix = "wavm-module-root" messageNumberPrefix = "message-num" bigStepPrefix = "big-step" + challengeLevelPrefix = "subchallenge-level" + srvlog = log.New("service", "bold-history-commit-cache") ) // HistoryCommitmentCacher can retrieve history commitment state roots given lookup keys. type HistoryCommitmentCacher interface { - Get(lookup *Key, readUpTo protocol.Height) ([]common.Hash, error) + Get(lookup *Key, numToRead uint64) ([]common.Hash, error) Put(lookup *Key, stateRoots []common.Hash) error } @@ -74,7 +78,7 @@ func New(baseDir string) *Cache { type Key struct { WavmModuleRoot common.Hash MessageHeight protocol.Height - BigStepHeight option.Option[protocol.Height] + StepHeights []l2stateprovider.Height } // Get a list of state roots from the cache up to a certain index. State roots are saved as files in the directory @@ -82,15 +86,17 @@ type Key struct { // is returned. func (c *Cache) Get( lookup *Key, - readUpTo protocol.Height, + numToRead uint64, ) ([]common.Hash, error) { fName, err := determineFilePath(c.baseDir, lookup) if err != nil { return nil, err } if _, err := os.Stat(fName); err != nil { + srvlog.Warn("Cache miss", log.Ctx{"fileName": fName}) return nil, ErrNotFoundInCache } + srvlog.Debug("Cache hit", log.Ctx{"fileName": fName}) f, err := os.Open(fName) if err != nil { return nil, err @@ -100,7 +106,7 @@ func (c *Cache) Get( log.Error("Could not close file after reading", "err", err, "file", fName) } }() - return readStateRoots(f, readUpTo) + return readStateRoots(f, numToRead) } // Put a list of state roots into the cache. @@ -149,12 +155,11 @@ func (c *Cache) Put(lookup *Key, stateRoots []common.Hash) error { } // Reads 32 bytes at a time from a reader up to a specified height. If none, then read all. -func readStateRoots(r io.Reader, readUpTo protocol.Height) ([]common.Hash, error) { +func readStateRoots(r io.Reader, numToRead uint64) ([]common.Hash, error) { br := bufio.NewReader(r) stateRoots := make([]common.Hash, 0) buf := make([]byte, 0, 32) - totalRead := uint64(0) - for { + for totalRead := uint64(0); totalRead < numToRead; totalRead++ { n, err := br.Read(buf[:cap(buf)]) if err != nil { // If we try to read but reach EOF, we break out of the loop. 
@@ -168,15 +173,11 @@ func readStateRoots(r io.Reader, readUpTo protocol.Height) ([]common.Hash, error return nil, fmt.Errorf("expected to read 32 bytes, got %d bytes", n) } stateRoots = append(stateRoots, common.BytesToHash(buf)) - if totalRead >= uint64(readUpTo) { - return stateRoots, nil - } - totalRead++ } - if readUpTo >= protocol.Height(len(stateRoots)) { + if protocol.Height(numToRead) > protocol.Height(len(stateRoots)) { return nil, fmt.Errorf( - "wanted to read up to %d, but only read %d state roots", - readUpTo, + "wanted to read %d roots, but only read %d state roots", + numToRead, len(stateRoots), ) } @@ -210,16 +211,23 @@ for a given filesystem challenge cache will look as follows: wavm-module-root-0xab/ message-num-70/ roots.txt - big-step-100/ + subchallenge-level-0-big-step-100/ roots.txt */ func determineFilePath(baseDir string, lookup *Key) (string, error) { key := make([]string, 0) key = append(key, fmt.Sprintf("%s-%s", wavmModuleRootPrefix, lookup.WavmModuleRoot.Hex())) key = append(key, fmt.Sprintf("%s-%d", messageNumberPrefix, lookup.MessageHeight)) - if !lookup.BigStepHeight.IsNone() { - bigStepHeight := lookup.BigStepHeight.Unwrap() - key = append(key, fmt.Sprintf("%s-%d", bigStepPrefix, bigStepHeight)) + for challengeLevel, height := range lookup.StepHeights { + key = append(key, fmt.Sprintf( + "%s-%d-%s-%d", + challengeLevelPrefix, + challengeLevel+1, // subchallenges start at 1, as level 0 is the block challenge level. + bigStepPrefix, + height, + ), + ) + } key = append(key, stateRootsFileName) return filepath.Join(baseDir, filepath.Join(key...)), nil diff --git a/staker/challenge-cache/cache_test.go b/staker/challenge-cache/cache_test.go index b9fec74b9c..53b8bf85c8 100644 --- a/staker/challenge-cache/cache_test.go +++ b/staker/challenge-cache/cache_test.go @@ -7,13 +7,11 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "strings" "testing" - protocol "github.com/OffchainLabs/bold/chain-abstraction" - "github.com/OffchainLabs/bold/containers/option" + l2stateprovider "github.com/OffchainLabs/bold/layer2-state-provider" "github.com/ethereum/go-ethereum/common" ) @@ -33,10 +31,10 @@ func TestCache(t *testing.T) { key := &Key{ WavmModuleRoot: common.BytesToHash([]byte("foo")), MessageHeight: 0, - BigStepHeight: option.Some(protocol.Height(0)), + StepHeights: []l2stateprovider.Height{l2stateprovider.Height(0)}, } t.Run("Not found", func(t *testing.T) { - _, err := cache.Get(key, protocol.Height(0)) + _, err := cache.Get(key, 0) if !errors.Is(err, ErrNotFoundInCache) { t.Fatal(err) } @@ -55,7 +53,7 @@ func TestCache(t *testing.T) { if err != nil { t.Fatal(err) } - got, err := cache.Get(key, protocol.Height(2)) + got, err := cache.Get(key, 3) if err != nil { t.Fatal(err) } @@ -72,7 +70,7 @@ func TestCache(t *testing.T) { func TestReadWriteStateRoots(t *testing.T) { t.Run("read up to, but had empty reader", func(t *testing.T) { b := bytes.NewBuffer([]byte{}) - _, err := readStateRoots(b, protocol.Height(100)) + _, err := readStateRoots(b, 100) if err == nil { t.Fatal("Wanted error") } @@ -84,7 +82,7 @@ func TestReadWriteStateRoots(t *testing.T) { b := bytes.NewBuffer([]byte{}) want := common.BytesToHash([]byte("foo")) b.Write(want.Bytes()) - roots, err := readStateRoots(b, protocol.Height(0)) + roots, err := readStateRoots(b, 1) if err != nil { t.Fatal(err) } @@ -103,7 +101,7 @@ func TestReadWriteStateRoots(t *testing.T) { b.Write(foo.Bytes()) b.Write(bar.Bytes()) b.Write(baz.Bytes()) - roots, err := readStateRoots(b, protocol.Height(1)) + roots, err := 
readStateRoots(b, 2) if err != nil { t.Fatal(err) } @@ -174,7 +172,7 @@ func Test_readStateRoots(t *testing.T) { common.BytesToHash([]byte("baz")), } m := &mockReader{wantErr: true, roots: want, err: errors.New("foo")} - _, err := readStateRoots(m, protocol.Height(1)) + _, err := readStateRoots(m, 1) if err == nil { t.Fatal(err) } @@ -189,11 +187,11 @@ func Test_readStateRoots(t *testing.T) { common.BytesToHash([]byte("baz")), } m := &mockReader{wantErr: true, roots: want, err: io.EOF} - _, err := readStateRoots(m, protocol.Height(100)) + _, err := readStateRoots(m, 100) if err == nil { t.Fatal(err) } - if !strings.Contains(err.Error(), "wanted to read up to 100, but only read 0 state roots") { + if !strings.Contains(err.Error(), "wanted to read 100") { t.Fatalf("Unexpected error: %v", err) } }) @@ -204,7 +202,7 @@ func Test_readStateRoots(t *testing.T) { common.BytesToHash([]byte("baz")), } m := &mockReader{wantErr: false, roots: want, bytesRead: 16} - _, err := readStateRoots(m, protocol.Height(2)) + _, err := readStateRoots(m, 2) if err == nil { t.Fatal(err) } @@ -219,7 +217,7 @@ func Test_readStateRoots(t *testing.T) { common.BytesToHash([]byte("baz")), } m := &mockReader{wantErr: false, roots: want, bytesRead: 32} - got, err := readStateRoots(m, protocol.Height(2)) + got, err := readStateRoots(m, 3) if err != nil { t.Fatal(err) } @@ -252,10 +250,10 @@ func Test_determineFilePath(t *testing.T) { baseDir: "", key: &Key{ MessageHeight: 100, - BigStepHeight: option.Some(protocol.Height(50)), + StepHeights: []l2stateprovider.Height{l2stateprovider.Height(50)}, }, }, - want: "wavm-module-root-0x0000000000000000000000000000000000000000000000000000000000000000/message-num-100/big-step-50/state-roots", + want: "wavm-module-root-0x0000000000000000000000000000000000000000000000000000000000000000/message-num-100/subchallenge-level-1-big-step-50/state-roots", wantErr: false, }, } @@ -283,10 +281,7 @@ func Test_determineFilePath(t *testing.T) { func BenchmarkCache_Read_32Mb(b *testing.B) { b.StopTimer() - basePath, err := ioutil.TempDir("", "*") - if err != nil { - b.Fatal(err) - } + basePath := os.TempDir() if err := os.MkdirAll(basePath, os.ModePerm); err != nil { b.Fatal(err) } @@ -299,19 +294,19 @@ func BenchmarkCache_Read_32Mb(b *testing.B) { key := &Key{ WavmModuleRoot: common.BytesToHash([]byte("foo")), MessageHeight: 0, - BigStepHeight: option.Some(protocol.Height(0)), + StepHeights: []l2stateprovider.Height{l2stateprovider.Height(0)}, } numRoots := 1 << 20 roots := make([]common.Hash, numRoots) for i := range roots { roots[i] = common.BytesToHash([]byte(fmt.Sprintf("%d", i))) } - if err = cache.Put(key, roots); err != nil { + if err := cache.Put(key, roots); err != nil { b.Fatal(err) } b.StartTimer() for i := 0; i < b.N; i++ { - readUpTo := protocol.Height(1 << 20) + readUpTo := uint64(1 << 20) roots, err := cache.Get(key, readUpTo) if err != nil { b.Fatal(err) diff --git a/staker/manager.go b/staker/manager.go index a0a1af0a5a..72731261d8 100644 --- a/staker/manager.go +++ b/staker/manager.go @@ -4,9 +4,11 @@ package staker import ( "context" + solimpl "github.com/OffchainLabs/bold/chain-abstraction/sol-implementation" challengemanager "github.com/OffchainLabs/bold/challenge-manager" "github.com/OffchainLabs/bold/challenge-manager/types" + l2stateprovider "github.com/OffchainLabs/bold/layer2-state-provider" "github.com/OffchainLabs/bold/solgen/go/challengeV2gen" "github.com/OffchainLabs/bold/solgen/go/rollupgen" @@ -23,7 +25,8 @@ func NewManager( callOpts bind.CallOpts, client 
arbutil.L1Interface, statelessBlockValidator *StatelessBlockValidator, - historyCacheBaseDir string, + historyCacheBaseDir, + validatorName string, ) (*challengemanager.Manager, error) { chain, err := solimpl.NewAssertionChain( ctx, @@ -50,29 +53,40 @@ func NewManager( if err != nil { return nil, err } - bigStepEdgeHeight, err := managerBinding.LAYERZEROBIGSTEPEDGEHEIGHT(&callOpts) + numBigStepLevel, err := managerBinding.NUMBIGSTEPLEVEL(&callOpts) if err != nil { return nil, err } - smallStepEdgeHeight, err := managerBinding.LAYERZEROSMALLSTEPEDGEHEIGHT(&callOpts) - if err != nil { - return nil, err + challengeLeafHeights := make([]l2stateprovider.Height, numBigStepLevel+2) + for i := uint8(0); i <= numBigStepLevel+1; i++ { + leafHeight, err := managerBinding.GetLayerZeroEndHeight(&callOpts, i) + if err != nil { + return nil, err + } + challengeLeafHeights[i] = l2stateprovider.Height(leafHeight.Uint64()) } + stateManager, err := NewStateManager( statelessBlockValidator, - nil, - smallStepEdgeHeight.Uint64(), - bigStepEdgeHeight.Uint64()*smallStepEdgeHeight.Uint64(), historyCacheBaseDir, + challengeLeafHeights, + validatorName, ) if err != nil { return nil, err } + provider := l2stateprovider.NewHistoryCommitmentProvider( + stateManager, + stateManager, + stateManager, + challengeLeafHeights, + stateManager, + ) manager, err := challengemanager.New( ctx, chain, client, - stateManager, + provider, rollupAddress, challengemanager.WithMode(types.MakeMode), ) diff --git a/staker/state_provider.go b/staker/state_provider.go index 8caaaa3bb4..b9f09e86c5 100644 --- a/staker/state_provider.go +++ b/staker/state_provider.go @@ -6,7 +6,7 @@ import ( "context" "errors" "fmt" - "strings" + "sync" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" @@ -15,16 +15,17 @@ import ( protocol "github.com/OffchainLabs/bold/chain-abstraction" "github.com/OffchainLabs/bold/containers/option" l2stateprovider "github.com/OffchainLabs/bold/layer2-state-provider" - "github.com/OffchainLabs/bold/solgen/go/rollupgen" - commitments "github.com/OffchainLabs/bold/state-commitments/history" - prefixproofs "github.com/OffchainLabs/bold/state-commitments/prefix-proofs" - "github.com/offchainlabs/nitro/arbutil" challengecache "github.com/offchainlabs/nitro/staker/challenge-cache" "github.com/offchainlabs/nitro/validator" ) -var _ l2stateprovider.Provider = (*StateManager)(nil) +var ( + _ l2stateprovider.ProofCollector = (*StateManager)(nil) + _ l2stateprovider.L2MessageStateCollector = (*StateManager)(nil) + _ l2stateprovider.MachineHashCollector = (*StateManager)(nil) + _ l2stateprovider.ExecutionProvider = (*StateManager)(nil) +) // Defines the ABI encoding structure for submission of prefix proofs to the protocol contracts var ( @@ -36,440 +37,268 @@ var ( } ) -var ErrChainCatchingUp = errors.New("chain catching up") +var ( + ErrChainCatchingUp = errors.New("chain catching up") +) + +type Opt func(*StateManager) + +func DisableCache() Opt { + return func(sm *StateManager) { + sm.historyCache = nil + } +} type StateManager struct { validator *StatelessBlockValidator - blockValidator *BlockValidator - numOpcodesPerBigStep uint64 - maxWavmOpcodes uint64 historyCache challengecache.HistoryCommitmentCacher + challengeLeafHeights []l2stateprovider.Height + validatorName string + sync.RWMutex } -func NewStateManager(val *StatelessBlockValidator, blockValidator *BlockValidator, numOpcodesPerBigStep uint64, maxWavmOpcodes uint64, cacheBaseDir string) (*StateManager, error) { +func 
NewStateManager( + val *StatelessBlockValidator, + cacheBaseDir string, + challengeLeafHeights []l2stateprovider.Height, + validatorName string, + opts ...Opt, +) (*StateManager, error) { historyCache := challengecache.New(cacheBaseDir) - return &StateManager{ + sm := &StateManager{ validator: val, - blockValidator: blockValidator, - numOpcodesPerBigStep: numOpcodesPerBigStep, - maxWavmOpcodes: maxWavmOpcodes, historyCache: historyCache, - }, nil + challengeLeafHeights: challengeLeafHeights, + validatorName: validatorName, + } + for _, o := range opts { + o(sm) + } + return sm, nil } // ExecutionStateMsgCount If the state manager locally has this validated execution state. // Returns ErrNoExecutionState if not found, or ErrChainCatchingUp if not yet // validated / syncing. -func (s *StateManager) ExecutionStateMsgCount(ctx context.Context, state *protocol.ExecutionState) (uint64, error) { +func (s *StateManager) AgreesWithExecutionState(ctx context.Context, state *protocol.ExecutionState) error { if state.GlobalState.PosInBatch != 0 { - return 0, fmt.Errorf("position in batch must be zero, but got %d", state.GlobalState.PosInBatch) + return fmt.Errorf("position in batch must be zero, but got %d: %+v", state.GlobalState.PosInBatch, state) } - if state.GlobalState.Batch == 1 && state.GlobalState.PosInBatch == 0 { - // TODO: 1 is correct? - return 1, nil + // We always agree with the genesis batch. + batchIndex := state.GlobalState.Batch + if batchIndex == 0 && state.GlobalState.PosInBatch == 0 { + return nil } - batch := state.GlobalState.Batch - 1 - messageCount, err := s.validator.inboxTracker.GetBatchMessageCount(batch) - if err != nil { - return 0, err + // We always agree with the init message. + if batchIndex == 1 && state.GlobalState.PosInBatch == 0 { + return nil } - validatedExecutionState, err := s.executionStateAtMessageNumberImpl(ctx, uint64(messageCount)-1) - if err != nil { - return 0, err - } - if validatedExecutionState.GlobalState.Batch < batch { - return 0, ErrChainCatchingUp - } - res, err := s.validator.streamer.ResultAtCount(messageCount) - if err != nil { - return 0, err - } - if res.BlockHash != state.GlobalState.BlockHash || res.SendRoot != state.GlobalState.SendRoot { - return 0, l2stateprovider.ErrNoExecutionState - } - return uint64(messageCount), nil -} -// ExecutionStateAtMessageNumber Produces the l2 state to assert at the message number specified. -// Makes sure that PosInBatch is always 0 -func (s *StateManager) ExecutionStateAtMessageNumber(ctx context.Context, messageNumber uint64) (*protocol.ExecutionState, error) { - executionState, err := s.executionStateAtMessageNumberImpl(ctx, messageNumber) - if err != nil { - return nil, err - } - if executionState.GlobalState.PosInBatch != 0 { - executionState.GlobalState.Batch++ - executionState.GlobalState.PosInBatch = 0 - } - return executionState, nil -} + // Because an execution state from the assertion chain fully consumes the preceding batch, + // we actually want to check if we agree with the last state of the preceding batch, so + // we decrement the batch index by 1. 
+ batchIndex -= 1 -func (s *StateManager) executionStateAtMessageNumberImpl(ctx context.Context, messageNumber uint64) (*protocol.ExecutionState, error) { - batch, err := s.findBatchAfterMessageCount(arbutil.MessageIndex(messageNumber)) - if err != nil { - return &protocol.ExecutionState{}, err - } - batchMsgCount, err := s.validator.inboxTracker.GetBatchMessageCount(batch) - if err != nil { - return &protocol.ExecutionState{}, err - } - if batchMsgCount <= arbutil.MessageIndex(messageNumber) { - batch++ - } - globalState, err := s.getInfoAtMessageCountAndBatch(arbutil.MessageIndex(messageNumber), batch) + totalBatches, err := s.validator.inboxTracker.GetBatchCount() if err != nil { - return &protocol.ExecutionState{}, err + return err } - return &protocol.ExecutionState{ - GlobalState: protocol.GoGlobalState(globalState), - MachineStatus: protocol.MachineStatusFinished, // TODO: Why hardcode? - }, nil -} -// HistoryCommitmentAtMessage Produces a block history commitment of messageCount. -func (s *StateManager) HistoryCommitmentAtMessage(ctx context.Context, messageNumber uint64) (commitments.History, error) { - batch, err := s.findBatchAfterMessageCount(arbutil.MessageIndex(messageNumber)) - if err != nil { - return commitments.History{}, err - } - batchMsgCount, err := s.validator.inboxTracker.GetBatchMessageCount(messageNumber) - if err != nil { - return commitments.History{}, err + // If the batch index is >= the total number of batches we have in our inbox tracker, + // we are still catching up to the chain. + if batchIndex >= totalBatches { + return ErrChainCatchingUp } - if batchMsgCount <= arbutil.MessageIndex(messageNumber) { - batch++ - } - stateRoot, err := s.getHashAtMessageCountAndBatch(ctx, arbutil.MessageIndex(messageNumber), batch) + messageCount, err := s.validator.inboxTracker.GetBatchMessageCount(batchIndex) if err != nil { - return commitments.History{}, err + return err } - return commitments.New([]common.Hash{stateRoot}) -} - -func (s *StateManager) HistoryCommitmentAtBatch(ctx context.Context, batchNumber uint64) (commitments.History, error) { - batchMsgCount, err := s.validator.inboxTracker.GetBatchMessageCount(batchNumber) + validatedGlobalState, err := s.findGlobalStateFromMessageCountAndBatch(messageCount, l2stateprovider.Batch(batchIndex)) if err != nil { - return commitments.History{}, err + return err } - res, err := s.validator.streamer.ResultAtCount(batchMsgCount - 1) - if err != nil { - return commitments.History{}, err - } - state := validator.GoGlobalState{ - BlockHash: res.BlockHash, - SendRoot: res.SendRoot, - Batch: batchNumber, - PosInBatch: 0, + // We check if the block hash and send root match at our expected result. + if state.GlobalState.BlockHash != validatedGlobalState.BlockHash || state.GlobalState.SendRoot != validatedGlobalState.SendRoot { + return l2stateprovider.ErrNoExecutionState } - machineHash := crypto.Keccak256Hash([]byte("Machine finished:"), state.Hash().Bytes()) - return commitments.New([]common.Hash{machineHash}) + return nil } -// BigStepCommitmentUpTo Produces a big step history commitment from big step 0 to toBigStep within block -// challenge heights blockHeight and blockHeight+1. 
-func (s *StateManager) BigStepCommitmentUpTo(ctx context.Context, wasmModuleRoot common.Hash, messageNumber uint64, toBigStep uint64) (commitments.History, error) { - result, err := s.intermediateBigStepLeaves(ctx, wasmModuleRoot, messageNumber, toBigStep) - if err != nil { - return commitments.History{}, err - } - return commitments.New(result) -} - -// SmallStepCommitmentUpTo Produces a small step history commitment from small step 0 to N between -// big steps bigStep to bigStep+1 within block challenge heights blockHeight to blockHeight+1. -func (s *StateManager) SmallStepCommitmentUpTo(ctx context.Context, wasmModuleRoot common.Hash, messageNumber uint64, bigStep uint64, toSmallStep uint64) (commitments.History, error) { - result, err := s.intermediateSmallStepLeaves(ctx, wasmModuleRoot, messageNumber, bigStep, toSmallStep) - if err != nil { - return commitments.History{}, err - } - return commitments.New(result) -} - -// HistoryCommitmentUpToBatch Produces a block challenge history commitment in a certain inclusive block range, -// but padding states with duplicates after the first state with a batch count of at least the specified max. -func (s *StateManager) HistoryCommitmentUpToBatch(ctx context.Context, messageNumberStart uint64, messageNumberEnd uint64, nextBatchCount uint64) (commitments.History, error) { - stateRoots, err := s.statesUpTo(messageNumberStart, messageNumberEnd, nextBatchCount) - if err != nil { - return commitments.History{}, err - } - return commitments.New(stateRoots) -} - -// BigStepLeafCommitment Produces a big step history commitment for all big steps within block -// challenge heights blockHeight to blockHeight+1. -func (s *StateManager) BigStepLeafCommitment(ctx context.Context, wasmModuleRoot common.Hash, messageNumber uint64) (commitments.History, error) { - // Number of big steps between assertion heights A and B will be - // fixed. It is simply the max number of opcodes - // per block divided by the size of a big step. - numBigSteps := s.maxWavmOpcodes / s.numOpcodesPerBigStep - return s.BigStepCommitmentUpTo(ctx, wasmModuleRoot, messageNumber, numBigSteps) -} - -// SmallStepLeafCommitment Produces a small step history commitment for all small steps between -// big steps bigStep to bigStep+1 within block challenge heights blockHeight to blockHeight+1. -func (s *StateManager) SmallStepLeafCommitment(ctx context.Context, wasmModuleRoot common.Hash, messageNumber uint64, bigStep uint64) (commitments.History, error) { - return s.SmallStepCommitmentUpTo( - ctx, - wasmModuleRoot, - messageNumber, - bigStep, - s.numOpcodesPerBigStep, - ) -} - -// PrefixProofUpToBatch Produces a prefix proof in a block challenge from height A to B, -// but padding states with duplicates after the first state with a batch count of at least the specified max. -func (s *StateManager) PrefixProofUpToBatch( - ctx context.Context, - startHeight, - fromMessageNumber, - toMessageNumber, - batchCount uint64, -) ([]byte, error) { - if toMessageNumber > batchCount { - return nil, errors.New("toMessageNumber should not be greater than batchCount") +// ExecutionStateAfterBatchCount Produces the l2 state to assert at the message number specified. 
+// Makes sure that PosInBatch is always 0 +func (s *StateManager) ExecutionStateAfterBatchCount(ctx context.Context, batchCount uint64) (*protocol.ExecutionState, error) { + if batchCount == 0 { + return nil, errors.New("batch count cannot be zero") } - states, err := s.statesUpTo(startHeight, toMessageNumber, batchCount) + batchIndex := batchCount - 1 + messageCount, err := s.validator.inboxTracker.GetBatchMessageCount(batchIndex) if err != nil { return nil, err } - loSize := fromMessageNumber + 1 - startHeight - hiSize := toMessageNumber + 1 - startHeight - return s.getPrefixProof(loSize, hiSize, states) -} - -// BigStepPrefixProof Produces a big step prefix proof from height A to B for heights fromBlockChallengeHeight to H+1 -// within a block challenge. -func (s *StateManager) BigStepPrefixProof( - ctx context.Context, - wasmModuleRoot common.Hash, - messageNumber uint64, - fromBigStep uint64, - toBigStep uint64, -) ([]byte, error) { - prefixLeaves, err := s.intermediateBigStepLeaves(ctx, wasmModuleRoot, messageNumber, toBigStep) + globalState, err := s.findGlobalStateFromMessageCountAndBatch(messageCount, l2stateprovider.Batch(batchIndex)) if err != nil { return nil, err } - loSize := fromBigStep + 1 - hiSize := toBigStep + 1 - return s.getPrefixProof(loSize, hiSize, prefixLeaves) -} - -// SmallStepPrefixProof Produces a small step prefix proof from height A to B for big step S to S+1 and -// block challenge height heights H to H+1. -func (s *StateManager) SmallStepPrefixProof(ctx context.Context, wasmModuleRoot common.Hash, messageNumber uint64, bigStep uint64, fromSmallStep uint64, toSmallStep uint64) ([]byte, error) { - prefixLeaves, err := s.intermediateSmallStepLeaves(ctx, wasmModuleRoot, messageNumber, bigStep, toSmallStep) - if err != nil { - return nil, err + executionState := &protocol.ExecutionState{ + GlobalState: protocol.GoGlobalState(globalState), + MachineStatus: protocol.MachineStatusFinished, } - loSize := fromSmallStep + 1 - hiSize := toSmallStep + 1 - return s.getPrefixProof(loSize, hiSize, prefixLeaves) -} - -// Like abi.NewType but panics if it fails for use in constants -func newStaticType(t string, internalType string, components []abi.ArgumentMarshaling) abi.Type { - ty, err := abi.NewType(t, internalType, components) - if err != nil { - panic(err) + // If the execution state did not consume all messages in a batch, we then return + // the next batch's execution state. 
+ if executionState.GlobalState.PosInBatch != 0 { + executionState.GlobalState.Batch += 1 + executionState.GlobalState.PosInBatch = 0 } - return ty -} - -var bytes32Type = newStaticType("bytes32", "", nil) -var uint64Type = newStaticType("uint64", "", nil) -var uint8Type = newStaticType("uint8", "", nil) - -var WasmModuleProofAbi = abi.Arguments{ - { - Name: "lastHash", - Type: bytes32Type, - }, - { - Name: "assertionExecHash", - Type: bytes32Type, - }, - { - Name: "inboxAcc", - Type: bytes32Type, - }, -} - -var ExecutionStateAbi = abi.Arguments{ - { - Name: "b1", - Type: bytes32Type, - }, - { - Name: "b2", - Type: bytes32Type, - }, - { - Name: "u1", - Type: uint64Type, - }, - { - Name: "u2", - Type: uint64Type, - }, - { - Name: "status", - Type: uint8Type, - }, + return executionState, nil } -func (s *StateManager) OneStepProofData( - ctx context.Context, - wasmModuleRoot common.Hash, - postState rollupgen.ExecutionState, - messageNumber, - bigStep, - smallStep uint64, -) (*protocol.OneStepData, []common.Hash, []common.Hash, error) { - endCommit, err := s.SmallStepCommitmentUpTo( - ctx, - wasmModuleRoot, - messageNumber, - bigStep, - smallStep+1, - ) - if err != nil { - return nil, nil, nil, err - } - startCommit, err := s.SmallStepCommitmentUpTo( - ctx, - wasmModuleRoot, - messageNumber, - bigStep, - smallStep, - ) - if err != nil { - return nil, nil, nil, err +func (s *StateManager) StatesInBatchRange( + fromHeight, + toHeight l2stateprovider.Height, + fromBatch, + toBatch l2stateprovider.Batch, +) ([]common.Hash, []validator.GoGlobalState, error) { + // Check integrity of the arguments. + if fromBatch > toBatch { + return nil, nil, fmt.Errorf("from batch %v is greater than to batch %v", fromBatch, toBatch) + } + if fromHeight > toHeight { + return nil, nil, fmt.Errorf("from height %v is greater than to height %v", fromHeight, toHeight) } - step := bigStep*s.numOpcodesPerBigStep + smallStep - - entry, err := s.validator.CreateReadyValidationEntry(ctx, arbutil.MessageIndex(messageNumber)) + // The last message's batch count. + prevBatchMsgCount, err := s.validator.inboxTracker.GetBatchMessageCount(uint64(fromBatch) - 1) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - input, err := entry.ToInput() + gs, err := s.findGlobalStateFromMessageCountAndBatch(prevBatchMsgCount, fromBatch-1) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - execRun, err := s.validator.execSpawner.CreateExecutionRun(wasmModuleRoot, input).Await(ctx) - if err != nil { - return nil, nil, nil, err + if gs.PosInBatch == 0 { + return nil, nil, errors.New("final state of batch cannot be at position zero") } - - oneStepProofPromise := execRun.GetProofAt(step) - oneStepProof, err := oneStepProofPromise.Await(ctx) - if err != nil { - return nil, nil, nil, err + // The start state root of our history commitment starts at `batch: fromBatch, pos: 0` using the state + // from the last batch. 
+ gs.Batch += 1 + gs.PosInBatch = 0 + stateRoots := []common.Hash{ + crypto.Keccak256Hash([]byte("Machine finished:"), gs.Hash().Bytes()), } + globalStates := []validator.GoGlobalState{gs} - machineStepPromise := execRun.GetStepAt(step) - machineStep, err := machineStepPromise.Await(ctx) - if err != nil { - return nil, nil, nil, err - } - beforeHash := machineStep.Hash - if beforeHash != startCommit.LastLeaf { - return nil, nil, nil, fmt.Errorf("machine executed to start step %v hash %v but expected %v", step, beforeHash, startCommit.LastLeaf) - } + // Check if there are enough messages in the range to satisfy our request. + totalDesiredHashes := (toHeight - fromHeight) + 1 - machineStepPromise = execRun.GetStepAt(step + 1) - machineStep, err = machineStepPromise.Await(ctx) - if err != nil { - return nil, nil, nil, err + // We can return early if all we want is one hash. + if totalDesiredHashes == 1 && fromHeight == 0 && toHeight == 0 { + return stateRoots, globalStates, nil } - afterHash := machineStep.Hash - if afterHash != endCommit.LastLeaf { - return nil, nil, nil, fmt.Errorf("machine executed to end step %v hash %v but expected %v", step+1, beforeHash, endCommit.LastLeaf) + + for batch := fromBatch; batch < toBatch; batch++ { + msgCount, err := s.validator.inboxTracker.GetBatchMessageCount(uint64(batch)) + if err != nil { + return nil, nil, err + } + var lastGlobalState validator.GoGlobalState + + msgsInBatch := msgCount - prevBatchMsgCount + for i := uint64(1); i <= uint64(msgsInBatch); i++ { + msgIndex := uint64(prevBatchMsgCount) + i + gs, err := s.findGlobalStateFromMessageCountAndBatch(arbutil.MessageIndex(msgIndex), batch) + if err != nil { + return nil, nil, err + } + globalStates = append(globalStates, gs) + stateRoots = append(stateRoots, + crypto.Keccak256Hash([]byte("Machine finished:"), gs.Hash().Bytes()), + ) + lastGlobalState = gs + } + prevBatchMsgCount = msgCount + lastGlobalState.Batch += 1 + lastGlobalState.PosInBatch = 0 + stateRoots = append(stateRoots, + crypto.Keccak256Hash([]byte("Machine finished:"), lastGlobalState.Hash().Bytes()), + ) + globalStates = append(globalStates, lastGlobalState) } - data := &protocol.OneStepData{ - BeforeHash: startCommit.LastLeaf, - Proof: oneStepProof, + for uint64(len(stateRoots)) < uint64(totalDesiredHashes) { + stateRoots = append(stateRoots, stateRoots[len(stateRoots)-1]) } - return data, startCommit.LastLeafProof, endCommit.LastLeafProof, nil + return stateRoots[fromHeight : toHeight+1], globalStates[fromHeight : toHeight+1], nil } -func (s *StateManager) AgreesWithHistoryCommitment( - ctx context.Context, - wasmModuleRoot common.Hash, - assertionInboxMaxCount uint64, - parentAssertionAfterStateBatch uint64, - edgeType protocol.EdgeType, - heights protocol.OriginHeights, - history l2stateprovider.History, -) (bool, error) { - var localCommit commitments.History +func (s *StateManager) findGlobalStateFromMessageCountAndBatch(count arbutil.MessageIndex, batchIndex l2stateprovider.Batch) (validator.GoGlobalState, error) { + var prevBatchMsgCount arbutil.MessageIndex var err error - switch edgeType { - case protocol.BlockChallengeEdge: - localCommit, err = s.HistoryCommitmentUpToBatch(ctx, parentAssertionAfterStateBatch, parentAssertionAfterStateBatch+history.Height, assertionInboxMaxCount) + if batchIndex > 0 { + prevBatchMsgCount, err = s.validator.inboxTracker.GetBatchMessageCount(uint64(batchIndex) - 1) if err != nil { - return false, err - } - case protocol.BigStepChallengeEdge: - localCommit, err = s.BigStepCommitmentUpTo( 
- ctx, - wasmModuleRoot, - uint64(heights.BlockChallengeOriginHeight), - history.Height, - ) - if err != nil { - return false, err + return validator.GoGlobalState{}, err } - case protocol.SmallStepChallengeEdge: - localCommit, err = s.SmallStepCommitmentUpTo( - ctx, - wasmModuleRoot, - uint64(heights.BlockChallengeOriginHeight), - uint64(heights.BigStepChallengeOriginHeight), - history.Height, - ) - if err != nil { - return false, err + if prevBatchMsgCount > count { + return validator.GoGlobalState{}, errors.New("bad batch provided") } - default: - return false, errors.New("unsupported edge type") } - return localCommit.Height == history.Height && localCommit.Merkle == history.MerkleRoot, nil -} - -func (s *StateManager) getPrefixProof(loSize uint64, hiSize uint64, leaves []common.Hash) ([]byte, error) { - prefixExpansion, err := prefixproofs.ExpansionFromLeaves(leaves[:loSize]) + res, err := s.validator.streamer.ResultAtCount(count) if err != nil { - return nil, err + return validator.GoGlobalState{}, fmt.Errorf("%s: could not check if we have result at count %d: %w", s.validatorName, count, err) } - prefixProof, err := prefixproofs.GeneratePrefixProof( - loSize, - prefixExpansion, - leaves[loSize:hiSize], - prefixproofs.RootFetcherFromExpansion, - ) + return validator.GoGlobalState{ + BlockHash: res.BlockHash, + SendRoot: res.SendRoot, + Batch: uint64(batchIndex), + PosInBatch: uint64(count - prevBatchMsgCount), + }, nil +} + +// L2MessageStatesUpTo Computes a block history commitment from a start L2 message to an end L2 message index +// and up to a required batch index. The hashes used for this commitment are the machine hashes +// at each message number. +func (s *StateManager) L2MessageStatesUpTo( + _ context.Context, + fromHeight l2stateprovider.Height, + toHeight option.Option[l2stateprovider.Height], + fromBatch, + toBatch l2stateprovider.Batch, +) ([]common.Hash, error) { + var to l2stateprovider.Height + if !toHeight.IsNone() { + to = toHeight.Unwrap() + } else { + blockChallengeLeafHeight := s.challengeLeafHeights[0] + to = blockChallengeLeafHeight + } + items, _, err := s.StatesInBatchRange(fromHeight, to, fromBatch, toBatch) if err != nil { return nil, err } - _, numRead := prefixproofs.MerkleExpansionFromCompact(prefixProof, loSize) - onlyProof := prefixProof[numRead:] - return ProofArgs.Pack(&prefixExpansion, &onlyProof) + return items, nil } -func (s *StateManager) intermediateBigStepLeaves(ctx context.Context, wasmModuleRoot common.Hash, blockHeight uint64, toBigStep uint64) ([]common.Hash, error) { +// CollectMachineHashes Collects a list of machine hashes at a message number based on some configuration parameters. 
+func (s *StateManager) CollectMachineHashes( + ctx context.Context, cfg *l2stateprovider.HashCollectorConfig, +) ([]common.Hash, error) { + s.Lock() + defer s.Unlock() cacheKey := &challengecache.Key{ - WavmModuleRoot: wasmModuleRoot, - MessageHeight: protocol.Height(blockHeight), - BigStepHeight: option.None[protocol.Height](), - } - cachedRoots, err := s.historyCache.Get(cacheKey, protocol.Height(toBigStep)) - if err == nil { - return cachedRoots, nil + WavmModuleRoot: cfg.WasmModuleRoot, + MessageHeight: protocol.Height(cfg.MessageNumber), + StepHeights: cfg.StepHeights, + } + if s.historyCache != nil { + cachedRoots, err := s.historyCache.Get(cacheKey, cfg.NumDesiredHashes) + switch { + case err == nil: + return cachedRoots, nil + case !errors.Is(err, challengecache.ErrNotFoundInCache): + return nil, err + } + } - entry, err := s.validator.CreateReadyValidationEntry(ctx, arbutil.MessageIndex(blockHeight)) + entry, err := s.validator.CreateReadyValidationEntry(ctx, arbutil.MessageIndex(cfg.MessageNumber)) if err != nil { return nil, err } @@ -477,17 +306,17 @@ func (s *StateManager) intermediateBigStepLeaves(ctx context.Context, wasmModule if err != nil { return nil, err } - execRun, err := s.validator.execSpawner.CreateExecutionRun(wasmModuleRoot, input).Await(ctx) + execRun, err := s.validator.execSpawner.CreateExecutionRun(cfg.WasmModuleRoot, input).Await(ctx) if err != nil { return nil, err } - bigStepLeaves := execRun.GetBigStepLeavesUpTo(toBigStep, s.numOpcodesPerBigStep) - result, err := bigStepLeaves.Await(ctx) + stepLeaves := execRun.GetLeavesWithStepSize(uint64(cfg.MachineStartIndex), uint64(cfg.StepSize), cfg.NumDesiredHashes) + result, err := stepLeaves.Await(ctx) if err != nil { return nil, err } - // TODO: Hacky workaround to avoid saving a history commitment to height 0. - if len(result) > 1 { + // Do not save a history commitment of length 1 to the cache. + if len(result) > 1 && s.historyCache != nil { if err := s.historyCache.Put(cacheKey, result); err != nil { if !errors.Is(err, challengecache.ErrFileAlreadyExists) { return nil, err @@ -497,17 +326,14 @@ func (s *StateManager) intermediateSmallStepLeaves(ctx context.Context, wasmModu -func (s *StateManager) intermediateSmallStepLeaves(ctx context.Context, wasmModuleRoot common.Hash, blockHeight uint64, bigStep uint64, toSmallStep uint64) ([]common.Hash, error) { - cacheKey := &challengecache.Key{ - WavmModuleRoot: wasmModuleRoot, - MessageHeight: protocol.Height(blockHeight), - BigStepHeight: option.Some[protocol.Height](protocol.Height(bigStep)), - } - cachedRoots, err := s.historyCache.Get(cacheKey, protocol.Height(toSmallStep)) - if err == nil { - return cachedRoots, nil - } - entry, err := s.validator.CreateReadyValidationEntry(ctx, arbutil.MessageIndex(blockHeight)) +// CollectProof collects a one-step proof (OSP) at a given message number and opcode index.
+func (s *StateManager) CollectProof( + ctx context.Context, + wasmModuleRoot common.Hash, + messageNumber l2stateprovider.Height, + machineIndex l2stateprovider.OpcodeIndex, +) ([]byte, error) { + entry, err := s.validator.CreateReadyValidationEntry(ctx, arbutil.MessageIndex(messageNumber)) if err != nil { return nil, err } @@ -519,150 +345,6 @@ func (s *StateManager) intermediateSmallStepLeaves(ctx context.Context, wasmModu if err != nil { return nil, err } - smallStepLeaves := execRun.GetSmallStepLeavesUpTo(bigStep, toSmallStep, s.numOpcodesPerBigStep) - result, err := smallStepLeaves.Await(ctx) - if err != nil { - return nil, err - } - // TODO: Hacky workaround to avoid saving a history commitment to height 0. - if len(result) > 1 { - if err := s.historyCache.Put(cacheKey, result); err != nil { - if !errors.Is(err, challengecache.ErrFileAlreadyExists) { - return nil, err - } - } - } - return result, nil -} - -// TODO: Rename block to message. -func (s *StateManager) statesUpTo(blockStart uint64, blockEnd uint64, nextBatchCount uint64) ([]common.Hash, error) { - if blockEnd < blockStart { - return nil, fmt.Errorf("end block %v is less than start block %v", blockEnd, blockStart) - } - batch, err := s.findBatchAfterMessageCount(arbutil.MessageIndex(blockStart)) - if err != nil { - return nil, err - } - // TODO: Document why we cannot validate genesis. - if batch == 0 { - batch += 1 - } - // The size is the number of elements being committed to. For example, if the height is 7, there will - // be 8 elements being committed to from [0, 7] inclusive. - desiredStatesLen := int(blockEnd - blockStart + 1) - var stateRoots []common.Hash - var lastStateRoot common.Hash - - // TODO: Document why we cannot validate genesis. - if blockStart == 0 { - blockStart += 1 - } - for i := blockStart; i <= blockEnd; i++ { - batchMsgCount, err := s.validator.inboxTracker.GetBatchMessageCount(batch) - if err != nil { - return nil, err - } - if batchMsgCount <= arbutil.MessageIndex(i) { - batch++ - } - gs, err := s.getInfoAtMessageCountAndBatch(arbutil.MessageIndex(i), batch) - if err != nil { - return nil, err - } - if gs.Batch >= nextBatchCount { - if gs.Batch > nextBatchCount || gs.PosInBatch > 0 { - return nil, fmt.Errorf("overran next batch count %v with global state batch %v position %v", nextBatchCount, gs.Batch, gs.PosInBatch) - } - break - } - stateRoot := crypto.Keccak256Hash([]byte("Machine finished:"), gs.Hash().Bytes()) - stateRoots = append(stateRoots, stateRoot) - lastStateRoot = stateRoot - } - for len(stateRoots) < desiredStatesLen { - stateRoots = append(stateRoots, lastStateRoot) - } - return stateRoots, nil -} - -func (s *StateManager) findBatchAfterMessageCount(msgCount arbutil.MessageIndex) (uint64, error) { - if msgCount == 0 { - return 0, nil - } - low := uint64(0) - batchCount, err := s.validator.inboxTracker.GetBatchCount() - if err != nil { - return 0, err - } - high := batchCount - for { - // Binary search invariants: - // - messageCount(high) >= msgCount - // - messageCount(low-1) < msgCount - // - high >= low - if high < low { - return 0, fmt.Errorf("when attempting to find batch for message count %v high %v < low %v", msgCount, high, low) - } - mid := (low + high) / 2 - batchMsgCount, err := s.validator.inboxTracker.GetBatchMessageCount(mid) - if err != nil { - // TODO: There is a circular dep with the error in inbox_tracker.go, we - // should move it somewhere else and use errors.Is. 
- if strings.Contains(err.Error(), "accumulator not found") { - high = mid - } else { - return 0, fmt.Errorf("failed to get batch metadata while binary searching: %w", err) - } - } - if batchMsgCount < msgCount { - low = mid + 1 - } else if batchMsgCount == msgCount { - return mid + 1, nil - } else if mid == low { // batchMsgCount > msgCount - return mid, nil - } else { // batchMsgCount > msgCount - high = mid - } - } -} - -func (s *StateManager) getHashAtMessageCountAndBatch(_ context.Context, messageCount arbutil.MessageIndex, batch uint64) (common.Hash, error) { - gs, err := s.getInfoAtMessageCountAndBatch(messageCount, batch) - if err != nil { - return common.Hash{}, err - } - return crypto.Keccak256Hash([]byte("Machine finished:"), gs.Hash().Bytes()), nil -} - -func (s *StateManager) getInfoAtMessageCountAndBatch(messageCount arbutil.MessageIndex, batch uint64) (validator.GoGlobalState, error) { - globalState, err := s.findGlobalStateFromMessageCountAndBatch(messageCount, batch) - if err != nil { - return validator.GoGlobalState{}, err - } - return globalState, nil -} - -func (s *StateManager) findGlobalStateFromMessageCountAndBatch(count arbutil.MessageIndex, batch uint64) (validator.GoGlobalState, error) { - var prevBatchMsgCount arbutil.MessageIndex - var err error - if batch > 0 { - prevBatchMsgCount, err = s.validator.inboxTracker.GetBatchMessageCount(batch - 1) - if err != nil { - return validator.GoGlobalState{}, err - } - if prevBatchMsgCount > count { - return validator.GoGlobalState{}, errors.New("bad batch provided") - } - } - res, err := s.validator.streamer.ResultAtCount(count) - if err != nil { - return validator.GoGlobalState{}, err - } - return validator.GoGlobalState{ - BlockHash: res.BlockHash, - SendRoot: res.SendRoot, - Batch: batch, - PosInBatch: uint64(count - prevBatchMsgCount), - }, nil + oneStepProofPromise := execRun.GetProofAt(uint64(machineIndex)) + return oneStepProofPromise.Await(ctx) } diff --git a/system_tests/assertion_on_large_number_of_batch_test.go b/system_tests/assertion_on_large_number_of_batch_test.go index cb59206a02..1b127333e4 100644 --- a/system_tests/assertion_on_large_number_of_batch_test.go +++ b/system_tests/assertion_on_large_number_of_batch_test.go @@ -65,7 +65,7 @@ func TestAssertionOnLargeNumberOfBatch(t *testing.T) { err = stateless.Start(ctx) Require(t, err) - manager, err := staker.NewStateManager(stateless, nil, numOpcodesPerBigStepTest, maxWavmOpcodesTest, t.TempDir()) + manager, err := staker.NewStateManager(stateless, t.TempDir(), nil) Require(t, err) poster := assertions.NewPoster( diff --git a/system_tests/bold_challenge_protocol_test.go b/system_tests/bold_challenge_protocol_test.go new file mode 100644 index 0000000000..d5f41f1b2f --- /dev/null +++ b/system_tests/bold_challenge_protocol_test.go @@ -0,0 +1,751 @@ +// Copyright 2023, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +// race detection makes things slow and miss timeouts +//go:build challengetest && !race + +package arbtest + +import ( + "bytes" + "context" + "encoding/json" + "io" + "math/big" + "os" + "testing" + "time" + + "github.com/OffchainLabs/bold/assertions" + protocol "github.com/OffchainLabs/bold/chain-abstraction" + solimpl "github.com/OffchainLabs/bold/chain-abstraction/sol-implementation" + challengemanager "github.com/OffchainLabs/bold/challenge-manager" + modes "github.com/OffchainLabs/bold/challenge-manager/types" + l2stateprovider "github.com/OffchainLabs/bold/layer2-state-provider" + "github.com/OffchainLabs/bold/solgen/go/bridgegen" + "github.com/OffchainLabs/bold/solgen/go/mocksgen" + "github.com/OffchainLabs/bold/solgen/go/rollupgen" + challenge_testing "github.com/OffchainLabs/bold/testing" + "github.com/OffchainLabs/bold/testing/setup" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos" + "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/cmd/chaininfo" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/statetransfer" + "github.com/offchainlabs/nitro/util" + "github.com/offchainlabs/nitro/util/signature" + "github.com/offchainlabs/nitro/validator/server_common" + "github.com/offchainlabs/nitro/validator/valnode" +) + +// One Arbitrum block had 1,849,212,947 total opcodes. The closest, higher power of two +// is 2^31. So if we make our small step heights 2^20, we need 2048 big steps +// to cover the block. With 2^20, our small step history commitments will be approx +// 32 Mb of state roots in memory at once. +var ( + blockChallengeLeafHeight = uint64(1 << 5) // 32 + bigStepChallengeLeafHeight = uint64(1 << 5) // 5 big step levels, 2^5 each, with the small step level bringing the total to 2^31.
+ smallStepChallengeLeafHeight = uint64(1 << 6) +) + +func TestBoldProtocol(t *testing.T) { + t.Cleanup(func() { + Require(t, os.RemoveAll("/tmp/good")) + Require(t, os.RemoveAll("/tmp/evil")) + }) + ctx, cancelCtx := context.WithCancel(context.Background()) + defer cancelCtx() + var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs + l2chainConfig := params.ArbitrumDevTestChainConfig() + l2info := NewBlockChainTestInfo( + t, + types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), + transferGas, + ) + ownerBal := big.NewInt(params.Ether) + ownerBal.Mul(ownerBal, big.NewInt(1_000_000)) + l2info.GenerateGenesisAccount("Owner", ownerBal) + + _, l2nodeA, _, _, l1info, _, l1client, l1stack, assertionChain, stakeTokenAddr := createTestNodeOnL1ForBoldProtocol(t, ctx, true, nil, l2chainConfig, nil, l2info) + defer requireClose(t, l1stack) + defer l2nodeA.StopAndWait() + + // Every 10 seconds, send an L1 transaction to keep the chain moving. + go func() { + delay := time.Second * 10 + for { + select { + case <-ctx.Done(): + return + default: + time.Sleep(delay) + balance := big.NewInt(params.GWei) + TransferBalance(t, "Faucet", "Asserter", balance, l1info, l1client, ctx) + latestBlock, err := l1client.BlockNumber(ctx) + Require(t, err) + if latestBlock > 150 { + delay = time.Second + } + } + } + }() + + _, l2nodeB, assertionChainB := create2ndNodeWithConfigForBoldProtocol(t, ctx, l2nodeA, l1stack, l1info, &l2info.ArbInitData, arbnode.ConfigDefaultL1Test(), nil, stakeTokenAddr) + defer l2nodeB.StopAndWait() + + nodeAMessage, err := l2nodeA.Execution.HeadMessageNumber() + Require(t, err) + nodeBMessage, err := l2nodeB.Execution.HeadMessageNumber() + Require(t, err) + if nodeAMessage != nodeBMessage { + Fatal(t, "node A L2 genesis hash", nodeAMessage, "!= node B L2 genesis hash", nodeBMessage) + } + + deployAuth := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + + balance := big.NewInt(params.Ether) + balance.Mul(balance, big.NewInt(100)) + TransferBalance(t, "Faucet", "Asserter", balance, l1info, l1client, ctx) + TransferBalance(t, "Faucet", "EvilAsserter", balance, l1info, l1client, ctx) + l1authB := l1info.GetDefaultTransactOpts("EvilAsserter", ctx) + + t.Log("Setting the minimum assertion period") + rollup, err := rollupgen.NewRollupAdminLogicTransactor(assertionChain.RollupAddress(), l1client) + Require(t, err) + tx, err := rollup.SetMinimumAssertionPeriod(&deployAuth, big.NewInt(0)) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1client, tx) + Require(t, err) + rollup, err = rollupgen.NewRollupAdminLogicTransactor(assertionChainB.RollupAddress(), l1client) + Require(t, err) + tx, err = rollup.SetMinimumAssertionPeriod(&deployAuth, big.NewInt(0)) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1client, tx) + Require(t, err) + + valCfg := valnode.TestValidationConfig + valCfg.UseJit = false + _, valStack := createTestValidationNode(t, ctx, &valCfg) + blockValidatorConfig := staker.TestBlockValidatorConfig + + statelessA, err := staker.NewStatelessBlockValidator( + l2nodeA.InboxReader, + l2nodeA.InboxTracker, + l2nodeA.TxStreamer, + l2nodeA.Execution, + l2nodeA.ArbDB, + nil, + StaticFetcherFrom(t, &blockValidatorConfig), + valStack, + ) + Require(t, err) + err = statelessA.Start(ctx) + Require(t, err) + + statelessB, err := staker.NewStatelessBlockValidator( + l2nodeB.InboxReader, + l2nodeB.InboxTracker, + l2nodeB.TxStreamer, + l2nodeB.Execution, + l2nodeB.ArbDB, + 
nil, + StaticFetcherFrom(t, &blockValidatorConfig), + valStack, + ) + Require(t, err) + err = statelessB.Start(ctx) + Require(t, err) + + stateManager, err := staker.NewStateManager( + statelessA, + "/tmp/good", + []l2stateprovider.Height{ + l2stateprovider.Height(blockChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(smallStepChallengeLeafHeight), + }, + "good", + ) + Require(t, err) + + poster := assertions.NewPoster( + assertionChain, + stateManager, + "good", + time.Hour, + ) + + stateManagerB, err := staker.NewStateManager( + statelessB, + "/tmp/evil", + []l2stateprovider.Height{ + l2stateprovider.Height(blockChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(smallStepChallengeLeafHeight), + }, + "evil", + ) + Require(t, err) + + chainB, err := solimpl.NewAssertionChain( + ctx, + assertionChain.RollupAddress(), + &l1authB, + l1client, + ) + Require(t, err) + posterB := assertions.NewPoster( + chainB, + stateManagerB, + "evil", + time.Hour, + ) + + l2info.GenerateAccount("Destination") + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + + honestSeqInbox := l1info.GetAddress("SequencerInbox") + evilSeqInbox := l1info.GetAddress("EvilSequencerInbox") + honestSeqInboxBinding, err := bridgegen.NewSequencerInbox(honestSeqInbox, l1client) + Require(t, err) + evilSeqInboxBinding, err := bridgegen.NewSequencerInbox(evilSeqInbox, l1client) + Require(t, err) + + // Post batches to the honest and evil sequencer inbox that are internally equal. + // This means the honest and evil sequencer inboxes will agree with all messages in the batch. + totalMessagesPosted := int64(0) + numMessagesPerBatch := int64(5) + divergeAt := int64(-1) + makeBoldBatch(t, l2nodeA, l2info, l1client, &sequencerTxOpts, honestSeqInboxBinding, honestSeqInbox, numMessagesPerBatch, divergeAt) + l2info.Accounts["Owner"].Nonce = 0 + makeBoldBatch(t, l2nodeB, l2info, l1client, &sequencerTxOpts, evilSeqInboxBinding, evilSeqInbox, numMessagesPerBatch, divergeAt) + totalMessagesPosted += numMessagesPerBatch + + // Next, we post another batch, this time containing more messages. + // We diverge at message index 5 within the evil node's batch. + l2info.Accounts["Owner"].Nonce = 5 + numMessagesPerBatch = int64(10) + makeBoldBatch(t, l2nodeA, l2info, l1client, &sequencerTxOpts, honestSeqInboxBinding, honestSeqInbox, numMessagesPerBatch, divergeAt) + l2info.Accounts["Owner"].Nonce = 5 + divergeAt = int64(5) + makeBoldBatch(t, l2nodeB, l2info, l1client, &sequencerTxOpts, evilSeqInboxBinding, evilSeqInbox, numMessagesPerBatch, divergeAt) + totalMessagesPosted += numMessagesPerBatch + + bcA, err := l2nodeA.InboxTracker.GetBatchCount() + Require(t, err) + bcB, err := l2nodeB.InboxTracker.GetBatchCount() + Require(t, err) + msgA, err := l2nodeA.InboxTracker.GetBatchMessageCount(bcA - 1) + Require(t, err) + msgB, err := l2nodeB.InboxTracker.GetBatchMessageCount(bcB - 1) + Require(t, err) + + t.Logf("Node A batch count %d, msgs %d", bcA, msgA) + t.Logf("Node B batch count %d, msgs %d", bcB, msgB) + + // Wait for both nodes' chains to catch up. 
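+ // Node A must reach the total number of messages posted, node B must be at the same block height, + // and their head block hashes must differ because node B's last batch diverged.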
+ nodeAExec, ok := l2nodeA.Execution.(*gethexec.ExecutionNode) + if !ok { + Fatal(t, "not geth execution node") + } + nodeBExec, ok := l2nodeB.Execution.(*gethexec.ExecutionNode) + if !ok { + Fatal(t, "not geth execution node") + } + for { + nodeALatest := nodeAExec.Backend.APIBackend().CurrentHeader() + nodeBLatest := nodeBExec.Backend.APIBackend().CurrentHeader() + isCaughtUp := nodeALatest.Number.Uint64() == uint64(totalMessagesPosted) + areEqual := nodeALatest.Number.Uint64() == nodeBLatest.Number.Uint64() + if isCaughtUp && areEqual { + if nodeALatest.Hash() == nodeBLatest.Hash() { + Fatal(t, "node A L2 hash", nodeALatest, "matches node B L2 hash", nodeBLatest) + } + break + } + } + + // Determine the total message count up to the last batch posted to the bridge. + bridgeBinding, err := bridgegen.NewBridge(l1info.GetAddress("Bridge"), l1client) + Require(t, err) + totalBatchesBig, err := bridgeBinding.SequencerMessageCount(&bind.CallOpts{Context: ctx}) + Require(t, err) + totalBatches := totalBatchesBig.Uint64() + totalMessageCount, err := l2nodeA.InboxTracker.GetBatchMessageCount(totalBatches - 1) + Require(t, err) + + // Wait until the validator has validated the batches. + for { + _, err1 := l2nodeA.TxStreamer.ResultAtCount(arbutil.MessageIndex(totalMessageCount)) + nodeAHasValidated := err1 == nil + + _, err2 := l2nodeB.TxStreamer.ResultAtCount(arbutil.MessageIndex(totalMessageCount)) + nodeBHasValidated := err2 == nil + + if nodeAHasValidated && nodeBHasValidated { + break + } + } + + t.Log("Honest party posting assertion at batch 1, pos 0") + _, err = poster.PostAssertion(ctx) + Require(t, err) + + t.Log("Honest party posting assertion at batch 2, pos 0") + expectedWinnerAssertion, err := poster.PostAssertion(ctx) + Require(t, err) + + t.Log("Evil party posting assertion at batch 2, pos 0") + _, err = posterB.PostAssertion(ctx) + Require(t, err) + + provider := l2stateprovider.NewHistoryCommitmentProvider( + stateManager, + stateManager, + stateManager, + []l2stateprovider.Height{ + l2stateprovider.Height(blockChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(smallStepChallengeLeafHeight), + }, + stateManager, + ) + + evilProvider := l2stateprovider.NewHistoryCommitmentProvider( + stateManagerB, + stateManagerB, + stateManagerB, + []l2stateprovider.Height{ + l2stateprovider.Height(blockChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(smallStepChallengeLeafHeight), + }, + stateManagerB, + ) + + manager, err := challengemanager.New( + ctx, + assertionChain, + l1client, + provider, + assertionChain.RollupAddress(), + challengemanager.WithName("honest"), + challengemanager.WithMode(modes.DefensiveMode), + challengemanager.WithAssertionPostingInterval(time.Hour), + challengemanager.WithAssertionScanningInterval(time.Hour), + challengemanager.WithEdgeTrackerWakeInterval(time.Second), + ) + Require(t, err) + manager.Start(ctx) + managerB, err := challengemanager.New( + ctx, + chainB, + l1client, + evilProvider, + assertionChain.RollupAddress(), + challengemanager.WithName("evil"), +
challengemanager.WithMode(modes.DefensiveMode), + challengemanager.WithAssertionPostingInterval(time.Hour), + challengemanager.WithAssertionScanningInterval(time.Hour), + challengemanager.WithEdgeTrackerWakeInterval(time.Second), + ) + Require(t, err) + managerB.Start(ctx) + + rollupUserLogic, err := rollupgen.NewRollupUserLogic(assertionChain.RollupAddress(), l1client) + Require(t, err) + for { + expected, err := rollupUserLogic.GetAssertion(&bind.CallOpts{Context: ctx}, expectedWinnerAssertion.Id().Hash) + if err != nil { + t.Logf("Error getting assertion: %v", err) + continue + } + // Wait until the assertion is confirmed. + if expected.Status == uint8(2) { + t.Log("Expected assertion was confirmed") + return + } + time.Sleep(time.Second * 5) + } +} + +func createTestNodeOnL1ForBoldProtocol( + t *testing.T, + ctx context.Context, + isSequencer bool, + nodeConfig *arbnode.Config, + chainConfig *params.ChainConfig, + stackConfig *node.Config, + l2info_in info, +) ( + l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l2stack *node.Node, + l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, + assertionChain *solimpl.AssertionChain, stakeTokenAddr common.Address, +) { + if nodeConfig == nil { + nodeConfig = arbnode.ConfigDefaultL1Test() + } + nodeConfig.ParentChainReader.OldHeaderTimeout = time.Minute * 10 + if chainConfig == nil { + chainConfig = params.ArbitrumDevTestChainConfig() + } + nodeConfig.BatchPoster.DataPoster.MaxMempoolTransactions = 0 + fatalErrChan := make(chan error, 10) + l1info, l1client, l1backend, l1stack = createTestL1BlockChain(t, nil) + var l2chainDb ethdb.Database + var l2arbDb ethdb.Database + var l2blockchain *core.BlockChain + l2info = l2info_in + if l2info == nil { + l2info = NewArbTestInfo(t, chainConfig.ChainID) + } + + l1info.GenerateAccount("RollupOwner") + l1info.GenerateAccount("Sequencer") + l1info.GenerateAccount("User") + l1info.GenerateAccount("Asserter") + l1info.GenerateAccount("EvilAsserter") + + SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ + l1info.PrepareTx("Faucet", "RollupOwner", 30000, big.NewInt(9223372036854775807), nil), + l1info.PrepareTx("Faucet", "Sequencer", 30000, big.NewInt(9223372036854775807), nil), + l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(9223372036854775807), nil), + l1info.PrepareTx("Faucet", "Asserter", 30000, big.NewInt(9223372036854775807), nil), + l1info.PrepareTx("Faucet", "EvilAsserter", 30000, big.NewInt(9223372036854775807), nil), + }) + + l1TransactionOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + stakeToken, tx, tokenBindings, err := mocksgen.DeployTestWETH9( + &l1TransactionOpts, + l1client, + "Weth", + "WETH", + ) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1client, tx) + Require(t, err) + stakeTokenAddr = stakeToken + value, ok := new(big.Int).SetString("10000", 10) + if !ok { + t.Fatal(t, "could not set value") + } + l1TransactionOpts.Value = value + tx, err = tokenBindings.Deposit(&l1TransactionOpts) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1client, tx) + Require(t, err) + l1TransactionOpts.Value = nil + + addresses, assertionChainBindings := deployContractsOnly(t, ctx, l1info, l1client, chainConfig.ChainID, stakeToken) + + l1info.SetContract("Bridge", addresses.Bridge) + l1info.SetContract("SequencerInbox", addresses.SequencerInbox) + l1info.SetContract("Inbox", addresses.Inbox) + + _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, 
getInitMessage(ctx, t, l1client, addresses), stackConfig, nil) + assertionChain = assertionChainBindings + var sequencerTxOptsPtr *bind.TransactOpts + var dataSigner signature.DataSignerFunc + if isSequencer { + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + sequencerTxOptsPtr = &sequencerTxOpts + dataSigner = signature.DataSignerFromPrivateKey(l1info.GetInfoWithPrivKey("Sequencer").PrivateKey) + } + + if !isSequencer { + nodeConfig.BatchPoster.Enable = false + nodeConfig.DelayedSequencer.Enable = false + } + + AddDefaultValNode(t, ctx, nodeConfig, true) + + execConfig := gethexec.ConfigDefaultTest() + Require(t, execConfig.Validate()) + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher) + Require(t, err) + + currentNode, err = arbnode.CreateNode( + ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, + addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, + ) + Require(t, err) + + Require(t, currentNode.Start(ctx)) + + l2client = ClientForStack(t, l2stack) + + StartWatchChanErr(t, ctx, fatalErrChan, currentNode) + + return +} + +func deployContractsOnly( + t *testing.T, + ctx context.Context, + l1info info, + backend *ethclient.Client, + chainId *big.Int, + stakeToken common.Address, +) (*chaininfo.RollupAddresses, *solimpl.AssertionChain) { + l1TransactionOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + locator, err := server_common.NewMachineLocator("") + Require(t, err) + wasmModuleRoot := locator.LatestWasmModuleRoot() + + prod := false + loserStakeEscrow := common.Address{} + miniStake := big.NewInt(1) + genesisExecutionState := rollupgen.ExecutionState{ + GlobalState: rollupgen.GlobalState{}, + MachineStatus: 1, + } + genesisInboxCount := big.NewInt(0) + anyTrustFastConfirmer := common.Address{} + cfg := challenge_testing.GenerateRollupConfig( + prod, + wasmModuleRoot, + l1TransactionOpts.From, + chainId, + loserStakeEscrow, + miniStake, + stakeToken, + genesisExecutionState, + genesisInboxCount, + anyTrustFastConfirmer, + challenge_testing.WithLayerZeroHeights(&protocol.LayerZeroHeights{ + BlockChallengeHeight: blockChallengeLeafHeight, + BigStepChallengeHeight: bigStepChallengeLeafHeight, + SmallStepChallengeHeight: smallStepChallengeLeafHeight, + }), + challenge_testing.WithNumBigStepLevels(uint8(5)), // TODO: Hardcoded. + challenge_testing.WithConfirmPeriodBlocks(uint64(150)), // TODO: Hardcoded. + ) + config, err := json.Marshal(params.ArbitrumDevTestChainConfig()) + Require(t, err) + cfg.ChainConfig = string(config) + addresses, err := setup.DeployFullRollupStack( + ctx, + backend, + &l1TransactionOpts, + l1info.GetAddress("Sequencer"), + cfg, + false, // do not use mock bridge. 
+ false, // do not use a mock one step prover + ) + Require(t, err) + + asserter := l1info.GetDefaultTransactOpts("Asserter", ctx) + evilAsserter := l1info.GetDefaultTransactOpts("EvilAsserter", ctx) + chain, err := solimpl.NewAssertionChain( + ctx, + addresses.Rollup, + &asserter, + backend, + ) + Require(t, err) + + chalManager, err := chain.SpecChallengeManager(ctx) + Require(t, err) + chalManagerAddr := chalManager.Address() + seed, ok := new(big.Int).SetString("1000", 10) + if !ok { + t.Fatal("not ok") + } + value, ok := new(big.Int).SetString("10000", 10) + if !ok { + t.Fatal(t, "could not set value") + } + tokenBindings, err := mocksgen.NewTestWETH9(stakeToken, backend) + Require(t, err) + tx, err := tokenBindings.TestWETH9Transactor.Transfer(&l1TransactionOpts, asserter.From, seed) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + tx, err = tokenBindings.TestWETH9Transactor.Approve(&asserter, addresses.Rollup, value) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + tx, err = tokenBindings.TestWETH9Transactor.Approve(&asserter, chalManagerAddr, value) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + + tx, err = tokenBindings.TestWETH9Transactor.Transfer(&l1TransactionOpts, evilAsserter.From, seed) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + tx, err = tokenBindings.TestWETH9Transactor.Approve(&evilAsserter, addresses.Rollup, value) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + tx, err = tokenBindings.TestWETH9Transactor.Approve(&evilAsserter, chalManagerAddr, value) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + + return &chaininfo.RollupAddresses{ + Bridge: addresses.Bridge, + Inbox: addresses.Inbox, + SequencerInbox: addresses.SequencerInbox, + Rollup: addresses.Rollup, + ValidatorUtils: addresses.ValidatorUtils, + ValidatorWalletCreator: addresses.ValidatorWalletCreator, + DeployedAt: addresses.DeployedAt, + }, chain +} + +func create2ndNodeWithConfigForBoldProtocol( + t *testing.T, + ctx context.Context, + first *arbnode.Node, + l1stack *node.Node, + l1info *BlockchainTestInfo, + l2InitData *statetransfer.ArbosInitializationInfo, + nodeConfig *arbnode.Config, + stackConfig *node.Config, + stakeTokenAddr common.Address, +) (*ethclient.Client, *arbnode.Node, *solimpl.AssertionChain) { + fatalErrChan := make(chan error, 10) + l1rpcClient, err := l1stack.Attach() + if err != nil { + Fatal(t, err) + } + l1client := ethclient.NewClient(l1rpcClient) + firstExec, ok := first.Execution.(*gethexec.ExecutionNode) + if !ok { + Fatal(t, "not geth execution node") + } + chainConfig := firstExec.ArbInterface.BlockChain().Config() + addresses, assertionChain := deployContractsOnly(t, ctx, l1info, l1client, chainConfig.ChainID, stakeTokenAddr) + + l1info.SetContract("EvilBridge", addresses.Bridge) + l1info.SetContract("EvilSequencerInbox", addresses.SequencerInbox) + l1info.SetContract("EvilInbox", addresses.Inbox) + + if nodeConfig == nil { + nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest() + } + nodeConfig.ParentChainReader.OldHeaderTimeout = 10 * time.Minute + nodeConfig.BatchPoster.DataPoster.MaxMempoolTransactions = 0 + if stackConfig == nil { + stackConfig = stackConfigForTest(t) + } + l2stack, err := node.New(stackConfig) + Require(t, err) + + l2chainDb, err := l2stack.OpenDatabase("chaindb", 0, 0, "", false) + Require(t, err) + l2arbDb, err := 
l2stack.OpenDatabase("arbdb", 0, 0, "", false) + Require(t, err) + + AddDefaultValNode(t, ctx, nodeConfig, true) + + dataSigner := signature.DataSignerFromPrivateKey(l1info.GetInfoWithPrivKey("Sequencer").PrivateKey) + txOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + + initReader := statetransfer.NewMemoryInitDataReader(l2InitData) + initMessage := getInitMessage(ctx, t, l1client, first.DeployInfo) + + execConfig := gethexec.ConfigDefaultTest() + Require(t, execConfig.Validate()) + + l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, nil, initReader, chainConfig, initMessage, execConfig.TxLookupLimit, 0) + Require(t, err) + + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher) + Require(t, err) + l2node, err := arbnode.CreateNode(ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, addresses, &txOpts, &txOpts, dataSigner, fatalErrChan) + Require(t, err) + + Require(t, l2node.Start(ctx)) + + l2client := ClientForStack(t, l2stack) + + StartWatchChanErr(t, ctx, fatalErrChan, l2node) + + return l2client, l2node, assertionChain +} + +func makeBoldBatch( + t *testing.T, + l2Node *arbnode.Node, + l2Info *BlockchainTestInfo, + backend *ethclient.Client, + sequencer *bind.TransactOpts, + seqInbox *bridgegen.SequencerInbox, + seqInboxAddr common.Address, + messagesPerBatch, + divergeAtIndex int64, +) { + ctx := context.Background() + + batchBuffer := bytes.NewBuffer([]byte{}) + for i := int64(0); i < messagesPerBatch; i++ { + value := i + if i == divergeAtIndex { + value++ + } + err := writeTxToBatchBold(batchBuffer, l2Info.PrepareTx("Owner", "Destination", 1000000, big.NewInt(value), []byte{})) + Require(t, err) + } + compressed, err := arbcompress.CompressWell(batchBuffer.Bytes()) + Require(t, err) + message := append([]byte{0}, compressed...) + + seqNum := new(big.Int).Lsh(common.Big1, 256) + seqNum.Sub(seqNum, common.Big1) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin0(sequencer, seqNum, message, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + + nodeSeqInbox, err := arbnode.NewSequencerInbox(backend, seqInboxAddr, 0) + Require(t, err) + batches, err := nodeSeqInbox.LookupBatchesInRange(ctx, receipt.BlockNumber, receipt.BlockNumber) + Require(t, err) + if len(batches) == 0 { + Fatal(t, "batch not found after AddSequencerL2BatchFromOrigin") + } + err = l2Node.InboxTracker.AddSequencerBatches(ctx, backend, batches) + Require(t, err) + _, err = l2Node.InboxTracker.GetBatchMetadata(0) + Require(t, err, "failed to get batch metadata after adding batch:") +} + +func writeTxToBatchBold(writer io.Writer, tx *types.Transaction) error { + txData, err := tx.MarshalBinary() + if err != nil { + return err + } + var segment []byte + segment = append(segment, arbstate.BatchSegmentKindL2Message) + segment = append(segment, arbos.L2MessageKind_SignedTx) + segment = append(segment, txData...) 
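+ // RLP-encode the assembled L2 message segment into the batch writer.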
+ err = rlp.Encode(writer, segment) + return err +} diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 19357c5b79..5d89a602e3 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -404,7 +404,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no nodeConf := ethconfig.Defaults nodeConf.NetworkId = chainConfig.ChainID.Uint64() - l1Genesis := core.DeveloperGenesisBlock(0, 15_000_000, l1info.GetAddress("Faucet")) + l1Genesis := core.DeveloperGenesisBlock(0, 50_000_000, l1info.GetAddress("Faucet")) infoGenesis := l1info.GetGenesisAlloc() for acct, info := range infoGenesis { l1Genesis.Alloc[acct] = info diff --git a/system_tests/manager_test.go b/system_tests/manager_test.go deleted file mode 100644 index 9bd72d1578..0000000000 --- a/system_tests/manager_test.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2023, Offchain Labs, Inc. -// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE -package arbtest - -import ( - "context" - "github.com/offchainlabs/nitro/util/testhelpers" - "math/big" - "reflect" - "strings" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/params" - - "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbos/l2pricing" - "github.com/offchainlabs/nitro/staker" - "github.com/offchainlabs/nitro/util" - "github.com/offchainlabs/nitro/validator" - "github.com/offchainlabs/nitro/validator/valnode" - - protocol "github.com/OffchainLabs/bold/chain-abstraction" - commitments "github.com/OffchainLabs/bold/state-commitments/history" - prefixproofs "github.com/OffchainLabs/bold/state-commitments/prefix-proofs" -) - -const numOpcodesPerBigStepTest = uint64(4) -const maxWavmOpcodesTest = uint64(20) - -func TestExecutionStateMsgCount(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - res, err := l2node.TxStreamer.ResultAtCount(1) - Require(t, err) - msgCount, err := manager.ExecutionStateMsgCount(ctx, &protocol.ExecutionState{GlobalState: protocol.GoGlobalState{Batch: 1, BlockHash: res.BlockHash}}) - Require(t, err) - if msgCount != 1 { - Fail(t, "Unexpected msg batch", msgCount, "(expected 1)") - } -} - -func TestExecutionStateAtMessageNumber(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - res, err := l2node.TxStreamer.ResultAtCount(1) - Require(t, err) - expectedState := &protocol.ExecutionState{ - GlobalState: protocol.GoGlobalState{ - Batch: 1, - BlockHash: res.BlockHash, - }, - MachineStatus: protocol.MachineStatusFinished, - } - executionState, err := manager.ExecutionStateAtMessageNumber(ctx, 1) - Require(t, err) - if !reflect.DeepEqual(executionState, expectedState) { - Fail(t, "Unexpected executionState", executionState, "(expected ", expectedState, ")") - } - Require(t, err) -} - -func TestHistoryCommitmentUpTo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - res1, err := l2node.TxStreamer.ResultAtCount(1) - 
Require(t, err) - expectedHistoryCommitment, err := commitments.New( - []common.Hash{ - crypto.Keccak256Hash( - []byte("Machine finished:"), - validator.GoGlobalState{ - BlockHash: res1.BlockHash, - SendRoot: res1.SendRoot, - Batch: 1, - PosInBatch: 0, - }.Hash().Bytes(), - ), - }, - ) - Require(t, err) - historyCommitment, err := manager.HistoryCommitmentAtMessage(ctx, 1) - Require(t, err) - if !reflect.DeepEqual(historyCommitment, expectedHistoryCommitment) { - Fail(t, "Unexpected HistoryCommitment", historyCommitment, "(expected ", expectedHistoryCommitment, ")") - } -} - -func TestBigStepCommitmentUpTo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - commitment, err := manager.BigStepCommitmentUpTo(ctx, common.Hash{}, 1, 3) - Require(t, err) - if commitment.Height != 3 { - Fail(t, "Unexpected commitment height", commitment.Height, "(expected ", 3, ")") - } -} - -func TestSmallStepCommitmentUpTo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - commitment, err := manager.SmallStepCommitmentUpTo(ctx, common.Hash{}, 1, 3, 2) - Require(t, err) - if commitment.Height != 2 { - Fail(t, "Unexpected commitment height", commitment.Height, "(expected ", 2, ")") - } -} - -func TestHistoryCommitmentUpToBatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - res1, err := l2node.TxStreamer.ResultAtCount(1) - Require(t, err) - expectedHistoryCommitment, err := commitments.New( - []common.Hash{ - crypto.Keccak256Hash( - []byte("Machine finished:"), - validator.GoGlobalState{ - BlockHash: res1.BlockHash, - SendRoot: res1.SendRoot, - Batch: 1, - PosInBatch: 0, - }.Hash().Bytes(), - ), - crypto.Keccak256Hash( - []byte("Machine finished:"), - validator.GoGlobalState{ - BlockHash: res1.BlockHash, - SendRoot: res1.SendRoot, - Batch: 1, - PosInBatch: 0, - }.Hash().Bytes(), - ), - }, - ) - Require(t, err) - historyCommitment, err := manager.HistoryCommitmentUpToBatch(ctx, 1, 2, 2) - Require(t, err) - if !reflect.DeepEqual(historyCommitment, expectedHistoryCommitment) { - Fail(t, "Unexpected HistoryCommitment", historyCommitment, "(expected ", expectedHistoryCommitment, ")") - } -} - -func TestBigStepLeafCommitment(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - commitment, err := manager.BigStepLeafCommitment(ctx, common.Hash{}, 1) - Require(t, err) - numBigSteps := maxWavmOpcodesTest / numOpcodesPerBigStepTest - if commitment.Height != numBigSteps { - Fail(t, "Unexpected commitment height", commitment.Height, "(expected ", numBigSteps, ")") - } -} - -func TestSmallStepLeafCommitment(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - commitment, err := manager.SmallStepLeafCommitment(ctx, common.Hash{}, 1, 3) - Require(t, err) - if commitment.Height != numOpcodesPerBigStepTest { - Fail(t, "Unexpected commitment height", commitment.Height, 
"(expected ", numOpcodesPerBigStepTest, ")") - } -} - -func TestAllPrefixProofs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - - from := uint64(1) - to := uint64(3) - - loCommit, err := manager.HistoryCommitmentUpToBatch(ctx, 1, from, 10) - Require(t, err) - hiCommit, err := manager.HistoryCommitmentUpToBatch(ctx, 1, to, 10) - Require(t, err) - packedProof, err := manager.PrefixProofUpToBatch(ctx, 1, from, to, 10) - Require(t, err) - - data, err := staker.ProofArgs.Unpack(packedProof) - Require(t, err) - preExpansion, ok := data[0].([][32]byte) - if !ok { - Fatal(t, "bad output from packedProof") - } - proof, ok := data[1].([][32]byte) - if !ok { - Fatal(t, "bad output from packedProof") - } - - preExpansionHashes := make([]common.Hash, len(preExpansion)) - for i := 0; i < len(preExpansion); i++ { - preExpansionHashes[i] = preExpansion[i] - } - prefixProof := make([]common.Hash, len(proof)) - for i := 0; i < len(proof); i++ { - prefixProof[i] = proof[i] - } - - err = prefixproofs.VerifyPrefixProof(&prefixproofs.VerifyPrefixProofConfig{ - PreRoot: loCommit.Merkle, - PreSize: from, - PostRoot: hiCommit.Merkle, - PostSize: to, - PreExpansion: preExpansionHashes, - PrefixProof: prefixProof, - }) - Require(t, err) - - bigFrom := uint64(1) - - bigCommit, err := manager.BigStepLeafCommitment(ctx, common.Hash{}, from) - Require(t, err) - - bigBisectCommit, err := manager.BigStepCommitmentUpTo(ctx, common.Hash{}, from, bigFrom) - Require(t, err) - if bigFrom != bigBisectCommit.Height { - Fail(t, "Unexpected bigBisectCommit Height", bigBisectCommit.Height, "(expected ", bigFrom, ")") - } - if bigCommit.FirstLeaf != bigBisectCommit.FirstLeaf { - Fail(t, "Unexpected bigBisectCommit FirstLeaf", bigBisectCommit.FirstLeaf, "(expected ", bigCommit.FirstLeaf, ")") - } - - bigProof, err := manager.BigStepPrefixProof(ctx, common.Hash{}, from, bigFrom, bigCommit.Height) - Require(t, err) - - data, err = staker.ProofArgs.Unpack(bigProof) - Require(t, err) - preExpansion, ok = data[0].([][32]byte) - if !ok { - Fatal(t, "bad output from packedProof") - } - proof, ok = data[1].([][32]byte) - if !ok { - Fatal(t, "bad output from packedProof") - } - - preExpansionHashes = make([]common.Hash, len(preExpansion)) - for i := 0; i < len(preExpansion); i++ { - preExpansionHashes[i] = preExpansion[i] - } - prefixProof = make([]common.Hash, len(proof)) - for i := 0; i < len(proof); i++ { - prefixProof[i] = proof[i] - } - - computed, err := prefixproofs.Root(preExpansionHashes) - Require(t, err) - if bigBisectCommit.Merkle != computed { - Fail(t, "Unexpected bigBisectCommit Merkle", bigBisectCommit.Merkle, "(expected ", computed, ")") - } - - err = prefixproofs.VerifyPrefixProof(&prefixproofs.VerifyPrefixProofConfig{ - PreRoot: bigBisectCommit.Merkle, - PreSize: bigFrom + 1, - PostRoot: bigCommit.Merkle, - PostSize: bigCommit.Height + 1, - PreExpansion: preExpansionHashes, - PrefixProof: prefixProof, - }) - Require(t, err) - - smallCommit, err := manager.SmallStepLeafCommitment(ctx, common.Hash{}, from, bigFrom) - Require(t, err) - - smallFrom := uint64(2) - - smallBisectCommit, err := manager.SmallStepCommitmentUpTo(ctx, common.Hash{}, from, bigFrom, smallFrom) - Require(t, err) - if smallBisectCommit.Height != smallFrom { - Fail(t, "Unexpected smallBisectCommit Height", smallBisectCommit.Height, "(expected ", smallFrom, ")") - } - if smallBisectCommit.FirstLeaf != 
smallCommit.FirstLeaf { - Fail(t, "Unexpected smallBisectCommit FirstLeaf", smallBisectCommit.FirstLeaf, "(expected ", smallCommit.FirstLeaf, ")") - } - - smallProof, err := manager.SmallStepPrefixProof(ctx, common.Hash{}, from, bigFrom, smallFrom, smallCommit.Height) - Require(t, err) - - data, err = staker.ProofArgs.Unpack(smallProof) - Require(t, err) - preExpansion, ok = data[0].([][32]byte) - if !ok { - Fatal(t, "bad output from packedProof") - } - proof, ok = data[1].([][32]byte) - if !ok { - Fatal(t, "bad output from packedProof") - } - - preExpansionHashes = make([]common.Hash, len(preExpansion)) - for i := 0; i < len(preExpansion); i++ { - preExpansionHashes[i] = preExpansion[i] - } - prefixProof = make([]common.Hash, len(proof)) - for i := 0; i < len(proof); i++ { - prefixProof[i] = proof[i] - } - - computed, err = prefixproofs.Root(preExpansionHashes) - Require(t, err) - if smallBisectCommit.Merkle != computed { - Fail(t, "Unexpected smallBisectCommit Merkle", smallBisectCommit.Merkle, "(expected ", computed, ")") - } - - err = prefixproofs.VerifyPrefixProof(&prefixproofs.VerifyPrefixProofConfig{ - PreRoot: smallBisectCommit.Merkle, - PreSize: smallFrom + 1, - PostRoot: smallCommit.Merkle, - PostSize: smallCommit.Height + 1, - PreExpansion: preExpansionHashes, - PrefixProof: prefixProof, - }) - Require(t, err) -} - -func TestPrefixProofUpToBatchInvalidBatchCount(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2node, l1stack, manager := setupManger(t, ctx) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - - _, err := manager.PrefixProofUpToBatch(ctx, 0, 0, 2, 1) - if err == nil || !strings.Contains(err.Error(), "toMessageNumber should not be greater than batchCount") { - Fail(t, "batch count", 1, "less than toMessageNumber", 2, "should not be allowed") - } -} -func setupManger(t *testing.T, ctx context.Context) (*arbnode.Node, *node.Node, *staker.StateManager) { - var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs - l2chainConfig := params.ArbitrumDevTestChainConfig() - l2info := NewBlockChainTestInfo( - t, - types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), - transferGas, - ) - _, l2node, l2client, _, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, l2chainConfig, nil, nil, l2info) - BridgeBalance(t, "Faucet", big.NewInt(1).Mul(big.NewInt(params.Ether), big.NewInt(10000)), l1info, l2info, l1client, l2client, ctx) - l2info.GenerateAccount("BackgroundUser") - balance := big.NewInt(params.Ether) - balance.Mul(balance, big.NewInt(100)) - tx := l2info.PrepareTx("Faucet", "BackgroundUser", l2info.TransferGas, balance, nil) - err := l2client.SendTransaction(ctx, tx) - Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) - Require(t, err) - - for i := uint64(0); i < 10; i++ { - l2info.Accounts["BackgroundUser"].Nonce = i - tx = l2info.PrepareTx("BackgroundUser", "BackgroundUser", l2info.TransferGas, common.Big0, nil) - err = l2client.SendTransaction(ctx, tx) - Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) - Require(t, err) - } - - _, valStack := createTestValidationNode(t, ctx, &valnode.TestValidationConfig) - blockValidatorConfig := staker.TestBlockValidatorConfig - stateless, err := staker.NewStatelessBlockValidator( - l2node.InboxReader, - l2node.InboxTracker, - l2node.TxStreamer, - l2node.Execution.Recorder, - l2node.ArbDB, - nil, - 
StaticFetcherFrom(t, &blockValidatorConfig), - valStack, - ) - Require(t, err) - err = stateless.Start(ctx) - Require(t, err) - manager, err := staker.NewStateManager(stateless, nil, numOpcodesPerBigStepTest, maxWavmOpcodesTest, t.TempDir()) - Require(t, err) - return l2node, l1stack, manager -} - -func Fail(t *testing.T, printables ...interface{}) { - t.Helper() - testhelpers.FailImpl(t, printables...) -} diff --git a/system_tests/state_provider_test.go b/system_tests/state_provider_test.go new file mode 100644 index 0000000000..6e59083c36 --- /dev/null +++ b/system_tests/state_provider_test.go @@ -0,0 +1,321 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE + +// race detection makes things slow and miss timeouts +//go:build challengetest && !race + +package arbtest + +import ( + "context" + "errors" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/util" + "github.com/offchainlabs/nitro/validator/valnode" + + protocol "github.com/OffchainLabs/bold/chain-abstraction" + "github.com/OffchainLabs/bold/containers/option" + l2stateprovider "github.com/OffchainLabs/bold/layer2-state-provider" + "github.com/OffchainLabs/bold/solgen/go/bridgegen" + prefixproofs "github.com/OffchainLabs/bold/state-commitments/prefix-proofs" + mockmanager "github.com/OffchainLabs/bold/testing/mocks/state-provider" +) + +func TestStateProvider_BOLD_Bisections(t *testing.T) { + t.Parallel() + ctx, cancelCtx := context.WithCancel(context.Background()) + defer cancelCtx() + l2node, l1info, l2info, l1stack, l1client, stateManager := setupBoldStateProvider(t, ctx) + defer requireClose(t, l1stack) + defer l2node.StopAndWait() + l2info.GenerateAccount("Destination") + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + + seqInbox := l1info.GetAddress("SequencerInbox") + seqInboxBinding, err := bridgegen.NewSequencerInbox(seqInbox, l1client) + Require(t, err) + + // We will make two batches: the first with 5 messages, the second with 10. + numMessagesPerBatch := int64(5) + divergeAt := int64(-1) // No divergence. + makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, seqInboxBinding, seqInbox, numMessagesPerBatch, divergeAt) + numMessagesPerBatch = int64(10) + makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, seqInboxBinding, seqInbox, numMessagesPerBatch, divergeAt) + + bridgeBinding, err := bridgegen.NewBridge(l1info.GetAddress("Bridge"), l1client) + Require(t, err) + totalBatchesBig, err := bridgeBinding.SequencerMessageCount(&bind.CallOpts{Context: ctx}) + Require(t, err) + totalBatches := totalBatchesBig.Uint64() + totalMessageCount, err := l2node.InboxTracker.GetBatchMessageCount(totalBatches - 1) + Require(t, err) + + // Wait until the validator has validated the batches. 
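+ // ResultAtCount returns an error until the message at the given count has been processed, so keep polling.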
+ for { + if _, err := l2node.TxStreamer.ResultAtCount(arbutil.MessageIndex(totalMessageCount)); err == nil { + break + } + } + + historyCommitter := l2stateprovider.NewHistoryCommitmentProvider( + stateManager, + stateManager, + stateManager, []l2stateprovider.Height{ + 1 << 5, + 1 << 5, + 1 << 5, + }, + stateManager, + ) + bisectionHeight := l2stateprovider.Height(16) + request := &l2stateprovider.HistoryCommitmentRequest{ + WasmModuleRoot: common.Hash{}, + FromBatch: 1, + ToBatch: 3, + UpperChallengeOriginHeights: []l2stateprovider.Height{}, + FromHeight: 0, + UpToHeight: option.Some(bisectionHeight), + } + bisectionCommitment, err := historyCommitter.HistoryCommitment(ctx, request) + Require(t, err) + + request.UpToHeight = option.None[l2stateprovider.Height]() + packedProof, err := historyCommitter.PrefixProof(ctx, request, bisectionHeight) + Require(t, err) + + data, err := mockmanager.ProofArgs.Unpack(packedProof) + Require(t, err) + preExpansion, ok := data[0].([][32]byte) + if !ok { + Fatal(t, "wrong type") + } + + hashes := make([]common.Hash, len(preExpansion)) + for i, h := range preExpansion { + hash := h + hashes[i] = common.Hash(hash) + } + + computed, err := prefixproofs.Root(hashes) + Require(t, err) + if computed != bisectionCommitment.Merkle { + Fatal(t, "wrong commitment") + } +} + +func TestStateProvider_BOLD(t *testing.T) { + t.Parallel() + ctx, cancelCtx := context.WithCancel(context.Background()) + defer cancelCtx() + l2node, l1info, l2info, l1stack, l1client, stateManager := setupBoldStateProvider(t, ctx) + defer requireClose(t, l1stack) + defer l2node.StopAndWait() + l2info.GenerateAccount("Destination") + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + + seqInbox := l1info.GetAddress("SequencerInbox") + seqInboxBinding, err := bridgegen.NewSequencerInbox(seqInbox, l1client) + Require(t, err) + + // We will make two batches, with 5 messages in each batch. + numMessagesPerBatch := int64(5) + divergeAt := int64(-1) // No divergence. + makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, seqInboxBinding, seqInbox, numMessagesPerBatch, divergeAt) + makeBoldBatch(t, l2node, l2info, l1client, &sequencerTxOpts, seqInboxBinding, seqInbox, numMessagesPerBatch, divergeAt) + + bridgeBinding, err := bridgegen.NewBridge(l1info.GetAddress("Bridge"), l1client) + Require(t, err) + totalBatchesBig, err := bridgeBinding.SequencerMessageCount(&bind.CallOpts{Context: ctx}) + Require(t, err) + totalBatches := totalBatchesBig.Uint64() + totalMessageCount, err := l2node.InboxTracker.GetBatchMessageCount(totalBatches - 1) + Require(t, err) + + // Wait until the validator has validated the batches. 
+ for { + if _, err := l2node.TxStreamer.ResultAtCount(arbutil.MessageIndex(totalMessageCount)); err == nil { + break + } + } + + t.Run("StatesInBatchRange", func(t *testing.T) { + fromBatch := l2stateprovider.Batch(1) + toBatch := l2stateprovider.Batch(3) + fromHeight := l2stateprovider.Height(0) + toHeight := l2stateprovider.Height(14) + stateRoots, states, err := stateManager.StatesInBatchRange(fromHeight, toHeight, fromBatch, toBatch) + Require(t, err) + + if len(stateRoots) != 15 { + Fatal(t, "wrong number of state roots") + } + if len(states) == 0 { + Fatal(t, "no states returned") + } + firstState := states[0] + if firstState.Batch != 1 && firstState.PosInBatch != 0 { + Fatal(t, "wrong first state") + } + lastState := states[len(states)-1] + if lastState.Batch != 1 && lastState.PosInBatch != 0 { + Fatal(t, "wrong last state") + } + }) + t.Run("AgreesWithExecutionState", func(t *testing.T) { + // A non-zero position in the batch should fail. + err = stateManager.AgreesWithExecutionState(ctx, &protocol.ExecutionState{ + GlobalState: protocol.GoGlobalState{ + Batch: 0, + PosInBatch: 1, + }, + MachineStatus: protocol.MachineStatusFinished, + }) + if err == nil { + Fatal(t, "should not agree with execution state") + } + if !strings.Contains(err.Error(), "position in batch must be zero") { + Fatal(t, "wrong error message") + } + + // Always agrees with genesis. + err = stateManager.AgreesWithExecutionState(ctx, &protocol.ExecutionState{ + GlobalState: protocol.GoGlobalState{ + Batch: 0, + PosInBatch: 0, + }, + MachineStatus: protocol.MachineStatusFinished, + }) + Require(t, err) + + // Always agrees with the init message. + err = stateManager.AgreesWithExecutionState(ctx, &protocol.ExecutionState{ + GlobalState: protocol.GoGlobalState{ + Batch: 1, + PosInBatch: 0, + }, + MachineStatus: protocol.MachineStatusFinished, + }) + Require(t, err) + + // The chain is still catching up if it has not yet seen batch 10. + err = stateManager.AgreesWithExecutionState(ctx, &protocol.ExecutionState{ + GlobalState: protocol.GoGlobalState{ + Batch: 10, + PosInBatch: 0, + }, + MachineStatus: protocol.MachineStatusFinished, + }) + if err == nil { + Fatal(t, "should not agree with execution state") + } + if !errors.Is(err, staker.ErrChainCatchingUp) { + Fatal(t, "wrong error") + } + + // Check if we agree with the last posted batch to the inbox. + result, err := l2node.TxStreamer.ResultAtCount(arbutil.MessageIndex(totalMessageCount)) + Require(t, err) + + state := &protocol.ExecutionState{ + GlobalState: protocol.GoGlobalState{ + BlockHash: result.BlockHash, + SendRoot: result.SendRoot, + Batch: 3, + PosInBatch: 0, + }, + MachineStatus: protocol.MachineStatusFinished, + } + err = stateManager.AgreesWithExecutionState(ctx, state) + Require(t, err) + + // Expect to disagree with the state one batch beyond that, failing with + // "ErrChainCatchingUp". 
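+ // (the next batch has not been posted yet, so the state manager reports it is still catching up).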
+ state.GlobalState.Batch += 1 + + err = stateManager.AgreesWithExecutionState(ctx, state) + if err == nil { + Fatal(t, "should not agree with execution state") + } + if !errors.Is(err, staker.ErrChainCatchingUp) { + Fatal(t, "wrong error") + } + }) + t.Run("ExecutionStateAfterBatchCount", func(t *testing.T) { + _, err = stateManager.ExecutionStateAfterBatchCount(ctx, 0) + if err == nil { + Fatal(t, "should have failed") + } + if !strings.Contains(err.Error(), "batch count cannot be zero") { + Fatal(t, "wrong error message") + } + + execState, err := stateManager.ExecutionStateAfterBatchCount(ctx, totalBatches) + Require(t, err) + + // We should agree with the last posted batch to the inbox based on our + // retrieved execution state. + err = stateManager.AgreesWithExecutionState(ctx, execState) + Require(t, err) + }) +} + +func setupBoldStateProvider(t *testing.T, ctx context.Context) (*arbnode.Node, *BlockchainTestInfo, *BlockchainTestInfo, *node.Node, *ethclient.Client, *staker.StateManager) { + var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs + l2chainConfig := params.ArbitrumDevTestChainConfig() + l2info := NewBlockChainTestInfo( + t, + types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), + transferGas, + ) + ownerBal := big.NewInt(params.Ether) + ownerBal.Mul(ownerBal, big.NewInt(1_000_000)) + l2info.GenerateGenesisAccount("Owner", ownerBal) + + _, l2node, _, _, l1info, _, l1client, l1stack, _, _ := createTestNodeOnL1ForBoldProtocol(t, ctx, true, nil, l2chainConfig, nil, l2info) + + valnode.TestValidationConfig.UseJit = false + _, valStack := createTestValidationNode(t, ctx, &valnode.TestValidationConfig) + blockValidatorConfig := staker.TestBlockValidatorConfig + + stateless, err := staker.NewStatelessBlockValidator( + l2node.InboxReader, + l2node.InboxTracker, + l2node.TxStreamer, + l2node.Execution, + l2node.ArbDB, + nil, + StaticFetcherFrom(t, &blockValidatorConfig), + valStack, + ) + Require(t, err) + err = stateless.Start(ctx) + Require(t, err) + + stateManager, err := staker.NewStateManager( + stateless, + "", + []l2stateprovider.Height{ + l2stateprovider.Height(blockChallengeLeafHeight), + l2stateprovider.Height(bigStepChallengeLeafHeight), + l2stateprovider.Height(smallStepChallengeLeafHeight), + }, + "good", + staker.DisableCache(), + ) + Require(t, err) + return l2node, l1info, l2info, l1stack, l1client, stateManager +} diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go index 2fd3a92ab0..01a161bbbe 100644 --- a/system_tests/validation_mock_test.go +++ b/system_tests/validation_mock_test.go @@ -116,13 +116,8 @@ func (r *mockExecRun) GetStepAt(position uint64) containers.PromiseInterface[*va }, nil) } -func (r *mockExecRun) GetBigStepLeavesUpTo(toBigStep uint64, numOpcodesPerBigStep uint64) containers.PromiseInterface[[]common.Hash] { - // TODO: Add mock implementation for GetBigStepLeavesUpTo - return containers.NewReadyPromise[[]common.Hash](nil, nil) -} - -func (r *mockExecRun) GetSmallStepLeavesUpTo(bigStep uint64, toSmallStep uint64, numOpcodesPerBigStep uint64) containers.PromiseInterface[[]common.Hash] { - // TODO: Add mock implementation for GetSmallStepLeavesUpTo +func (r *mockExecRun) GetLeavesWithStepSize(machineStartIndex, stepSize, numDesiredLeaves uint64) containers.PromiseInterface[[]common.Hash] { + // TODO: Add mock implementation for GetLeavesWithStepSize return 
containers.NewReadyPromise[[]common.Hash](nil, nil) } diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index ff3b420a1c..04b9cf2660 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -313,7 +313,8 @@ func (s *HeaderReader) logIfHeaderIsOld() { } l1Timetamp := time.Unix(int64(storedHeader.Time), 0) headerTime := time.Since(l1Timetamp) - if headerTime >= s.config().OldHeaderTimeout { + oldHeaderTimeout := time.Minute * 10 + if headerTime >= oldHeaderTimeout { s.setError(fmt.Errorf("latest header is at least %v old", headerTime)) log.Error( "latest L1 block is old", "l1Block", storedHeader.Number, diff --git a/validator/interface.go b/validator/interface.go index 385604e9d0..da56be7ffb 100644 --- a/validator/interface.go +++ b/validator/interface.go @@ -30,8 +30,7 @@ type ExecutionSpawner interface { type ExecutionRun interface { GetStepAt(uint64) containers.PromiseInterface[*MachineStepResult] - GetBigStepLeavesUpTo(uint64, uint64) containers.PromiseInterface[[]common.Hash] - GetSmallStepLeavesUpTo(uint64, uint64, uint64) containers.PromiseInterface[[]common.Hash] + GetLeavesWithStepSize(machineStartIndex, stepSize, numDesiredLeaves uint64) containers.PromiseInterface[[]common.Hash] GetLastStep() containers.PromiseInterface[*MachineStepResult] GetProofAt(uint64) containers.PromiseInterface[[]byte] PrepareRange(uint64, uint64) containers.PromiseInterface[struct{}] diff --git a/validator/server_api/valiation_api.go b/validator/server_api/valiation_api.go index 094f7e473d..1848897521 100644 --- a/validator/server_api/valiation_api.go +++ b/validator/server_api/valiation_api.go @@ -142,26 +142,13 @@ func (a *ExecServerAPI) GetStepAt(ctx context.Context, execid uint64, position u return MachineStepResultToJson(res), nil } -func (a *ExecServerAPI) GetBigStepLeavesUpTo(ctx context.Context, execid uint64, toBigStep uint64, numOpcodesPerBigStep uint64) ([]common.Hash, error) { +func (a *ExecServerAPI) GetLeavesWithStepSize(ctx context.Context, execid, fromStep, stepSize, numDesiredLeaves uint64) ([]common.Hash, error) { run, err := a.getRun(execid) if err != nil { return nil, err } - bigStepLeavesUpTo := run.GetBigStepLeavesUpTo(toBigStep, numOpcodesPerBigStep) - res, err := bigStepLeavesUpTo.Await(ctx) - if err != nil { - return nil, err - } - return res, nil -} - -func (a *ExecServerAPI) GetSmallStepLeavesUpTo(ctx context.Context, execid uint64, bigStep uint64, toSmallStep uint64, numOpcodesPerBigStep uint64) ([]common.Hash, error) { - run, err := a.getRun(execid) - if err != nil { - return nil, err - } - smallStepLeavesUpTo := run.GetSmallStepLeavesUpTo(bigStep, toSmallStep, numOpcodesPerBigStep) - res, err := smallStepLeavesUpTo.Await(ctx) + leavesInRange := run.GetLeavesWithStepSize(fromStep, stepSize, numDesiredLeaves) + res, err := leavesInRange.Await(ctx) if err != nil { return nil, err } diff --git a/validator/server_api/validation_client.go b/validator/server_api/validation_client.go index 326cdb8c2f..ed055c3cfb 100644 --- a/validator/server_api/validation_client.go +++ b/validator/server_api/validation_client.go @@ -177,21 +177,10 @@ func (r *ExecutionClientRun) GetStepAt(pos uint64) containers.PromiseInterface[* }) } -func (r *ExecutionClientRun) GetBigStepLeavesUpTo(toBigStep uint64, numOpcodesPerBigStep uint64) containers.PromiseInterface[[]common.Hash] { +func (r *ExecutionClientRun) GetLeavesWithStepSize(machineStartIndex, stepSize, numDesiredLeaves uint64) containers.PromiseInterface[[]common.Hash] { return 
stopwaiter.LaunchPromiseThread[[]common.Hash](r, func(ctx context.Context) ([]common.Hash, error) { var resJson []common.Hash - err := r.client.client.CallContext(ctx, &resJson, Namespace+"_getBigStepLeavesUpTo", r.id, toBigStep, numOpcodesPerBigStep) - if err != nil { - return nil, err - } - return resJson, err - }) -} - -func (r *ExecutionClientRun) GetSmallStepLeavesUpTo(bigStep uint64, toSmallStep uint64, numOpcodesPerBigStep uint64) containers.PromiseInterface[[]common.Hash] { - return stopwaiter.LaunchPromiseThread[[]common.Hash](r, func(ctx context.Context) ([]common.Hash, error) { - var resJson []common.Hash - err := r.client.client.CallContext(ctx, &resJson, Namespace+"_getSmallStepLeavesUpTo", r.id, bigStep, toSmallStep, numOpcodesPerBigStep) + err := r.client.client.CallContext(ctx, &resJson, Namespace+"_getLeavesWithStepSize", r.id, machineStartIndex, stepSize, numDesiredLeaves) if err != nil { return nil, err } diff --git a/validator/server_arb/execution_run.go b/validator/server_arb/execution_run.go index 0ca939db7c..2018ef7baa 100644 --- a/validator/server_arb/execution_run.go +++ b/validator/server_arb/execution_run.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -56,38 +57,60 @@ func (e *executionRun) GetStepAt(position uint64) containers.PromiseInterface[*v }) } -func (e *executionRun) GetBigStepLeavesUpTo(toBigStep uint64, numOpcodesPerBigStep uint64) containers.PromiseInterface[[]common.Hash] { +func (e *executionRun) GetLeavesWithStepSize(machineStartIndex, stepSize, numDesiredLeaves uint64) containers.PromiseInterface[[]common.Hash] { return stopwaiter.LaunchPromiseThread[[]common.Hash](e, func(ctx context.Context) ([]common.Hash, error) { - var stateRoots []common.Hash - machine, err := e.cache.GetMachineAt(ctx, 0) + machine, err := e.cache.GetMachineAt(ctx, machineStartIndex) if err != nil { return nil, err } - if !machine.IsRunning() { + // If the machine is starting at index 0, we always want to start at the "Machine finished" global state status + // to align with the state roots that the inbox machine will produce. + var stateRoots []common.Hash + if machineStartIndex == 0 { + gs := machine.GetGlobalState() + hash := crypto.Keccak256Hash([]byte("Machine finished:"), gs.Hash().Bytes()) + stateRoots = append(stateRoots, hash) + } else { + // Otherwise, we simply append the machine hash at the specified start index. + stateRoots = append(stateRoots, machine.Hash()) + } + + // If we only want 1 state root, we can return early. + if numDesiredLeaves == 1 { return stateRoots, nil } - for i := uint64(0); i <= toBigStep; i++ { - position := i * numOpcodesPerBigStep - if err = machine.Step(ctx, position); err != nil { - return nil, err + for numIterations := uint64(0); numIterations < numDesiredLeaves; numIterations++ { + // The absolute opcode position the machine should be in after stepping. + position := machineStartIndex + stepSize*(numIterations+1) + + // Advance the machine in step size increments. + if err := machine.Step(ctx, stepSize); err != nil { + return nil, fmt.Errorf("failed to step machine to position %d: %w", position, err) + } + // If the machine reached the finished state, we can break out of the loop and append to + // our state roots slice a finished machine hash. 
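+ // Record how far the machine has actually stepped so we can compare it to the expected position below.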
+ machineStep := machine.GetStepCount() + if validator.MachineStatus(machine.Status()) == validator.MachineStatusFinished { + gs := machine.GetGlobalState() + hash := crypto.Keccak256Hash([]byte("Machine finished:"), gs.Hash().Bytes()) + stateRoots = append(stateRoots, hash) + break + } + // Otherwise, if the position and machine step mismatch and the machine is running, something went wrong. + if position != machineStep { + machineRunning := machine.IsRunning() + if machineRunning || machineStep > position { + return nil, fmt.Errorf("machine is in wrong position want: %d, got: %d", position, machineStep) + } } stateRoots = append(stateRoots, machine.Hash()) } - return stateRoots, nil - }) -} -func (e *executionRun) GetSmallStepLeavesUpTo(bigStep uint64, toSmallStep uint64, numOpcodesPerBigStep uint64) containers.PromiseInterface[[]common.Hash] { - return stopwaiter.LaunchPromiseThread[[]common.Hash](e, func(ctx context.Context) ([]common.Hash, error) { - var stateRoots []common.Hash - fromSmall := bigStep * numOpcodesPerBigStep - toSmall := fromSmall + toSmallStep - for i := fromSmall; i <= toSmall; i++ { - machineStep, err := e.intermediateGetStepAt(ctx, i) - if err != nil { - return nil, err - } - stateRoots = append(stateRoots, machineStep.Hash) + // If the machine finished in less than the number of hashes we anticipate, we pad + // to the expected value by repeating the last machine hash until the state roots are the correct + // length. + for uint64(len(stateRoots)) < numDesiredLeaves { + stateRoots = append(stateRoots, stateRoots[len(stateRoots)-1]) } return stateRoots, nil })