diff --git a/README.md b/README.md index 7175a16..84cff98 100644 --- a/README.md +++ b/README.md @@ -290,6 +290,119 @@ Notably, the `ssz.DefineStaticBytes` call from our old code (which got given a ` Note, *checked methods* entail a runtime cost. When decoding such opaque slices, we can't blindly fill the fields with data, rather we need to ensure that they are allocated and that they are of the correct size. Ideally only use *checked methods* for prototyping or for pre-existing types where you just have to run with whatever you have and can't change the field to an array. +### Monolithic types + +We've seen previously that asymmetric codecs can be used to implement custom serialization logic for types that might encode in a variety of ways depending on their data content. + +One very specific subset of that scenario is the Ethereum consensus typeset. Whenever a new fork is released, a number of types are slightly modified, usually by adding new fields to existing structs. In the beacon specs, this usually results in an explosion of types: a new base type for fork X is created (e.g. `BeaconBlockBodyBellatrix`), but any type containing it also needs to be re-created for fork X (e.g. `BeaconBlockBellatrix`), resulting in cascading type creations. Case in point: there are [79 consensus types in Prysm](https://pkg.go.dev/github.com/prysmaticlabs/prysm/v5@v5.1.0/proto/eth/v2#pkg-types), most of which are copies of one another with tiny additions. + +This design is definitely clean and works well if these containers are used just as data transmission or storage objects. However, operating on hundreds of types storing the same thing in a live codebase is unwieldy. In [go-ethereum](https://github.com/ethereum/go-ethereum) we've always used monolithic types that encode just right according to the RLP specs of the EL forks, so this library aims to provide similar support for the SSZ world too. + +We define a *monolithic type* as a container that can be encoded/decoded differently, based on what fork the codec runs in. To give an example, let's look at the previous `ExecutionPayload`, but instead of using it to represent a single possible consensus form, let's define *all* possible fields across all possible forks: + +```go +type ExecutionPayloadMonolith struct { +	ParentHash    Hash +	FeeRecipient  Address +	StateRoot     Hash +	ReceiptsRoot  Hash +	LogsBloom     LogsBloom +	PrevRandao    Hash +	BlockNumber   uint64 +	GasLimit      uint64 +	GasUsed       uint64 +	Timestamp     uint64 +	ExtraData     []byte +	BaseFeePerGas *uint256.Int +	BlockHash     Hash +	Transactions  [][]byte +	Withdrawals   []*Withdrawal // Appears in the Shanghai fork +	BlobGasUsed   *uint64       // Appears in the Cancun fork +	ExcessBlobGas *uint64       // Appears in the Cancun fork +} +``` + +Not much difference versus what we've used previously, but note that the fork-specific fields must all be nil-able (`Withdrawals` is a slice that can be `nil`, and the blob gas fields are `*uint64` pointers, which can also be `nil`).
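+ +To make the nil-ability concrete, here is a minimal sketch of how such a monolithic value might be populated per fork (the `newPayload` helper and its field values are made up for illustration; they are not part of the library): + +```go +// newPayload is a hypothetical constructor: it only fills in the fields that +// exist in the requested fork and leaves everything newer as nil. +func newPayload(fork ssz.Fork) *ExecutionPayloadMonolith { +	obj := &ExecutionPayloadMonolith{ +		BaseFeePerGas: uint256.NewInt(1000000000), +	} +	if fork >= ssz.ForkShanghai { +		obj.Withdrawals = []*Withdrawal{} // present (though empty) from Shanghai onwards +	} +	if fork >= ssz.ForkCancun { +		obj.BlobGasUsed = new(uint64) // zero-valued, but non-nil, from Cancun onwards +		obj.ExcessBlobGas = new(uint64) +	} +	return obj +} +```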
+ +Like before, we need to implement the `SizeSSZ` method: + +```go +func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { +	// Start out with the static size +	size := uint32(508) +	if sizer.Fork() >= ssz.ForkShanghai { +		size += 4 +	} +	if sizer.Fork() >= ssz.ForkCancun { +		size += 16 +	} +	if fixed { +		return size +	} +	// Append all the dynamic sizes +	size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) +	size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) +	if sizer.Fork() >= ssz.ForkShanghai { +		size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) +	} +	return size +} +``` + +This time, it's a bit more complex: + +- The static size can change depending on which fork we're encoding into. The base Frontier encoding is 508 bytes, but Shanghai adds the dynamic withdrawals (4 bytes static offset) and Cancun adds 2 static uint64s (2x8 bytes). You can retrieve what fork we're encoding into via the `ssz.Sizer` method argument. +- The dynamic size can change just the same: if we're encoding into Shanghai, we need to account for the withdrawals too. The uint64s are not dynamic, so they don't appear in that section of the size. + +Similarly to how `SizeSSZ` needs to be fork-enabled, `DefineSSZ` goes through a transformation: + +```go +func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) { +	// Define the static data (fields and dynamic offsets) +	ssz.DefineStaticBytes(codec, &obj.ParentHash)                                                                     // Field  ( 0) - ParentHash    -  32 bytes +	ssz.DefineStaticBytes(codec, &obj.FeeRecipient)                                                                   // Field  ( 1) - FeeRecipient  -  20 bytes +	ssz.DefineStaticBytes(codec, &obj.StateRoot)                                                                      // Field  ( 2) - StateRoot     -  32 bytes +	ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot)                                                                   // Field  ( 3) - ReceiptsRoot  -  32 bytes +	ssz.DefineStaticBytes(codec, &obj.LogsBloom)                                                                      // Field  ( 4) - LogsBloom     - 256 bytes +	ssz.DefineStaticBytes(codec, &obj.PrevRandao)                                                                     // Field  ( 5) - PrevRandao    -  32 bytes +	ssz.DefineUint64(codec, &obj.BlockNumber)                                                                         // Field  ( 6) - BlockNumber   -   8 bytes +	ssz.DefineUint64(codec, &obj.GasLimit)                                                                            // Field  ( 7) - GasLimit      -   8 bytes +	ssz.DefineUint64(codec, &obj.GasUsed)                                                                             // Field  ( 8) - GasUsed       -   8 bytes +	ssz.DefineUint64(codec, &obj.Timestamp)                                                                           // Field  ( 9) - Timestamp     -   8 bytes +	ssz.DefineDynamicBytesOffset(codec, &obj.ExtraData, 32)                                                           // Offset (10) - ExtraData     -   4 bytes +	ssz.DefineUint256(codec, &obj.BaseFeePerGas)                                                                      // Field  (11) - BaseFeePerGas -  32 bytes +	ssz.DefineStaticBytes(codec, &obj.BlockHash)                                                                      // Field  (12) - BlockHash     -  32 bytes +	ssz.DefineSliceOfDynamicBytesOffset(codec, &obj.Transactions, 1048576, 1073741824)                                // Offset (13) - Transactions  -   4 bytes +	ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai})  // Offset (14) - Withdrawals   -   4 bytes +	ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun})                     // Field  (15) - BlobGasUsed   -   8 bytes +	ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun})                   // Field  (16) - ExcessBlobGas -   8 bytes + +	// Define the dynamic data (fields) +	ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32)                                                          // Field  (10) - ExtraData     - ? bytes +	ssz.DefineSliceOfDynamicBytesContent(codec, &obj.Transactions, 1048576, 1073741824)                               // Field  (13) - Transactions  - ? bytes +	ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field  (14) - Withdrawals   - ? bytes
+} +``` + +The above code is eerily similar to our previous codec, yet subtly different. Wherever fork-specific fields appear, the methods get suffixed with `OnFork` and are passed a rule describing which fork the field applies to (e.g. `ssz.ForkFilter{Added: ssz.ForkCancun}`). There are good reasons for both: + +- The `SizeSSZ` method used `if` clauses to check for forks and behaved differently based on which fork we're in. That is clean; however, decoding has a quirk: if we decode into a pre-existing object (with fields set to arbitrary junk), the fields not present in a fork need to be nil-ed out. As such, `if` clauses within the definitions won't work any more: we need to "define" the missing fields too, to ensure they get nil-ed correctly. Thus the `OnFork` suffix on the fork-specific fields, which are always defined, regardless of fork. +- Of course, calling an `OnFork` method is kind of pointless without specifying which fork we want a field to be present in. That's the `ssz.ForkFilter` parameter. By making it a slightly more complex filter type, the SSZ library supports both adding new fields in a fork and removing old ones (both cases have happened in the beacon chain). Other operations will be added as needed. + +Lastly, to encode the above `ExecutionPayloadMonolith` into an SSZ stream, we can't use the tried and proven `ssz.EncodeToStream`, since that has no way to know what fork we'd like to use. Rather, again, we need to call an `OnFork` version: + +```go +func main() { +	out := new(bytes.Buffer) +	if err := ssz.EncodeToStreamOnFork(out, new(ExecutionPayloadMonolith), ssz.ForkCancun); err != nil { +		panic(err) +	} +	fmt.Printf("ssz: %#x\n", out) +} +``` + +*As a side note, although the SSZ library has the Ethereum hard-forks included (e.g. `ssz.ForkCancun` and `ssz.ForkDeneb`), there is nothing stopping a user of the library from using their own fork enum (e.g. `mypkg.ForkAlice` and `mypkg.ForkBob`); just type it with `ssz.Fork` and make sure `0` means some variation of `unknown`/`present in all forks`*. + ## Generated encoders More often than not, the Go structs that you'd like to serialize to/from SSZ are simple data containers. Without some particular quirk you'd like to explicitly support, there's little reason to spend precious time counting the bits and digging through a long list of encoder methods to call. @@ -378,7 +491,7 @@ type ExecutionPayload struct { } ``` -Calling the generator as before, just with the `ExecutionPayload` yields in the below, much more interesting code: +Calling the generator as before, just with the `ExecutionPayload` yields the below, fork-enhanced code: ```go // Code generated by github.com/karalabe/ssz. DO NOT EDIT. @@ -458,6 +571,101 @@ This functionality can be very helpful in detecting refactor issues, where the u As such, we'd recommend *always* tagging all SSZ encoded fields with their sizes. It results in both safer code and self-documenting code. +### Monolithic types + +This library supports monolithic types that encode differently based on what fork the codec is operating in. Naturally, that is a perfect example of something that would be useful to be able to generate, and indeed, the code generator can do it. + +- Monolithic type fields can be tagged with a `ssz-fork:"name"` Go struct tag, which the code generator will pick up and map from its textual form to the pre-declared fork identifiers. +- The fork names follow the Go build constraint rules: +  - A field can be declared introduced in fork `X` via `ssz-fork:"x"`. +  - A field can be declared removed in fork `X` via `ssz-fork:"!x"` (see the short sketch after this list).
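+ +For example, a container that drops a field in a later fork would be tagged like this (the type and field names below are hypothetical; only the tag syntax comes from the rules above): + +```go +type ExampleContainer struct { +	StateRoot   Hash +	LegacyRoots []Hash `ssz-max:"64" ssz-fork:"!cancun"` // hypothetical field, removed in the Cancun fork +} +``` + +Annotating the `ExecutionPayloadMonolith` from the previous section with these tags results in the following definition: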
+ +```go +type ExecutionPayloadMonolith struct { + ParentHash Hash + FeeRecipient Address + StateRoot Hash + ReceiptsRoot Hash + LogsBloom LogsBloom + PrevRandao Hash + BlockNumber uint64 + GasLimit uint64 + GasUsed uint64 + Timestamp uint64 + ExtraData []byte `ssz-max:"32"` + BaseFeePerGas *uint256.Int + BlockHash Hash + Transactions [][]byte `ssz-max:"1048576,1073741824"` + Withdrawals []*Withdrawal `ssz-max:"16" ssz-fork:"shanghai"` + BlobGasUsed *uint64 ` ssz-fork:"cancun"` + ExcessBlobGas *uint64 ` ssz-fork:"cancun"` +} +``` + +Calling the generator as before, just with the `ExecutionPayloadMonolith` yields the below, much more interesting code: + +```go +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package main + +import "github.com/karalabe/ssz" + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + if sizer.Fork() >= ssz.ForkShanghai { + size += 4 + } + if sizer.Fork() >= ssz.ForkCancun { + size += 8 + 8 + } + if fixed { + return size + } + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) + if sizer.Fork() >= ssz.ForkShanghai { + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes + ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes + ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes + ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes + ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes + ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes + ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.ExtraData, 32) // Offset (10) - ExtraData - 4 bytes + ssz.DefineUint256(codec, &obj.BaseFeePerGas) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes + ssz.DefineSliceOfDynamicBytesOffset(codec, &obj.Transactions, 1048576, 1073741824) // Offset (13) - Transactions - 4 bytes + ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Offset (14) - Withdrawals - 4 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (15) - BlobGasUsed - 8 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (16) - ExcessBlobGas - 8 bytes + + // Define the dynamic data (fields) + ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? bytes + ssz.DefineSliceOfDynamicBytesContent(codec, &obj.Transactions, 1048576, 1073741824) // Field (13) - Transactions - ? 
bytes + ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field (14) - Withdrawals - ? bytes +} +``` + +To explicitly highlight, the `ssz-fork` tags have been extracted from the struct definition and mapped into both an updated `SizeSSZ` method as well as a new definition style in `DefineSSZ`. + +Do note, this type (or anything embedding it) will require the `OnFork` versions of `ssz.Encode`, `ssz.Decode`, `ssz.Hash` and `ssz.Size` to be called, since naturally it relies on a correct fork being set in the codec's context. + +*Lastly, whilst the library itself supports custom fork enums, there is no support yet for these in the code generator. This will probably be added eventually via a `--forks=mypkg` or similar CLI flag, but it's a TODO for now.* + ### Go generate Perhaps just a mention, anyone using the code generator should call it from a `go:generate` compile instruction. It is much simpler and once added to the code, it can always be called via running `go generate`. diff --git a/example_asymmetric_test.go b/example_asymmetric_test.go index d50ccf8..368db8b 100644 --- a/example_asymmetric_test.go +++ b/example_asymmetric_test.go @@ -41,11 +41,11 @@ func (w *WithdrawalAsym) DefineSSZ(codec *ssz.Codec) { } func ExampleEncodeAsymmetricObject() { - blob := make([]byte, ssz.Size((*WithdrawalAsym)(nil), ssz.ForkUnknown)) - if err := ssz.EncodeToBytes(blob, new(WithdrawalAsym), ssz.ForkUnknown); err != nil { + blob := make([]byte, ssz.Size((*WithdrawalAsym)(nil))) + if err := ssz.EncodeToBytes(blob, new(WithdrawalAsym)); err != nil { panic(err) } - hash := ssz.HashSequential(new(WithdrawalAsym), ssz.ForkUnknown) + hash := ssz.HashSequential(new(WithdrawalAsym)) fmt.Printf("ssz: %#x\nhash: %#x\n", blob, hash) // Output: diff --git a/example_checked_test.go b/example_checked_test.go index bca5f0f..41d9e16 100644 --- a/example_checked_test.go +++ b/example_checked_test.go @@ -30,7 +30,7 @@ func ExampleDecodeCheckedObject() { blob := make([]byte, 44) obj := new(WithdrawalChecked) - if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); err != nil { + if err := ssz.DecodeFromBytes(blob, obj); err != nil { panic(err) } fmt.Printf("obj: %#x\n", obj) diff --git a/example_dynamic_test.go b/example_dynamic_test.go index 1f244bc..86fd3e7 100644 --- a/example_dynamic_test.go +++ b/example_dynamic_test.go @@ -72,8 +72,8 @@ func (e *ExecutionPayload) DefineSSZ(codec *ssz.Codec) { func ExampleEncodeDynamicObject() { obj := new(ExecutionPayload) - blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown)) - if err := ssz.EncodeToBytes(blob, obj, ssz.ForkUnknown); err != nil { + blob := make([]byte, ssz.Size(obj)) + if err := ssz.EncodeToBytes(blob, obj); err != nil { panic(err) } fmt.Printf("ssz: %#x\n", blob) diff --git a/example_static_test.go b/example_static_test.go index 4486222..70cbc08 100644 --- a/example_static_test.go +++ b/example_static_test.go @@ -31,10 +31,10 @@ func (w *Withdrawal) DefineSSZ(codec *ssz.Codec) { func ExampleEncodeStaticObject() { out := new(bytes.Buffer) - if err := ssz.EncodeToStream(out, new(Withdrawal), ssz.ForkUnknown); err != nil { + if err := ssz.EncodeToStream(out, new(Withdrawal)); err != nil { panic(err) } - hash := ssz.HashSequential(new(Withdrawal), ssz.ForkUnknown) + hash := ssz.HashSequential(new(Withdrawal)) fmt.Printf("ssz: %#x\nhash: %#x\n", out, hash) // Output: diff --git a/hasher.go b/hasher.go index 51d3b8a..3340aa7 100644 --- a/hasher.go +++ b/hasher.go @@ -572,7 +572,7 @@ func 
HashSliceOfStaticObjects[T StaticObject](h *Hasher, objects []T, maxItems u defer h.ascendMixinLayer(uint64(len(objects)), maxItems) // If threading is disabled, or hashing nothing, do it sequentially - if !h.threads || len(objects) == 0 || len(objects)*int(Size(objects[0], h.codec.fork)) < concurrencyThreshold { + if !h.threads || len(objects) == 0 || len(objects)*int(SizeOnFork(objects[0], h.codec.fork)) < concurrencyThreshold { for _, obj := range objects { h.descendLayer() obj.DefineSSZ(h.codec) diff --git a/ssz.go b/ssz.go index 42acef2..399df85 100644 --- a/ssz.go +++ b/ssz.go @@ -87,10 +87,21 @@ var sizerPool = sync.Pool{ }, } -// EncodeToStream serializes the object into a data stream. Do not use this -// method with a bytes.Buffer to write into a []byte slice, as that will do -// double the byte copying. For that use case, use EncodeToBytes instead. -func EncodeToStream(w io.Writer, obj Object, fork Fork) error { +// EncodeToStream serializes a non-monolithic object into a data stream. If the +// type contains fork-specific rules, use EncodeToStreamOnFork. +// +// Do not use this method with a bytes.Buffer to write into a []byte slice, as +// that will do double the byte copying. For that use case, use EncodeToBytes. +func EncodeToStream(w io.Writer, obj Object) error { + return EncodeToStreamOnFork(w, obj, ForkUnknown) +} + +// EncodeToStreamOnFork serializes a monolithic object into a data stream. If the +// type does not contain fork-specific rules, you can also use EncodeToStream. +// +// Do not use this method with a bytes.Buffer to write into a []byte slice, as that +// will do double the byte copying. For that use case, use EncodeToBytesOnFork. +func EncodeToStreamOnFork(w io.Writer, obj Object, fork Fork) error { codec := encoderPool.Get().(*Codec) defer encoderPool.Put(codec) @@ -113,13 +124,25 @@ func EncodeToStream(w io.Writer, obj Object, fork Fork) error { return err } -// EncodeToBytes serializes the object into a byte buffer. Don't use this method -// if you want to then write the buffer into a stream via some writer, as that -// would double the memory use for the temporary buffer. For that use case, use -// EncodeToStream instead. -func EncodeToBytes(buf []byte, obj Object, fork Fork) error { +// EncodeToBytes serializes a non-monolithic object into a byte buffer. If the +// type contains fork-specific rules, use EncodeToBytesOnFork. +// +// Don't use this method if you want to then write the buffer into a stream via +// some writer, as that would double the memory use for the temporary buffer. +// For that use case, use EncodeToStream. +func EncodeToBytes(buf []byte, obj Object) error { + return EncodeToBytesOnFork(buf, obj, ForkUnknown) +} + +// EncodeToBytesOnFork serializes a monolithic object into a byte buffer. If the +// type does not contain fork-specific rules, you can also use EncodeToBytes. +// +// Don't use this method if you want to then write the buffer into a stream via +// some writer, as that would double the memory use for the temporary buffer. +// For that use case, use EncodeToStreamOnFork. 
+func EncodeToBytesOnFork(buf []byte, obj Object, fork Fork) error { // Sanity check that we have enough space to serialize into - if size := Size(obj, fork); int(size) > len(buf) { + if size := SizeOnFork(obj, fork); int(size) > len(buf) { return fmt.Errorf("%w: buffer %d bytes, object %d bytes", ErrBufferTooSmall, len(buf), size) } codec := encoderPool.Get().(*Codec) @@ -144,10 +167,22 @@ func EncodeToBytes(buf []byte, obj Object, fork Fork) error { return err } -// DecodeFromStream parses an object with the given size out of a stream. Do not -// use this method with a bytes.Buffer to read from a []byte slice, as that will -// double the byte copying. For that use case, use DecodeFromBytes instead. -func DecodeFromStream(r io.Reader, obj Object, size uint32, fork Fork) error { +// DecodeFromStream parses a non-monolithic object with the given size out of a +// stream. If the type contains fork-specific rules, use DecodeFromStreamOnFork. +// +// Do not use this method with a bytes.Buffer to read from a []byte slice, as that +// will double the byte copying. For that use case, use DecodeFromBytes. +func DecodeFromStream(r io.Reader, obj Object, size uint32) error { + return DecodeFromStreamOnFork(r, obj, size, ForkUnknown) +} + +// DecodeFromStreamOnFork parses a monolithic object with the given size out of +// a stream. If the type does not contain fork-specific rules, you can also use +// DecodeFromStream. +// +// Do not use this method with a bytes.Buffer to read from a []byte slice, as that +// will double the byte copying. For that use case, use DecodeFromBytesOnFork. +func DecodeFromStreamOnFork(r io.Reader, obj Object, size uint32, fork Fork) error { // Retrieve a new decoder codec and set its data source codec := decoderPool.Get().(*Codec) defer decoderPool.Put(codec) @@ -178,11 +213,23 @@ func DecodeFromStream(r io.Reader, obj Object, size uint32, fork Fork) error { return err } -// DecodeFromBytes parses an object from a byte buffer. Do not use this method -// if you want to first read the buffer from a stream via some reader, as that -// would double the memory use for the temporary buffer. For that use case, use -// DecodeFromStream instead. -func DecodeFromBytes(blob []byte, obj Object, fork Fork) error { +// DecodeFromBytes parses a non-monolithic object from a byte buffer. If the type +// contains fork-specific rules, use DecodeFromBytesOnFork. +// +// Do not use this method if you want to first read the buffer from a stream via +// some reader, as that would double the memory use for the temporary buffer. For +// that use case, use DecodeFromStream instead. +func DecodeFromBytes(blob []byte, obj Object) error { + return DecodeFromBytesOnFork(blob, obj, ForkUnknown) +} + +// DecodeFromBytesOnFork parses a monolithic object from a byte buffer. If the +// type does not contain fork-specific rules, you can also use DecodeFromBytes. +// +// Do not use this method if you want to first read the buffer from a stream via +// some reader, as that would double the memory use for the temporary buffer. For +// that use case, use DecodeFromStreamOnFork instead. +func DecodeFromBytesOnFork(blob []byte, obj Object, fork Fork) error { // Reject decoding from an empty slice if len(blob) == 0 { return io.ErrUnexpectedEOF @@ -220,10 +267,21 @@ func DecodeFromBytes(blob []byte, obj Object, fork Fork) error { return err } -// HashSequential computes the ssz merkle root of the object on a single thread. 
-// This is useful for processing small objects with stable runtime and O(1) GC -// guarantees. -func HashSequential(obj Object, fork Fork) [32]byte { +// HashSequential computes the merkle root of a non-monolithic object on a single +// thread. This is useful for processing small objects with stable runtime and O(1) +// GC guarantees. +// +// If the type contains fork-specific rules, use HashSequentialOnFork. +func HashSequential(obj Object) [32]byte { + return HashSequentialOnFork(obj, ForkUnknown) +} + +// HashSequentialOnFork computes the merkle root of a monolithic object on a single +// thread. This is useful for processing small objects with stable runtime and O(1) +// GC guarantees. +// +// If the type does not contain fork-specific rules, you can also use HashSequential. +func HashSequentialOnFork(obj Object, fork Fork) [32]byte { codec := hasherPool.Get().(*Codec) defer hasherPool.Put(codec) defer codec.has.Reset() @@ -240,11 +298,23 @@ func HashSequential(obj Object, fork Fork) [32]byte { return codec.has.chunks[0] } -// HashConcurrent computes the ssz merkle root of the object on potentially multiple -// concurrent threads (iff some data segments are large enough to be worth it). This -// is useful for processing large objects, but will place a bigger load on your CPU -// and GC; and might be more variable timing wise depending on other load. -func HashConcurrent(obj Object, fork Fork) [32]byte { +// HashConcurrent computes the merkle root of a non-monolithic object on potentially +// multiple concurrent threads (iff some data segments are large enough to be worth +// it). This is useful for processing large objects, but will place a bigger load on +// your CPU and GC; and might be more variable timing wise depending on other load. +// +// If the type contains fork-specific rules, use HashConcurrentOnFork. +func HashConcurrent(obj Object) [32]byte { + return HashConcurrentOnFork(obj, ForkUnknown) +} + +// HashConcurrentOnFork computes the merkle root of a monolithic object on potentially +// multiple concurrent threads (iff some data segments are large enough to be worth +// it). This is useful for processing large objects, but will place a bigger load on +// your CPU and GC; and might be more variable timing wise depending on other load. +// +// If the type does not contain fork-specific rules, you can also use HashConcurrent. +func HashConcurrentOnFork(obj Object, fork Fork) [32]byte { codec := hasherPool.Get().(*Codec) defer hasherPool.Put(codec) defer codec.has.Reset() @@ -263,9 +333,16 @@ func HashConcurrent(obj Object, fork Fork) [32]byte { return codec.has.chunks[0] } -// Size retrieves the size of a ssz object, independent if it's a static or a -// dynamic one. -func Size(obj Object, fork Fork) uint32 { +// Size retrieves the size of a non-monolithic object, independent if it is static +// or dynamic. If the type contains fork-specific rules, use SizeOnFork. +func Size(obj Object) uint32 { + return SizeOnFork(obj, ForkUnknown) +} + +// SizeOnFork retrieves the size of a monolithic object, independent if it is +// static or dynamic. If the type does not contain fork-specific rules, you can +// also use Size. 
+func SizeOnFork(obj Object, fork Fork) uint32 { sizer := sizerPool.Get().(*Sizer) defer sizerPool.Put(sizer) diff --git a/tests/consensus_specs_test.go b/tests/consensus_specs_test.go index 77f4540..51cfee3 100644 --- a/tests/consensus_specs_test.go +++ b/tests/consensus_specs_test.go @@ -109,11 +109,11 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st // from yaml and check that too, but hex-in-yaml makes everything // beyond annoying. C'est la vie. obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkUnknown); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err != nil { t.Fatalf("failed to decode SSZ stream: %v", err) } blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj, ssz.ForkUnknown); err != nil { + if err := ssz.EncodeToStream(blob, obj); err != nil { t.Fatalf("failed to re-encode SSZ stream: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -122,11 +122,11 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkUnknown); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { t.Fatalf("failed to decode SSZ buffer: %v", err) } - bin := make([]byte, ssz.Size(obj, ssz.ForkUnknown)) - if err := ssz.EncodeToBytes(bin, obj, ssz.ForkUnknown); err != nil { + bin := make([]byte, ssz.Size(obj)) + if err := ssz.EncodeToBytes(bin, obj); err != nil { t.Fatalf("failed to re-encode SSZ buffer: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -136,14 +136,14 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st } // Encoder/decoder seems to work, check if the size reported by the // encoded object actually matches the encoded stream - if size := ssz.Size(obj, ssz.ForkUnknown); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } - hash := ssz.HashSequential(obj, ssz.ForkUnknown) + hash := ssz.HashSequential(obj) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("sequential merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } - hash = ssz.HashConcurrent(obj, ssz.ForkUnknown) + hash = ssz.HashConcurrent(obj) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("concurrent merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } @@ -177,11 +177,11 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st } // Try to decode, it should fail obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkUnknown); err == nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err == nil { t.Fatalf("succeeded in decoding invalid SSZ stream") } obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkUnknown); err == nil { + if err := ssz.DecodeFromBytes(inSSZ, obj); err == nil { t.Fatalf("succeeded in decoding invalid SSZ buffer") } }) @@ -349,11 +349,11 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, // from yaml and check that too, but hex-in-yaml makes everything // beyond annoying. C'est la vie. 
obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { + if err := ssz.DecodeFromStreamOnFork(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to decode SSZ stream: %v", err) } blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj, ssz.ForkMapping[fork]); err != nil { + if err := ssz.EncodeToStreamOnFork(blob, obj, ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to re-encode SSZ stream: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -362,11 +362,11 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { + if err := ssz.DecodeFromBytesOnFork(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to decode SSZ buffer: %v", err) } - bin := make([]byte, ssz.Size(obj, ssz.ForkMapping[fork])) - if err := ssz.EncodeToBytes(bin, obj, ssz.ForkMapping[fork]); err != nil { + bin := make([]byte, ssz.SizeOnFork(obj, ssz.ForkMapping[fork])) + if err := ssz.EncodeToBytesOnFork(bin, obj, ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to re-encode SSZ buffer: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -376,14 +376,14 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, } // Encoder/decoder seems to work, check if the size reported by the // encoded object actually matches the encoded stream - if size := ssz.Size(obj, ssz.ForkMapping[fork]); size != uint32(len(inSSZ)) { + if size := ssz.SizeOnFork(obj, ssz.ForkMapping[fork]); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } - hash := ssz.HashSequential(obj, ssz.ForkMapping[fork]) + hash := ssz.HashSequentialOnFork(obj, ssz.ForkMapping[fork]) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("sequential merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } - hash = ssz.HashConcurrent(obj, ssz.ForkMapping[fork]) + hash = ssz.HashConcurrentOnFork(obj, ssz.ForkMapping[fork]) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("concurrent merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } @@ -445,7 +445,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.Fatalf("failed to parse snappy ssz binary: %v", err) } inObj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), inObj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { + if err := ssz.DecodeFromStreamOnFork(bytes.NewReader(inSSZ), inObj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } // Start the benchmarks for all the different operations @@ -455,7 +455,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.EncodeToStream(io.Discard, inObj, ssz.ForkMapping[fork]); err != nil { + if err := ssz.EncodeToStreamOnFork(io.Discard, inObj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to encode SSZ stream: %v", err) } } @@ -468,7 +468,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.EncodeToBytes(blob, inObj, ssz.ForkMapping[fork]); err != nil { + if err := ssz.EncodeToBytesOnFork(blob, inObj, 
ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to encode SSZ bytes: %v", err) } } @@ -482,7 +482,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.DecodeFromStream(r, obj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { + if err := ssz.DecodeFromStreamOnFork(r, obj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } r.Reset(inSSZ) @@ -496,14 +496,14 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { + if err := ssz.DecodeFromBytesOnFork(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } } }) b.Run(fmt.Sprintf("%s/merkleize-sequential", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { + if err := ssz.DecodeFromBytesOnFork(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } b.SetBytes(int64(len(inSSZ))) @@ -511,12 +511,12 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - ssz.HashSequential(obj, ssz.ForkMapping[fork]) + ssz.HashSequentialOnFork(obj, ssz.ForkMapping[fork]) } }) b.Run(fmt.Sprintf("%s/merkleize-concurrent", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { + if err := ssz.DecodeFromBytesOnFork(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } b.SetBytes(int64(len(inSSZ))) @@ -524,7 +524,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - ssz.HashConcurrent(obj, ssz.ForkMapping[fork]) + ssz.HashConcurrentOnFork(obj, ssz.ForkMapping[fork]) } }) } @@ -717,7 +717,7 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) f.Fatalf("failed to parse snappy ssz binary: %v", err) } obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err == nil { + if err := ssz.DecodeFromStreamOnFork(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err == nil { // Stash away all valid ssz streams so we can play with decoding // into previously used objects valids = append(valids, inSSZ) @@ -734,11 +734,11 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) // Try the stream encoder/decoder obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err == nil { + if err := ssz.DecodeFromStreamOnFork(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err == nil { // Stream decoder succeeded, make sure it re-encodes correctly and // that the buffer decoder also succeeds parsing blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj, ssz.ForkFuture); err != nil { + if err := ssz.EncodeToStreamOnFork(blob, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode stream: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -746,27 +746,27 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded stream 
mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } - if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkFuture); err != nil { + if err := ssz.DecodeFromBytesOnFork(inSSZ, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to decode buffer: %v", err) } // Sanity check that hashing and size retrieval works - hash1 := ssz.HashSequential(obj, ssz.ForkFuture) - hash2 := ssz.HashConcurrent(obj, ssz.ForkFuture) + hash1 := ssz.HashSequentialOnFork(obj, ssz.ForkFuture) + hash2 := ssz.HashConcurrentOnFork(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { + if size := ssz.SizeOnFork(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } valid = true } // Try the buffer encoder/decoder obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkFuture); err == nil { + if err := ssz.DecodeFromBytesOnFork(inSSZ, obj, ssz.ForkFuture); err == nil { // Buffer decoder succeeded, make sure it re-encodes correctly and // that the stream decoder also succeeds parsing - bin := make([]byte, ssz.Size(obj, ssz.ForkFuture)) - if err := ssz.EncodeToBytes(bin, obj, ssz.ForkFuture); err != nil { + bin := make([]byte, ssz.SizeOnFork(obj, ssz.ForkFuture)) + if err := ssz.EncodeToBytesOnFork(bin, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode buffer: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -774,16 +774,16 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded buffer mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", bin, inSSZ, len(prefix), bin[len(prefix):], inSSZ[len(prefix):]) } - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err != nil { + if err := ssz.DecodeFromStreamOnFork(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err != nil { t.Fatalf("failed to decode stream: %v", err) } // Sanity check that hashing and size retrieval works - hash1 := ssz.HashSequential(obj, ssz.ForkFuture) - hash2 := ssz.HashConcurrent(obj, ssz.ForkFuture) + hash1 := ssz.HashSequentialOnFork(obj, ssz.ForkFuture) + hash2 := ssz.HashConcurrentOnFork(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { + if size := ssz.SizeOnFork(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } } @@ -794,14 +794,14 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) // Try the stream encoder/decoder into a prepped object obj = T(new(U)) - if err := ssz.DecodeFromBytes(vSSZ, obj, ssz.ForkFuture); err != nil { + if err := ssz.DecodeFromBytesOnFork(vSSZ, obj, ssz.ForkFuture); err != nil { panic(err) // we've already decoded this, cannot fail } - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err != nil { + if err := ssz.DecodeFromStreamOnFork(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err != nil { t.Fatalf("failed to decode stream into used object: %v", err) } blob := new(bytes.Buffer) - if err := 
ssz.EncodeToStream(blob, obj, ssz.ForkFuture); err != nil { + if err := ssz.EncodeToStreamOnFork(blob, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode stream from used object: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -809,24 +809,24 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded stream from used object mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } - hash1 := ssz.HashSequential(obj, ssz.ForkFuture) - hash2 := ssz.HashConcurrent(obj, ssz.ForkFuture) + hash1 := ssz.HashSequentialOnFork(obj, ssz.ForkFuture) + hash2 := ssz.HashConcurrentOnFork(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { + if size := ssz.SizeOnFork(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } // Try the buffer encoder/decoder into a prepped object obj = T(new(U)) - if err := ssz.DecodeFromBytes(vSSZ, obj, ssz.ForkFuture); err != nil { + if err := ssz.DecodeFromBytesOnFork(vSSZ, obj, ssz.ForkFuture); err != nil { panic(err) // we've already decoded this, cannot fail } - if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkFuture); err != nil { + if err := ssz.DecodeFromBytesOnFork(inSSZ, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to decode buffer into used object: %v", err) } - bin := make([]byte, ssz.Size(obj, ssz.ForkFuture)) - if err := ssz.EncodeToBytes(bin, obj, ssz.ForkFuture); err != nil { + bin := make([]byte, ssz.SizeOnFork(obj, ssz.ForkFuture)) + if err := ssz.EncodeToBytesOnFork(bin, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode buffer from used object: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -834,12 +834,12 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded buffer from used object mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", blob, inSSZ, len(prefix), bin[len(prefix):], inSSZ[len(prefix):]) } - hash1 = ssz.HashSequential(obj, ssz.ForkFuture) - hash2 = ssz.HashConcurrent(obj, ssz.ForkFuture) + hash1 = ssz.HashSequentialOnFork(obj, ssz.ForkFuture) + hash2 = ssz.HashConcurrentOnFork(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { + if size := ssz.SizeOnFork(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } } diff --git a/tests/corner_cases_test.go b/tests/corner_cases_test.go index 23d1b13..9564bf4 100644 --- a/tests/corner_cases_test.go +++ b/tests/corner_cases_test.go @@ -19,19 +19,19 @@ import ( func TestDecodeMissized(t *testing.T) { obj := new(testMissizedType) - blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown)+1) - if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { + blob := make([]byte, ssz.Size(obj)+1) + if err := ssz.DecodeFromBytes(blob, obj); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { t.Errorf("decode from bytes error mismatch: have %v, want %v", err, ssz.ErrObjectSlotSizeMismatch) } - if err := ssz.DecodeFromStream(bytes.NewReader(blob), 
obj, uint32(len(blob)), ssz.ForkUnknown); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { + if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob))); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { t.Errorf("decode from stream error mismatch: have %v, want %v", err, ssz.ErrObjectSlotSizeMismatch) } - blob = make([]byte, ssz.Size(obj, ssz.ForkUnknown)-1) - if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); !errors.Is(err, io.ErrUnexpectedEOF) { + blob = make([]byte, ssz.Size(obj)-1) + if err := ssz.DecodeFromBytes(blob, obj); !errors.Is(err, io.ErrUnexpectedEOF) { t.Errorf("decode from bytes error mismatch: have %v, want %v", err, io.ErrUnexpectedEOF) } - if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob)), ssz.ForkUnknown); !errors.Is(err, io.ErrUnexpectedEOF) { + if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob))); !errors.Is(err, io.ErrUnexpectedEOF) { t.Errorf("decode from stream error mismatch: have %v, want %v", err, io.ErrUnexpectedEOF) } } @@ -50,11 +50,11 @@ func (t *testMissizedType) DefineSSZ(codec *ssz.Codec) { func TestEncodeOversized(t *testing.T) { obj := new(testMissizedType) - blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown)-1) - if err := ssz.EncodeToBytes(blob, obj, ssz.ForkUnknown); !errors.Is(err, ssz.ErrBufferTooSmall) { + blob := make([]byte, ssz.Size(obj)-1) + if err := ssz.EncodeToBytes(blob, obj); !errors.Is(err, ssz.ErrBufferTooSmall) { t.Errorf("encode to bytes error mismatch: have %v, want %v", err, ssz.ErrBufferTooSmall) } - if err := ssz.EncodeToStream(&testEncodeOversizedStream{blob}, obj, ssz.ForkUnknown); err == nil { + if err := ssz.EncodeToStream(&testEncodeOversizedStream{blob}, obj); err == nil { t.Errorf("encode to stream error mismatch: have nil, want stream full") // wonky, but should be fine } } @@ -85,7 +85,7 @@ func TestZeroCounterOffset(t *testing.T) { if err != nil { panic(err) } - err = ssz.DecodeFromBytes(inSSZ, new(types.ExecutionPayload), ssz.ForkUnknown) + err = ssz.DecodeFromBytes(inSSZ, new(types.ExecutionPayload)) if !errors.Is(err, ssz.ErrZeroCounterOffset) { t.Errorf("decode error mismatch: have %v, want %v", err, ssz.ErrZeroCounterOffset) } @@ -97,7 +97,7 @@ func TestInvalidBoolean(t *testing.T) { if err != nil { panic(err) } - err = ssz.DecodeFromBytes(inSSZ, new(types.Validator), ssz.ForkUnknown) + err = ssz.DecodeFromBytes(inSSZ, new(types.Validator)) if !errors.Is(err, ssz.ErrInvalidBoolean) { t.Errorf("decode error mismatch: have %v, want %v", err, ssz.ErrInvalidBoolean) } diff --git a/tests/zeroval_test.go b/tests/zeroval_test.go index 0ee6c8c..8680366 100644 --- a/tests/zeroval_test.go +++ b/tests/zeroval_test.go @@ -19,11 +19,11 @@ func testZeroValue[T newableObject[U], U any](t *testing.T, fork ssz.Fork) { // Verify that streaming/buffering encoding of a zero value results in the // same binary (maybe incorrect, we just want to see that they're the same). 
str1 := new(bytes.Buffer) - if err := ssz.EncodeToStream(str1, T(new(U)), fork); err != nil { + if err := ssz.EncodeToStreamOnFork(str1, T(new(U)), fork); err != nil { t.Fatalf("failed to stream-encode zero-value object: %v", err) } - bin1 := make([]byte, ssz.Size(T(new(U)), fork)) - if err := ssz.EncodeToBytes(bin1, T(new(U)), fork); err != nil { + bin1 := make([]byte, ssz.SizeOnFork(T(new(U)), fork)) + if err := ssz.EncodeToBytesOnFork(bin1, T(new(U)), fork); err != nil { t.Fatalf("failed to buffer-encode zero-value object: %v", err) } if !bytes.Equal(str1.Bytes(), bin1) { @@ -32,11 +32,11 @@ func testZeroValue[T newableObject[U], U any](t *testing.T, fork ssz.Fork) { // Decode the previous encoding in both streaming/buffering mode and check // that the produced objects are the same. obj1 := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(bin1), T(new(U)), uint32(len(bin1)), fork); err != nil { + if err := ssz.DecodeFromStreamOnFork(bytes.NewReader(bin1), T(new(U)), uint32(len(bin1)), fork); err != nil { t.Fatalf("failed to stream-decode zero-value object: %v", err) } obj2 := T(new(U)) - if err := ssz.DecodeFromBytes(bin1, T(new(U)), fork); err != nil { + if err := ssz.DecodeFromBytesOnFork(bin1, T(new(U)), fork); err != nil { t.Fatalf("failed to buffer-decode zero-value object: %v", err) } if !reflect.DeepEqual(obj1, obj2) { @@ -46,11 +46,11 @@ func testZeroValue[T newableObject[U], U any](t *testing.T, fork ssz.Fork) { // nil-ness might be different. To verify that the decoding was successful, do // yet another round of encodings and check that to the original ones. str2 := new(bytes.Buffer) - if err := ssz.EncodeToStream(str2, obj1, fork); err != nil { + if err := ssz.EncodeToStreamOnFork(str2, obj1, fork); err != nil { t.Fatalf("failed to stream-encode decoded object: %v", err) } - bin2 := make([]byte, ssz.Size(obj1, fork)) - if err := ssz.EncodeToBytes(bin2, obj1, fork); err != nil { + bin2 := make([]byte, ssz.SizeOnFork(obj1, fork)) + if err := ssz.EncodeToBytesOnFork(bin2, obj1, fork); err != nil { t.Fatalf("failed to buffer-encode decoded object: %v", err) } if !bytes.Equal(str2.Bytes(), bin2) { @@ -62,10 +62,10 @@ func testZeroValue[T newableObject[U], U any](t *testing.T, fork ssz.Fork) { // Encoding/decoding seems to work, hash the zero-value and re-encoded value // in both sequential/concurrent more and verify the results. hashes := map[string][32]byte{ - "zero-value-sequential": ssz.HashSequential(T(new(U)), fork), - "zero-value-concurrent": ssz.HashConcurrent(T(new(U)), fork), - "decoded-sequential": ssz.HashSequential(obj1, fork), - "decoded-concurrent": ssz.HashSequential(obj1, fork), + "zero-value-sequential": ssz.HashSequentialOnFork(T(new(U)), fork), + "zero-value-concurrent": ssz.HashConcurrentOnFork(T(new(U)), fork), + "decoded-sequential": ssz.HashSequentialOnFork(obj1, fork), + "decoded-concurrent": ssz.HashSequentialOnFork(obj1, fork), } for key1, hash1 := range hashes { for key2, hash2 := range hashes {