// ssz: Go Simple Serialize (SSZ) codec library
// Copyright 2024 ssz Authors
// SPDX-License-Identifier: BSD-3-Clause

// Package ssz is a simplified SSZ encoder/decoder.
package ssz

import (
	"fmt"
	"io"
	"sync"
	"unsafe"
)

// Object defines the methods a type needs to implement to be used as an SSZ
// encodable and decodable object.
type Object interface {
	// DefineSSZ defines how an object is encoded/decoded.
	DefineSSZ(codec *Codec)
}

// StaticObject defines the methods a type needs to implement to be used as an
// SSZ encodable and decodable static object.
type StaticObject interface {
	Object

	// SizeSSZ returns the total size of the SSZ object.
	//
	// Note, StaticObject.SizeSSZ and DynamicObject.SizeSSZ deliberately clash
	// to allow the compiler to detect placing one or the other in reversed
	// data slots on an SSZ container.
	SizeSSZ(siz *Sizer) uint32
}

// DynamicObject defines the methods a type needs to implement to be used as an
// SSZ encodable and decodable dynamic object.
type DynamicObject interface {
	Object

	// SizeSSZ returns either the static size of the object if fixed == true,
	// or the total size otherwise.
	//
	// Note, StaticObject.SizeSSZ and DynamicObject.SizeSSZ deliberately clash
	// to allow the compiler to detect placing one or the other in reversed
	// data slots on an SSZ container.
	SizeSSZ(siz *Sizer, fixed bool) uint32
}
// encoderPool is a pool of SSZ encoders to reuse some tiny internal helpers
// without hitting Go's GC constantly.
var encoderPool = sync.Pool{
	New: func() any {
		codec := &Codec{enc: new(Encoder)}
		codec.enc.codec = codec
		codec.enc.sizer = &Sizer{codec: codec}
		return codec
	},
}

// decoderPool is a pool of SSZ decoders to reuse some tiny internal helpers
// without hitting Go's GC constantly.
var decoderPool = sync.Pool{
	New: func() any {
		codec := &Codec{dec: new(Decoder)}
		codec.dec.codec = codec
		codec.dec.sizer = &Sizer{codec: codec}
		return codec
	},
}

// hasherPool is a pool of SSZ hashers to reuse some tiny internal helpers
// without hitting Go's GC constantly.
var hasherPool = sync.Pool{
	New: func() any {
		codec := &Codec{has: new(Hasher)}
		codec.has.codec = codec
		codec.has.sizer = &Sizer{codec: codec}
		return codec
	},
}

// sizerPool is a pool of SSZ sizers to reuse some tiny internal helpers
// without hitting Go's GC constantly.
var sizerPool = sync.Pool{
	New: func() any {
		return &Sizer{codec: new(Codec)}
	},
}
// EncodeToStream serializes a non-monolithic object into a data stream. If the
// type contains fork-specific rules, use EncodeToStreamOnFork.
//
// Do not use this method with a bytes.Buffer to write into a []byte slice, as
// that will do double the byte copying. For that use case, use EncodeToBytes.
func EncodeToStream(w io.Writer, obj Object) error {
	return EncodeToStreamOnFork(w, obj, ForkUnknown)
}

// EncodeToStreamOnFork serializes a monolithic object into a data stream. If
// the type does not contain fork-specific rules, you can also use
// EncodeToStream.
//
// Do not use this method with a bytes.Buffer to write into a []byte slice, as
// that will do double the byte copying. For that use case, use
// EncodeToBytesOnFork.
func EncodeToStreamOnFork(w io.Writer, obj Object, fork Fork) error {
	codec := encoderPool.Get().(*Codec)
	defer encoderPool.Put(codec)

	codec.fork, codec.enc.outWriter = fork, w
	switch v := obj.(type) {
	case StaticObject:
		v.DefineSSZ(codec)
	case DynamicObject:
		codec.enc.offsetDynamics(v.SizeSSZ(codec.enc.sizer, true))
		v.DefineSSZ(codec)
	default:
		panic(fmt.Sprintf("unsupported type: %T", obj))
	}
	// Retrieve any errors, zero out the sink and return
	err := codec.enc.err

	codec.enc.outWriter = nil
	codec.enc.err = nil

	return err
}
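
// A minimal usage sketch, reusing the hypothetical Checkpoint type from the
// illustration above; w is any io.Writer (a file, a net.Conn, ...):
//
//	cp := &Checkpoint{Epoch: 42}
//	if err := ssz.EncodeToStream(w, cp); err != nil {
//		return err
//	}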
// EncodeToBytes serializes a non-monolithic object into a byte buffer. If the
// type contains fork-specific rules, use EncodeToBytesOnFork.
//
// Don't use this method if you want to then write the buffer into a stream via
// some writer, as that would double the memory use for the temporary buffer.
// For that use case, use EncodeToStream.
func EncodeToBytes(buf []byte, obj Object) error {
	return EncodeToBytesOnFork(buf, obj, ForkUnknown)
}

// EncodeToBytesOnFork serializes a monolithic object into a byte buffer. If
// the type does not contain fork-specific rules, you can also use
// EncodeToBytes.
//
// Don't use this method if you want to then write the buffer into a stream via
// some writer, as that would double the memory use for the temporary buffer.
// For that use case, use EncodeToStreamOnFork.
func EncodeToBytesOnFork(buf []byte, obj Object, fork Fork) error {
	// Sanity check that we have enough space to serialize into
	if size := SizeOnFork(obj, fork); int(size) > len(buf) {
		return fmt.Errorf("%w: buffer %d bytes, object %d bytes", ErrBufferTooSmall, len(buf), size)
	}
	codec := encoderPool.Get().(*Codec)
	defer encoderPool.Put(codec)

	codec.fork, codec.enc.outBuffer = fork, buf
	switch v := obj.(type) {
	case StaticObject:
		v.DefineSSZ(codec)
	case DynamicObject:
		codec.enc.offsetDynamics(v.SizeSSZ(codec.enc.sizer, true))
		v.DefineSSZ(codec)
	default:
		panic(fmt.Sprintf("unsupported type: %T", obj))
	}
	// Retrieve any errors, zero out the sink and return
	err := codec.enc.err

	codec.enc.outBuffer = nil
	codec.enc.err = nil

	return err
}
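
// A minimal usage sketch, pairing Size with EncodeToBytes to allocate exactly
// the buffer needed (Checkpoint is the hypothetical type from above):
//
//	blob := make([]byte, ssz.Size(cp))
//	if err := ssz.EncodeToBytes(blob, cp); err != nil {
//		return err
//	}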
// DecodeFromStream parses a non-monolithic object with the given size out of a
// stream. If the type contains fork-specific rules, use DecodeFromStreamOnFork.
//
// Do not use this method with a bytes.Buffer to read from a []byte slice, as
// that will double the byte copying. For that use case, use DecodeFromBytes.
func DecodeFromStream(r io.Reader, obj Object, size uint32) error {
	return DecodeFromStreamOnFork(r, obj, size, ForkUnknown)
}

// DecodeFromStreamOnFork parses a monolithic object with the given size out of
// a stream. If the type does not contain fork-specific rules, you can also use
// DecodeFromStream.
//
// Do not use this method with a bytes.Buffer to read from a []byte slice, as
// that will double the byte copying. For that use case, use
// DecodeFromBytesOnFork.
func DecodeFromStreamOnFork(r io.Reader, obj Object, size uint32, fork Fork) error {
	// Retrieve a new decoder codec and set its data source
	codec := decoderPool.Get().(*Codec)
	defer decoderPool.Put(codec)

	codec.fork, codec.dec.inReader = fork, r

	// Start a decoding round with length enforcement in place
	codec.dec.descendIntoSlot(size)

	switch v := obj.(type) {
	case StaticObject:
		v.DefineSSZ(codec)
	case DynamicObject:
		codec.dec.startDynamics(v.SizeSSZ(codec.dec.sizer, true))
		v.DefineSSZ(codec)
		codec.dec.flushDynamics()
	default:
		panic(fmt.Sprintf("unsupported type: %T", obj))
	}
	codec.dec.ascendFromSlot()

	// Retrieve any errors, zero out the source and return
	err := codec.dec.err

	codec.dec.inReader = nil
	codec.dec.err = nil

	return err
}
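
// A minimal usage sketch. The size must be known up front: for a static type
// it is a constant, so Size on a zero value works; for a dynamic type it would
// have to come from a length prefix or message frame (Checkpoint is the
// hypothetical type from above):
//
//	cp := new(Checkpoint)
//	if err := ssz.DecodeFromStream(r, cp, ssz.Size(cp)); err != nil {
//		return err
//	}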
// DecodeFromBytes parses a non-monolithic object from a byte buffer. If the
// type contains fork-specific rules, use DecodeFromBytesOnFork.
//
// Do not use this method if you want to first read the buffer from a stream
// via some reader, as that would double the memory use for the temporary
// buffer. For that use case, use DecodeFromStream instead.
func DecodeFromBytes(blob []byte, obj Object) error {
	return DecodeFromBytesOnFork(blob, obj, ForkUnknown)
}

// DecodeFromBytesOnFork parses a monolithic object from a byte buffer. If the
// type does not contain fork-specific rules, you can also use DecodeFromBytes.
//
// Do not use this method if you want to first read the buffer from a stream
// via some reader, as that would double the memory use for the temporary
// buffer. For that use case, use DecodeFromStreamOnFork instead.
func DecodeFromBytesOnFork(blob []byte, obj Object, fork Fork) error {
	// Reject decoding from an empty slice
	if len(blob) == 0 {
		return io.ErrUnexpectedEOF
	}
	// Retrieve a new decoder codec and set its data source
	codec := decoderPool.Get().(*Codec)
	defer decoderPool.Put(codec)

	codec.fork = fork
	codec.dec.inBuffer = blob
	codec.dec.inBufEnd = uintptr(unsafe.Pointer(&blob[0])) + uintptr(len(blob))

	// Start a decoding round with length enforcement in place
	codec.dec.descendIntoSlot(uint32(len(blob)))

	switch v := obj.(type) {
	case StaticObject:
		v.DefineSSZ(codec)
	case DynamicObject:
		codec.dec.startDynamics(v.SizeSSZ(codec.dec.sizer, true))
		v.DefineSSZ(codec)
		codec.dec.flushDynamics()
	default:
		panic(fmt.Sprintf("unsupported type: %T", obj))
	}
	codec.dec.ascendFromSlot()

	// Retrieve any errors, zero out the source and return
	err := codec.dec.err

	codec.dec.inBufEnd = 0
	codec.dec.inBuffer = nil
	codec.dec.err = nil

	return err
}
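
// A minimal usage sketch (Checkpoint is the hypothetical type from above):
//
//	cp := new(Checkpoint)
//	if err := ssz.DecodeFromBytes(blob, cp); err != nil {
//		return err
//	}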
// HashSequential computes the merkle root of a non-monolithic object on a
// single thread. This is useful for processing small objects with stable
// runtime and O(1) GC guarantees.
//
// If the type contains fork-specific rules, use HashSequentialOnFork.
func HashSequential(obj Object) [32]byte {
	return HashSequentialOnFork(obj, ForkUnknown)
}

// HashSequentialOnFork computes the merkle root of a monolithic object on a
// single thread. This is useful for processing small objects with stable
// runtime and O(1) GC guarantees.
//
// If the type does not contain fork-specific rules, you can also use
// HashSequential.
func HashSequentialOnFork(obj Object, fork Fork) [32]byte {
	codec := hasherPool.Get().(*Codec)
	defer hasherPool.Put(codec)
	defer codec.has.Reset()

	codec.fork = fork

	codec.has.descendLayer()
	obj.DefineSSZ(codec)
	codec.has.ascendLayer(0)

	if len(codec.has.chunks) != 1 {
		panic(fmt.Sprintf("unfinished hashing: left %v", codec.has.groups))
	}
	return codec.has.chunks[0]
}
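
// A minimal usage sketch (Checkpoint is the hypothetical type from above):
//
//	root := ssz.HashSequential(cp) // [32]byte merkle root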
// HashConcurrent computes the merkle root of a non-monolithic object on
// potentially multiple concurrent threads (iff some data segments are large
// enough to be worth it). This is useful for processing large objects, but
// will place a bigger load on your CPU and GC, and might be more variable
// timing-wise depending on other load.
//
// If the type contains fork-specific rules, use HashConcurrentOnFork.
func HashConcurrent(obj Object) [32]byte {
	return HashConcurrentOnFork(obj, ForkUnknown)
}

// HashConcurrentOnFork computes the merkle root of a monolithic object on
// potentially multiple concurrent threads (iff some data segments are large
// enough to be worth it). This is useful for processing large objects, but
// will place a bigger load on your CPU and GC, and might be more variable
// timing-wise depending on other load.
//
// If the type does not contain fork-specific rules, you can also use
// HashConcurrent.
func HashConcurrentOnFork(obj Object, fork Fork) [32]byte {
	codec := hasherPool.Get().(*Codec)
	defer hasherPool.Put(codec)
	defer codec.has.Reset()

	codec.fork = fork
	codec.has.threads = true

	codec.has.descendLayer()
	obj.DefineSSZ(codec)
	codec.has.ascendLayer(0)

	if len(codec.has.chunks) != 1 {
		panic(fmt.Sprintf("unfinished hashing: left %v", codec.has.groups))
	}
	codec.has.threads = false
	return codec.has.chunks[0]
}
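
// A minimal usage sketch; state stands in for some large, hypothetical Object
// (e.g. a full beacon state) where the concurrent hasher can pay off:
//
//	root := ssz.HashConcurrent(state)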
// Size retrieves the size of a non-monolithic object, independent of whether
// it is static or dynamic. If the type contains fork-specific rules, use
// SizeOnFork.
func Size(obj Object) uint32 {
	return SizeOnFork(obj, ForkUnknown)
}

// SizeOnFork retrieves the size of a monolithic object, independent of whether
// it is static or dynamic. If the type does not contain fork-specific rules,
// you can also use Size.
func SizeOnFork(obj Object, fork Fork) uint32 {
	sizer := sizerPool.Get().(*Sizer)
	defer sizerPool.Put(sizer)

	sizer.codec.fork = fork

	var size uint32
	switch v := obj.(type) {
	case StaticObject:
		size = v.SizeSSZ(sizer)
	case DynamicObject:
		size = v.SizeSSZ(sizer, false)
	default:
		panic(fmt.Sprintf("unsupported type: %T", obj))
	}
	return size
}
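
// A minimal usage sketch, using the size as a length prefix before streaming
// the encoding; encoding/binary is assumed imported, and Checkpoint is the
// hypothetical type from above:
//
//	size := ssz.Size(cp)
//	if err := binary.Write(w, binary.LittleEndian, size); err != nil {
//		return err
//	}
//	return ssz.EncodeToStream(w, cp)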