From 54314fc69356555ac72e506819bca07d54e1b4f2 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Wed, 19 Jun 2024 12:09:02 +0200 Subject: [PATCH] feat!: create native blob struct (#74) Co-authored-by: Rootul P --- blob/blob.go | 108 ++++++++++++++++++++--------- blob/blob.pb.go | 95 ++++++++++++------------- blob/blob.proto | 6 +- inclusion/commitment.go | 9 +-- inclusion/commitment_test.go | 9 +-- internal/test/factory.go | 2 +- shares/consts.go | 6 +- shares/parse_sparse_shares.go | 21 +++--- shares/parse_sparse_shares_test.go | 12 ++-- shares/split_sparse_shares.go | 20 ++---- shares/split_sparse_shares_test.go | 6 +- square/builder.go | 14 ++-- 12 files changed, 172 insertions(+), 136 deletions(-) diff --git a/blob/blob.go b/blob/blob.go index dd1bb08..a51b17e 100644 --- a/blob/blob.go +++ b/blob/blob.go @@ -3,7 +3,6 @@ package blob import ( - "bytes" "errors" "fmt" "sort" @@ -26,53 +25,83 @@ const ProtoIndexWrapperTypeID = "INDX" // MaxShareVersion is the maximum value a share version can be. See: [shares.MaxShareVersion]. const MaxShareVersion = 127 +// Blob (stands for binary large object) is a core type that represents data +// to be submitted to the Celestia network alongside an accompanying namespace +// and optional signer (for proving the signer of the blob) +type Blob struct { + namespace ns.Namespace + data []byte + shareVersion uint8 + signer []byte +} + // New creates a new coretypes.Blob from the provided data after performing // basic stateless checks over it. -func New(ns ns.Namespace, blob []byte, shareVersion uint8) *Blob { +func New(ns ns.Namespace, data []byte, shareVersion uint8, signer []byte) *Blob { return &Blob{ - NamespaceId: ns.ID(), - Data: blob, - ShareVersion: uint32(shareVersion), - NamespaceVersion: uint32(ns.Version()), - Signer: nil, + namespace: ns, + data: data, + shareVersion: shareVersion, + signer: signer, } } +// NewFromProto creates a Blob from the proto format and performs +// rudimentary validation checks on the structure +func NewFromProto(pb *BlobProto) (*Blob, error) { + if pb.ShareVersion > MaxShareVersion { + return nil, errors.New("share version can not be greater than MaxShareVersion") + } + if pb.NamespaceVersion > ns.NamespaceVersionMax { + return nil, errors.New("namespace version can not be greater than MaxNamespaceVersion") + } + if len(pb.Data) == 0 { + return nil, errors.New("blob data can not be empty") + } + ns, err := ns.New(uint8(pb.NamespaceVersion), pb.NamespaceId) + if err != nil { + return nil, fmt.Errorf("invalid namespace: %w", err) + } + return &Blob{ + namespace: ns, + data: pb.Data, + shareVersion: uint8(pb.ShareVersion), + signer: pb.Signer, + }, nil +} + // Namespace returns the namespace of the blob -func (b *Blob) Namespace() (ns.Namespace, error) { - return ns.NewFromBytes(b.RawNamespace()) +func (b *Blob) Namespace() ns.Namespace { + return b.namespace } -// RawNamespace returns the namespace of the blob -func (b *Blob) RawNamespace() []byte { - namespace := make([]byte, ns.NamespaceSize) - namespace[ns.VersionIndex] = uint8(b.NamespaceVersion) - copy(namespace[ns.NamespaceVersionSize:], b.NamespaceId) - return namespace +// ShareVersion returns the share version of the blob +func (b *Blob) ShareVersion() uint8 { + return b.shareVersion } -// Validate runs a stateless validity check on the form of the struct. 
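Note: the hunk above replaces the generated proto struct with a native Blob whose fields are unexported and populated through New and NewFromProto. Below is a minimal sketch of the new constructor and the proto round trip, using only identifiers that appear in this patch (MustNewV0 and NamespaceVersionZeroIDSize are taken from the package's own tests); the namespace bytes and payload are illustrative.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/celestiaorg/go-square/blob"
	ns "github.com/celestiaorg/go-square/namespace"
)

func main() {
	// Build a version-0 namespace the same way the package tests do.
	namespace := ns.MustNewV0(bytes.Repeat([]byte{0x01}, ns.NamespaceVersionZeroIDSize))

	// New stores its arguments directly; the stateless checks now live in
	// NewFromProto rather than in the removed Validate method.
	b := blob.New(namespace, []byte("hello celestia"), 0, nil)

	// Round-trip through the protobuf type used on the wire.
	restored, err := blob.NewFromProto(b.ToProto())
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(restored.Data(), b.Data()), restored.Compare(b) == 0)

	// A proto blob with empty data is rejected at construction time.
	_, err = blob.NewFromProto(&blob.BlobProto{
		NamespaceId:      namespace.ID(),
		NamespaceVersion: uint32(namespace.Version()),
		ShareVersion:     0,
	})
	fmt.Println(err) // "blob data can not be empty"
}
```

Since New itself only assigns fields in this patch, untrusted input should flow through NewFromProto, which enforces the share-version, namespace-version, and non-empty-data checks.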
-func (b *Blob) Validate() error { - if b == nil { - return errors.New("nil blob") - } - if len(b.NamespaceId) != ns.NamespaceIDSize { - return fmt.Errorf("namespace id must be %d bytes", ns.NamespaceIDSize) - } - if b.ShareVersion > MaxShareVersion { - return errors.New("share version can not be greater than MaxShareVersion") - } - if b.NamespaceVersion > ns.NamespaceVersionMax { - return errors.New("namespace version can not be greater than MaxNamespaceVersion") - } - if len(b.Data) == 0 { - return errors.New("blob data can not be empty") +// Signer returns the signer of the blob +func (b *Blob) Signer() []byte { + return b.signer +} + +// Data returns the data of the blob +func (b *Blob) Data() []byte { + return b.data +} + +func (b *Blob) ToProto() *BlobProto { + return &BlobProto{ + NamespaceId: b.namespace.ID(), + NamespaceVersion: uint32(b.namespace.Version()), + ShareVersion: uint32(b.shareVersion), + Data: b.data, + Signer: b.signer, } - return nil } func (b *Blob) Compare(other *Blob) int { - return bytes.Compare(b.RawNamespace(), other.RawNamespace()) + return b.namespace.Compare(other.namespace) } // UnmarshalBlobTx attempts to unmarshal a transaction into blob transaction. If an @@ -104,14 +133,25 @@ func UnmarshalBlobTx(tx []byte) (*BlobTx, bool) { // NOTE: Any checks on the blobs or the transaction must be performed in the // application func MarshalBlobTx(tx []byte, blobs ...*Blob) ([]byte, error) { + if len(blobs) == 0 { + return nil, errors.New("at least one blob must be provided") + } bTx := &BlobTx{ Tx: tx, - Blobs: blobs, + Blobs: blobsToProto(blobs), TypeId: ProtoBlobTxTypeID, } return proto.Marshal(bTx) } +func blobsToProto(blobs []*Blob) []*BlobProto { + pb := make([]*BlobProto, len(blobs)) + for i, b := range blobs { + pb[i] = b.ToProto() + } + return pb +} + // Sort sorts the blobs by their namespace. func Sort(blobs []*Blob) { sort.SliceStable(blobs, func(i, j int) bool { diff --git a/blob/blob.pb.go b/blob/blob.pb.go index 626b24a..0c7b4d9 100644 --- a/blob/blob.pb.go +++ b/blob/blob.pb.go @@ -20,11 +20,11 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Blob (named after binary large object) is a chunk of data submitted by a user +// BlobProto is the protobuf representation of a blob (binary large object) // to be published to the Celestia blockchain. The data of a Blob is published // to a namespace and is encoded into shares based on the format specified by // share_version. 
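Note: with the blob.go changes above, MarshalBlobTx accepts native *Blob values, converts them internally via blobsToProto, and now rejects an empty blob list, while UnmarshalBlobTx still returns the proto-level BlobTx. A sketch of the round trip under those signatures; sdkTx is a stand-in for a real encoded transaction, which MarshalBlobTx does not inspect.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/celestiaorg/go-square/blob"
	ns "github.com/celestiaorg/go-square/namespace"
)

func main() {
	namespace := ns.MustNewV0(bytes.Repeat([]byte{0x02}, ns.NamespaceVersionZeroIDSize))
	b := blob.New(namespace, []byte("payload"), 0, nil)

	// Placeholder for an encoded transaction (e.g. a MsgPayForBlobs).
	sdkTx := []byte("encoded-sdk-tx")

	raw, err := blob.MarshalBlobTx(sdkTx, b) // errors when no blobs are passed
	if err != nil {
		panic(err)
	}

	bTx, ok := blob.UnmarshalBlobTx(raw)
	if !ok {
		panic("bytes are not a blob transaction")
	}
	// The wrapper still exposes the protobuf representation: Blobs is now
	// []*BlobProto rather than the old []*Blob message.
	fmt.Println(bTx.TypeId, len(bTx.Blobs), bytes.Equal(bTx.Tx, sdkTx))
}
```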
-type Blob struct { +type BlobProto struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -38,8 +38,8 @@ type Blob struct { Signer []byte `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"` } -func (x *Blob) Reset() { - *x = Blob{} +func (x *BlobProto) Reset() { + *x = BlobProto{} if protoimpl.UnsafeEnabled { mi := &file_blob_blob_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -47,13 +47,13 @@ func (x *Blob) Reset() { } } -func (x *Blob) String() string { +func (x *BlobProto) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Blob) ProtoMessage() {} +func (*BlobProto) ProtoMessage() {} -func (x *Blob) ProtoReflect() protoreflect.Message { +func (x *BlobProto) ProtoReflect() protoreflect.Message { mi := &file_blob_blob_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -65,40 +65,40 @@ func (x *Blob) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Blob.ProtoReflect.Descriptor instead. -func (*Blob) Descriptor() ([]byte, []int) { +// Deprecated: Use BlobProto.ProtoReflect.Descriptor instead. +func (*BlobProto) Descriptor() ([]byte, []int) { return file_blob_blob_proto_rawDescGZIP(), []int{0} } -func (x *Blob) GetNamespaceId() []byte { +func (x *BlobProto) GetNamespaceId() []byte { if x != nil { return x.NamespaceId } return nil } -func (x *Blob) GetData() []byte { +func (x *BlobProto) GetData() []byte { if x != nil { return x.Data } return nil } -func (x *Blob) GetShareVersion() uint32 { +func (x *BlobProto) GetShareVersion() uint32 { if x != nil { return x.ShareVersion } return 0 } -func (x *Blob) GetNamespaceVersion() uint32 { +func (x *BlobProto) GetNamespaceVersion() uint32 { if x != nil { return x.NamespaceVersion } return 0 } -func (x *Blob) GetSigner() []byte { +func (x *BlobProto) GetSigner() []byte { if x != nil { return x.Signer } @@ -113,9 +113,9 @@ type BlobTx struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` - Blobs []*Blob `protobuf:"bytes,2,rep,name=blobs,proto3" json:"blobs,omitempty"` - TypeId string `protobuf:"bytes,3,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + Blobs []*BlobProto `protobuf:"bytes,2,rep,name=blobs,proto3" json:"blobs,omitempty"` + TypeId string `protobuf:"bytes,3,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` } func (x *BlobTx) Reset() { @@ -157,7 +157,7 @@ func (x *BlobTx) GetTx() []byte { return nil } -func (x *BlobTx) GetBlobs() []*Blob { +func (x *BlobTx) GetBlobs() []*BlobProto { if x != nil { return x.Blobs } @@ -240,32 +240,33 @@ var File_blob_blob_proto protoreflect.FileDescriptor var file_blob_blob_proto_rawDesc = []byte{ 0x0a, 0x0f, 0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x08, 0x70, 0x6b, 0x67, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x22, 0xa7, 0x01, 0x0a, 0x04, - 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x73, - 0x68, 0x61, 0x72, 0x65, 0x5f, 0x76, 
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x2b, 0x0a, 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, - 0x69, 0x67, 0x6e, 0x65, 0x72, 0x22, 0x57, 0x0a, 0x06, 0x42, 0x6c, 0x6f, 0x62, 0x54, 0x78, 0x12, - 0x0e, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x74, 0x78, 0x12, - 0x24, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x05, - 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x79, 0x70, 0x65, 0x49, 0x64, 0x22, 0x5c, - 0x0a, 0x0c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x0e, - 0x0a, 0x02, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x74, 0x78, 0x12, 0x23, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x65, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x79, 0x70, 0x65, 0x49, 0x64, 0x42, 0x27, 0x5a, 0x25, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x6c, 0x65, 0x73, - 0x74, 0x69, 0x61, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x6f, 0x2d, 0x73, 0x71, 0x75, 0x61, 0x72, 0x65, - 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x12, 0x08, 0x70, 0x6b, 0x67, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x22, 0xac, 0x01, 0x0a, 0x09, + 0x42, 0x6c, 0x6f, 0x62, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x65, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x10, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x22, 0x5c, 0x0a, 0x06, 0x42, 0x6c, + 0x6f, 0x62, 0x54, 0x78, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x02, 0x74, 0x78, 0x12, 0x29, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x2e, 0x42, + 0x6c, 0x6f, 0x62, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x62, 0x6c, 0x6f, 
0x62, 0x73, 0x12, + 0x17, 0x0a, 0x07, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x74, 0x79, 0x70, 0x65, 0x49, 0x64, 0x22, 0x5c, 0x0a, 0x0c, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x74, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, + 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x0c, 0x73, 0x68, 0x61, 0x72, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x17, 0x0a, + 0x07, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x74, 0x79, 0x70, 0x65, 0x49, 0x64, 0x42, 0x27, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x6c, 0x65, 0x73, 0x74, 0x69, 0x61, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x6f, 0x2d, 0x73, 0x71, 0x75, 0x61, 0x72, 0x65, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -282,12 +283,12 @@ func file_blob_blob_proto_rawDescGZIP() []byte { var file_blob_blob_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_blob_blob_proto_goTypes = []any{ - (*Blob)(nil), // 0: pkg.blob.Blob + (*BlobProto)(nil), // 0: pkg.blob.BlobProto (*BlobTx)(nil), // 1: pkg.blob.BlobTx (*IndexWrapper)(nil), // 2: pkg.blob.IndexWrapper } var file_blob_blob_proto_depIdxs = []int32{ - 0, // 0: pkg.blob.BlobTx.blobs:type_name -> pkg.blob.Blob + 0, // 0: pkg.blob.BlobTx.blobs:type_name -> pkg.blob.BlobProto 1, // [1:1] is the sub-list for method output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name @@ -302,7 +303,7 @@ func file_blob_blob_proto_init() { } if !protoimpl.UnsafeEnabled { file_blob_blob_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Blob); i { + switch v := v.(*BlobProto); i { case 0: return &v.state case 1: diff --git a/blob/blob.proto b/blob/blob.proto index 35ce242..2c55376 100644 --- a/blob/blob.proto +++ b/blob/blob.proto @@ -3,11 +3,11 @@ package pkg.blob; option go_package = "github.com/celestiaorg/go-square/blob"; -// Blob (named after binary large object) is a chunk of data submitted by a user +// BlobProto is the protobuf representation of a blob (binary large object) // to be published to the Celestia blockchain. The data of a Blob is published // to a namespace and is encoded into shares based on the format specified by // share_version. -message Blob { +message BlobProto { bytes namespace_id = 1; bytes data = 2; uint32 share_version = 3; @@ -22,7 +22,7 @@ message Blob { // using the relevant MsgPayForBlobs that is signed over in the encoded sdk.Tx. 
message BlobTx { bytes tx = 1; - repeated Blob blobs = 2; + repeated BlobProto blobs = 2; string type_id = 3; } diff --git a/inclusion/commitment.go b/inclusion/commitment.go index 4583dcf..ace9287 100644 --- a/inclusion/commitment.go +++ b/inclusion/commitment.go @@ -17,14 +17,6 @@ type MerkleRootFn func([][]byte) []byte // [data square layout rationale]: ../../specs/src/specs/data_square_layout.md // [blob share commitment rules]: ../../specs/src/specs/data_square_layout.md#blob-share-commitment-rules func CreateCommitment(blob *blob.Blob, merkleRootFn MerkleRootFn, subtreeRootThreshold int) ([]byte, error) { - if err := blob.Validate(); err != nil { - return nil, err - } - namespace, err := blob.Namespace() - if err != nil { - return nil, err - } - shares, err := sh.SplitBlobs(blob) if err != nil { return nil, err @@ -46,6 +38,7 @@ func CreateCommitment(blob *blob.Blob, merkleRootFn MerkleRootFn, subtreeRootThr cursor += treeSize } + namespace := blob.Namespace() // create the commitments by pushing each leaf set onto an NMT subTreeRoots := make([][]byte, len(leafSets)) for i, set := range leafSets { diff --git a/inclusion/commitment_test.go b/inclusion/commitment_test.go index d77d34e..7aec190 100644 --- a/inclusion/commitment_test.go +++ b/inclusion/commitment_test.go @@ -86,17 +86,12 @@ func TestCreateCommitment(t *testing.T) { namespace: ns1, blob: bytes.Repeat([]byte{0xFF}, shares.AvailableBytesFromSparseShares(2)), expectErr: true, - shareVersion: uint8(1), // unsupported share version + shareVersion: uint8(2), // unsupported share version }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - blob := &blob.Blob{ - NamespaceId: tt.namespace.ID(), - Data: tt.blob, - ShareVersion: uint32(tt.shareVersion), - NamespaceVersion: uint32(tt.namespace.Version()), - } + blob := blob.New(tt.namespace, tt.blob, tt.shareVersion, nil) res, err := inclusion.CreateCommitment(blob, twoLeafMerkleRoot, defaultSubtreeRootThreshold) if tt.expectErr { assert.Error(t, err) diff --git a/internal/test/factory.go b/internal/test/factory.go index d0f1a7a..b8abb80 100644 --- a/internal/test/factory.go +++ b/internal/test/factory.go @@ -44,7 +44,7 @@ func GenerateBlobTxWithNamespace(namespaces []namespace.Namespace, blobSizes []i panic("number of namespaces should match number of blob sizes") } for i, size := range blobSizes { - blobs[i] = blob.New(namespaces[i], RandomBytes(size), shares.DefaultShareVersion) + blobs[i] = blob.New(namespaces[i], RandomBytes(size), shares.DefaultShareVersion, nil) } blobTx, err := blob.MarshalBlobTx(MockPFB(toUint32(blobSizes)), blobs...) if err != nil { diff --git a/shares/consts.go b/shares/consts.go index 31baa02..46cc802 100644 --- a/shares/consts.go +++ b/shares/consts.go @@ -19,6 +19,10 @@ const ( // ShareVersionZero is the first share version format. ShareVersionZero = uint8(0) + // ShareVersionOne is the second share version format. + // It requires that a signer is included in the first share in the sequence. + ShareVersionOne = uint8(1) + // DefaultShareVersion is the defacto share version. Use this if you are // unsure of which version to use. DefaultShareVersion = ShareVersionZero @@ -60,4 +64,4 @@ const ( ) // SupportedShareVersions is a list of supported share versions. 
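Note: with the inclusion and shares changes above, CreateCommitment no longer re-validates the blob and reads the namespace through the accessor, and SupportedShareVersions grows to include ShareVersionOne (which expects a signer in the first share of the sequence). A sketch of computing a commitment for a version-zero blob; concatHash is a stand-in for a real NMT-backed root function, and the subtree-root threshold of 64 is a placeholder rather than a value taken from this patch.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"github.com/celestiaorg/go-square/blob"
	"github.com/celestiaorg/go-square/inclusion"
	ns "github.com/celestiaorg/go-square/namespace"
	"github.com/celestiaorg/go-square/shares"
)

// concatHash is an illustrative MerkleRootFn that hashes the concatenated
// leaves; production callers supply a namespaced merkle tree root function.
func concatHash(leaves [][]byte) []byte {
	h := sha256.New()
	for _, leaf := range leaves {
		h.Write(leaf)
	}
	return h.Sum(nil)
}

func main() {
	namespace := ns.MustNewV0(bytes.Repeat([]byte{0x03}, ns.NamespaceVersionZeroIDSize))
	b := blob.New(namespace, []byte("commit me"), shares.ShareVersionZero, nil)

	// CreateCommitment no longer calls the removed Validate method, so
	// invalid blobs must be filtered out earlier (e.g. via NewFromProto).
	commitment, err := inclusion.CreateCommitment(b, concatHash, 64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", commitment)
}
```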
-var SupportedShareVersions = []uint8{ShareVersionZero} +var SupportedShareVersions = []uint8{ShareVersionZero, ShareVersionOne} diff --git a/shares/parse_sparse_shares.go b/shares/parse_sparse_shares.go index 7927f0a..b0a5adb 100644 --- a/shares/parse_sparse_shares.go +++ b/shares/parse_sparse_shares.go @@ -5,11 +5,14 @@ import ( "fmt" "github.com/celestiaorg/go-square/blob" + ns "github.com/celestiaorg/go-square/namespace" ) type sequence struct { - blob *blob.Blob - sequenceLen uint32 + ns ns.Namespace + shareVersion uint8 + data []byte + sequenceLen uint32 } // parseSparseShares iterates through rawShares and parses out individual @@ -56,27 +59,29 @@ func parseSparseShares(shares []Share, supportedShareVersions []uint8) (blobs [] if err != nil { return nil, err } - blob := blob.New(ns, data, version) sequences = append(sequences, sequence{ - blob: blob, - sequenceLen: sequenceLen, + ns: ns, + shareVersion: version, + data: data, + sequenceLen: sequenceLen, }) } else { // continuation share if len(sequences) == 0 { return nil, fmt.Errorf("continuation share %v without a sequence start share", share) } + // FIXME: it doesn't look like we check whether all the shares belong to the same namespace. prev := &sequences[len(sequences)-1] data, err := share.RawData() if err != nil { return nil, err } - prev.blob.Data = append(prev.blob.Data, data...) + prev.data = append(prev.data, data...) } } for _, sequence := range sequences { // trim any padding from the end of the sequence - sequence.blob.Data = sequence.blob.Data[:sequence.sequenceLen] - blobs = append(blobs, sequence.blob) + sequence.data = sequence.data[:sequence.sequenceLen] + blobs = append(blobs, blob.New(sequence.ns, sequence.data, sequence.shareVersion, nil)) } return blobs, nil diff --git a/shares/parse_sparse_shares_test.go b/shares/parse_sparse_shares_test.go index d334723..b47c60b 100644 --- a/shares/parse_sparse_shares_test.go +++ b/shares/parse_sparse_shares_test.go @@ -70,8 +70,8 @@ func Test_parseSparseShares(t *testing.T) { // check that the namespaces and data are the same for i := 0; i < len(blobs); i++ { - assert.Equal(t, blobs[i].NamespaceId, parsedBlobs[i].NamespaceId, "parsed blob namespace does not match") - assert.Equal(t, blobs[i].Data, parsedBlobs[i].Data, "parsed blob data does not match") + assert.Equal(t, blobs[i].Namespace(), parsedBlobs[i].Namespace(), "parsed blob namespace does not match") + assert.Equal(t, blobs[i].Data(), parsedBlobs[i].Data(), "parsed blob data does not match") } }) @@ -87,8 +87,8 @@ func Test_parseSparseShares(t *testing.T) { // check that the namespaces and data are the same for i := 0; i < len(blobs); i++ { - assert.Equal(t, blobs[i].NamespaceId, parsedBlobs[i].NamespaceId) - assert.Equal(t, blobs[i].Data, parsedBlobs[i].Data) + assert.Equal(t, blobs[i].Namespace(), parsedBlobs[i].Namespace()) + assert.Equal(t, blobs[i].Data(), parsedBlobs[i].Data()) } }) } @@ -161,7 +161,7 @@ func generateRandomBlobWithNamespace(namespace ns.Namespace, size int) *blob.Blo if err != nil { panic(err) } - return blob.New(namespace, data, ShareVersionZero) + return blob.New(namespace, data, ShareVersionZero, nil) } func generateRandomBlob(dataSize int) *blob.Blob { @@ -173,7 +173,7 @@ func GenerateRandomlySizedBlobs(count, maxBlobSize int) []*blob.Blob { blobs := make([]*blob.Blob, count) for i := 0; i < count; i++ { blobs[i] = generateRandomBlob(rand.Intn(maxBlobSize)) - if len(blobs[i].Data) == 0 { + if len(blobs[i].Data()) == 0 { i-- } } diff --git a/shares/split_sparse_shares.go 
b/shares/split_sparse_shares.go index ed6f26b..9cbc6ad 100644 --- a/shares/split_sparse_shares.go +++ b/shares/split_sparse_shares.go @@ -22,22 +22,14 @@ func NewSparseShareSplitter() *SparseShareSplitter { // Write writes the provided blob to this sparse share splitter. It returns an // error or nil if no error is encountered. func (sss *SparseShareSplitter) Write(blob *blob.Blob) error { - if err := blob.Validate(); err != nil { - return err - } - - if !slices.Contains(SupportedShareVersions, uint8(blob.ShareVersion)) { - return fmt.Errorf("unsupported share version: %d", blob.ShareVersion) + if !slices.Contains(SupportedShareVersions, blob.ShareVersion()) { + return fmt.Errorf("unsupported share version: %d", blob.ShareVersion()) } - rawData := blob.Data - blobNamespace, err := blob.Namespace() - if err != nil { - return err - } + rawData := blob.Data() + blobNamespace := blob.Namespace() - // First share (note by validating the blob we can safely cast the share version to uint8) - b, err := NewBuilder(blobNamespace, uint8(blob.ShareVersion), true) + b, err := NewBuilder(blobNamespace, blob.ShareVersion(), true) if err != nil { return err } @@ -59,7 +51,7 @@ func (sss *SparseShareSplitter) Write(blob *blob.Blob) error { } sss.shares = append(sss.shares, *share) - b, err = NewBuilder(blobNamespace, uint8(blob.ShareVersion), false) + b, err = NewBuilder(blobNamespace, blob.ShareVersion(), false) if err != nil { return err } diff --git a/shares/split_sparse_shares_test.go b/shares/split_sparse_shares_test.go index 197adb6..8546a03 100644 --- a/shares/split_sparse_shares_test.go +++ b/shares/split_sparse_shares_test.go @@ -15,8 +15,8 @@ func TestSparseShareSplitter(t *testing.T) { ns1 := namespace.MustNewV0(bytes.Repeat([]byte{1}, namespace.NamespaceVersionZeroIDSize)) ns2 := namespace.MustNewV0(bytes.Repeat([]byte{2}, namespace.NamespaceVersionZeroIDSize)) - blob1 := blob.New(ns1, []byte("data1"), ShareVersionZero) - blob2 := blob.New(ns2, []byte("data2"), ShareVersionZero) + blob1 := blob.New(ns1, []byte("data1"), ShareVersionZero, nil) + blob2 := blob.New(ns2, []byte("data2"), ShareVersionZero, nil) sss := NewSparseShareSplitter() err := sss.Write(blob1) @@ -56,5 +56,5 @@ func TestWriteNamespacePaddingShares(t *testing.T) { } func newBlob(ns namespace.Namespace, shareVersion uint8) *blob.Blob { - return blob.New(ns, []byte("data"), shareVersion) + return blob.New(ns, []byte("data"), shareVersion, nil) } diff --git a/square/builder.go b/square/builder.go index 9fea7aa..cd75d17 100644 --- a/square/builder.go +++ b/square/builder.go @@ -96,7 +96,13 @@ func (b *Builder) AppendBlobTx(blobTx *blob.BlobTx) bool { // create a new blob element for each blob and track the worst-case share count blobElements := make([]*Element, len(blobTx.Blobs)) maxBlobShareCount := 0 - for idx, blob := range blobTx.Blobs { + for idx, protoBlob := range blobTx.Blobs { + blob, err := blob.NewFromProto(protoBlob) + if err != nil { + // TODO: we should look at having a go native BlobTx type + // that we have already verified instead of doing it twice here + panic(fmt.Sprintf("invalid blob %d: %v", idx, err)) + } blobElements[idx] = newElement(blob, len(b.Pfbs), idx, b.subtreeRootThreshold) maxBlobShareCount += blobElements[idx].maxShareOffset() } @@ -128,8 +134,8 @@ func (b *Builder) Export() (Square, error) { // of blobs within a namespace because b.Blobs are already ordered by tx // priority. 
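Note: SparseShareSplitter.Write above now relies on the blob accessors and only rejects unsupported share versions itself, since the Validate call is gone. A sketch of sorting blobs by namespace and feeding them to the splitter, using only APIs visible in this patch; the namespaces and payloads are illustrative.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/celestiaorg/go-square/blob"
	ns "github.com/celestiaorg/go-square/namespace"
	"github.com/celestiaorg/go-square/shares"
)

func main() {
	nsHigh := ns.MustNewV0(bytes.Repeat([]byte{0x09}, ns.NamespaceVersionZeroIDSize))
	nsLow := ns.MustNewV0(bytes.Repeat([]byte{0x01}, ns.NamespaceVersionZeroIDSize))

	blobs := []*blob.Blob{
		blob.New(nsHigh, []byte("written second"), shares.ShareVersionZero, nil),
		blob.New(nsLow, []byte("written first"), shares.ShareVersionZero, nil),
	}

	// Sort orders blobs by namespace, which the sparse share format expects.
	blob.Sort(blobs)

	splitter := shares.NewSparseShareSplitter()
	for _, b := range blobs {
		// Write reads the namespace and share version through the accessors;
		// any other problems surface while the shares are being built.
		if err := splitter.Write(b); err != nil {
			panic(err)
		}
	}
	fmt.Println("first namespace bytes:", blobs[0].Namespace().Bytes())
}
```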
sort.SliceStable(b.Blobs, func(i, j int) bool { - ns1 := append([]byte{byte(b.Blobs[i].Blob.NamespaceVersion)}, b.Blobs[i].Blob.NamespaceId...) - ns2 := append([]byte{byte(b.Blobs[j].Blob.NamespaceVersion)}, b.Blobs[j].Blob.NamespaceId...) + ns1 := b.Blobs[i].Blob.Namespace().Bytes() + ns2 := b.Blobs[j].Blob.Namespace().Bytes() return bytes.Compare(ns1, ns2) < 0 }) @@ -372,7 +378,7 @@ type Element struct { } func newElement(blob *blob.Blob, pfbIndex, blobIndex, subtreeRootThreshold int) *Element { - numShares := shares.SparseSharesNeeded(uint32(len(blob.Data))) + numShares := shares.SparseSharesNeeded(uint32(len(blob.Data()))) return &Element{ Blob: blob, PfbIndex: pfbIndex,
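Note: AppendBlobTx now converts each BlobProto through NewFromProto and panics on failure (see the TODO above), so callers may prefer to validate a decoded BlobTx before handing it to the builder. A sketch of such a guard; validateBlobTx is a hypothetical helper, not part of this patch.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/celestiaorg/go-square/blob"
	ns "github.com/celestiaorg/go-square/namespace"
)

// validateBlobTx mirrors the conversion AppendBlobTx performs via NewFromProto,
// letting a caller reject a malformed BlobTx up front instead of relying on the
// panic inside the builder.
func validateBlobTx(bTx *blob.BlobTx) ([]*blob.Blob, error) {
	blobs := make([]*blob.Blob, len(bTx.Blobs))
	for i, pb := range bTx.Blobs {
		b, err := blob.NewFromProto(pb)
		if err != nil {
			return nil, fmt.Errorf("invalid blob %d: %w", i, err)
		}
		blobs[i] = b
	}
	return blobs, nil
}

func main() {
	namespace := ns.MustNewV0(bytes.Repeat([]byte{0x05}, ns.NamespaceVersionZeroIDSize))
	bTx := &blob.BlobTx{
		Tx:    []byte("encoded-sdk-tx"),
		Blobs: []*blob.BlobProto{blob.New(namespace, []byte("data"), 0, nil).ToProto()},
	}
	blobs, err := validateBlobTx(bTx)
	fmt.Println(len(blobs), err)
}
```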