refactor!: move all blob share commitment code to the inclusion package (#2770)

## Overview

We currently have code for creating and proving commitments spread across the
inclusion, x/blob/types, and shares packages. This API-breaking refactor moves
all of the commitment code to the inclusion package.

The reasoning behind this is that it will eventually allow users to import the
commitment creation and proving code without having to import the blob module,
tendermint, or the SDK.

The reasoning behind keeping it in the inclusion package instead of shares is
that the entire reason for making a commitment is to prove inclusion, so the
name seems to fit better.

The reasoning behind not keeping it in the blob package is that it was
difficult to avoid an import cycle, since this logic requires the share
splitting logic for blobs.
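
As a rough sketch of the intended consumer-facing surface after this change (not part of the commit itself), a user could create a share commitment through the inclusion package alone. The blob construction below mirrors the new `commitment_test.go`; the namespace and payload bytes are placeholder values:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/celestiaorg/celestia-app/pkg/appconsts"
	"github.com/celestiaorg/celestia-app/pkg/blob"
	"github.com/celestiaorg/celestia-app/pkg/inclusion"
	appns "github.com/celestiaorg/celestia-app/pkg/namespace"
)

func main() {
	// Placeholder namespace and payload, mirroring the new commitment tests.
	ns := appns.MustNewV0(bytes.Repeat([]byte{0x1}, appns.NamespaceVersionZeroIDSize))
	b := &blob.Blob{
		NamespaceId:      ns.ID,
		Data:             bytes.Repeat([]byte{0xFF}, 3*appconsts.ShareSize),
		ShareVersion:     uint32(appconsts.ShareVersionZero),
		NamespaceVersion: uint32(ns.Version),
	}

	// Commitment creation now lives entirely in pkg/inclusion.
	commitment, err := inclusion.CreateCommitment(b)
	if err != nil {
		panic(err)
	}
	fmt.Printf("commitment: %x\n", commitment)
}
```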

## Checklist

- [x] New and updated code has appropriate documentation
- [x] New and updated code has new and/or updated testing
- [x] Required CI checks are passing
- [x] Visual proof for any user facing features like CLI or
documentation updates
- [ ] Linked issues closed with keywords
evan-forbes authored Oct 27, 2023
1 parent 36ecb01 commit 73942bf
Showing 13 changed files with 279 additions and 225 deletions.
28 changes: 25 additions & 3 deletions pkg/da/data_availability_header.go
@@ -4,14 +4,15 @@ import (
"bytes"
"errors"
"fmt"
"math"

"github.com/celestiaorg/rsmt2d"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/types"
"golang.org/x/exp/constraints"

"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/shares"
"github.com/celestiaorg/celestia-app/pkg/square"
"github.com/celestiaorg/celestia-app/pkg/wrapper"
daproto "github.com/celestiaorg/celestia-app/proto/celestia/core/v1/da"
)
@@ -66,7 +67,7 @@ func ExtendShares(s [][]byte) (*rsmt2d.ExtendedDataSquare, error) {
if !shares.IsPowerOfTwo(len(s)) {
return nil, fmt.Errorf("number of shares is not a power of 2: got %d", len(s))
}
squareSize := square.Size(len(s))
squareSize := SquareSize(len(s))

// here we construct a tree
// Note: uses the nmt wrapper to construct the tree.
@@ -190,5 +191,26 @@ func MinDataAvailabilityHeader() DataAvailabilityHeader {

// MinShares returns one tail-padded share.
func MinShares() [][]byte {
return shares.ToBytes(square.EmptySquare())
return shares.ToBytes(EmptySquareShares())
}

// EmptySquare is a copy of the function defined in the square package to avoid
// a circular dependency. TODO deduplicate
func EmptySquareShares() []shares.Share {
return shares.TailPaddingShares(appconsts.MinShareCount)
}

// SquareSize is a copy of the function defined in the square package to avoid
// a circular dependency. TODO deduplicate
func SquareSize(len int) int {
return RoundUpPowerOfTwo(int(math.Ceil(math.Sqrt(float64(len)))))
}

// RoundUpPowerOfTwo returns the next power of two greater than or equal to input.
func RoundUpPowerOfTwo[I constraints.Integer](input I) I {
var result I = 1
for result < input {
result = result << 1
}
return result
}
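
A quick illustrative check (not part of the diff) of the copied helpers above, assuming they are exported from pkg/da exactly as shown in the hunk:

```go
package main

import (
	"fmt"

	"github.com/celestiaorg/celestia-app/pkg/da"
)

func main() {
	fmt.Println(da.SquareSize(11))       // ceil(sqrt(11)) = 4, already a power of two -> 4
	fmt.Println(da.SquareSize(17))       // ceil(sqrt(17)) = 5, rounded up             -> 8
	fmt.Println(da.RoundUpPowerOfTwo(6)) // -> 8
}
```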
@@ -1,8 +1,9 @@
package shares
package inclusion

import (
"math"

"github.com/celestiaorg/celestia-app/pkg/da"
"golang.org/x/exp/constraints"
)

@@ -73,7 +74,7 @@ func roundUpByMultipleOf(cursor, v int) int {
// BlobMinSquareSize returns the minimum square size that can contain shareCount
// number of shares.
func BlobMinSquareSize(shareCount int) int {
return RoundUpPowerOfTwo(int(math.Ceil(math.Sqrt(float64(shareCount)))))
return da.RoundUpPowerOfTwo(int(math.Ceil(math.Sqrt(float64(shareCount)))))
}

// SubTreeWidth determines the maximum number of leaves per subtree in the share
@@ -93,7 +94,7 @@ func SubTreeWidth(shareCount, subtreeRootThreshold int) int {

// use a power of two equal to or larger than the multiple of the subtree
// root threshold
s = RoundUpPowerOfTwo(s)
s = da.RoundUpPowerOfTwo(s)

// use the minimum of the subtree width and the min square size, this
// guarantees that a valid value is returned
@@ -1,10 +1,11 @@
package shares
package inclusion

import (
"fmt"
"testing"

"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/da"
"github.com/stretchr/testify/assert"
)

@@ -232,7 +233,7 @@ func TestNextShareIndex(t *testing.T) {
name: "at threshold",
cursor: 11,
blobLen: appconsts.DefaultSubtreeRootThreshold,
squareSize: RoundUpPowerOfTwo(appconsts.DefaultSubtreeRootThreshold),
squareSize: da.RoundUpPowerOfTwo(appconsts.DefaultSubtreeRootThreshold),
expectedIndex: 11,
},
{
114 changes: 114 additions & 0 deletions pkg/inclusion/commitment.go
@@ -0,0 +1,114 @@
package inclusion

import (
"crypto/sha256"

"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/blob"
appns "github.com/celestiaorg/celestia-app/pkg/namespace"
appshares "github.com/celestiaorg/celestia-app/pkg/shares"
"github.com/celestiaorg/nmt"
"github.com/tendermint/tendermint/crypto/merkle"
)

// CreateCommitment generates the share commitment for a given blob.
// See [data square layout rationale] and [blob share commitment rules].
//
// [data square layout rationale]: ../../specs/src/specs/data_square_layout.md
// [blob share commitment rules]: ../../specs/src/specs/data_square_layout.md#blob-share-commitment-rules
func CreateCommitment(blob *blob.Blob) ([]byte, error) {
if err := blob.Validate(); err != nil {
return nil, err
}
namespace := blob.Namespace()

shares, err := appshares.SplitBlobs(blob)
if err != nil {
return nil, err
}

// the commitment is the root of a merkle mountain range with max tree size
// determined by the number of roots required to create a share commitment
// over that blob. The size of the tree is only increased if the number of
// subtree roots surpasses a constant threshold.
subTreeWidth := SubTreeWidth(len(shares), appconsts.DefaultSubtreeRootThreshold)
treeSizes, err := MerkleMountainRangeSizes(uint64(len(shares)), uint64(subTreeWidth))
if err != nil {
return nil, err
}
leafSets := make([][][]byte, len(treeSizes))
cursor := uint64(0)
for i, treeSize := range treeSizes {
leafSets[i] = appshares.ToBytes(shares[cursor : cursor+treeSize])
cursor = cursor + treeSize
}

// create the commitments by pushing each leaf set onto an nmt
subTreeRoots := make([][]byte, len(leafSets))
for i, set := range leafSets {
// create the nmt todo(evan) use nmt wrapper
tree := nmt.New(sha256.New(), nmt.NamespaceIDSize(appns.NamespaceSize), nmt.IgnoreMaxNamespace(true))
for _, leaf := range set {
// the namespace must be added again here even though it is already
// included in the leaf to ensure that the hash will match that of
// the nmt wrapper (pkg/wrapper). Each namespace is added to keep
// the namespace in the share, and therefore the parity data, while
// also allowing for the manual addition of the parity namespace to
// the parity data.
nsLeaf := make([]byte, 0)
nsLeaf = append(nsLeaf, namespace.Bytes()...)
nsLeaf = append(nsLeaf, leaf...)

err = tree.Push(nsLeaf)
if err != nil {
return nil, err
}
}
// add the root
root, err := tree.Root()
if err != nil {
return nil, err
}
subTreeRoots[i] = root
}
return merkle.HashFromByteSlices(subTreeRoots), nil
}

func CreateCommitments(blobs []*blob.Blob) ([][]byte, error) {
commitments := make([][]byte, len(blobs))
for i, blob := range blobs {
commitment, err := CreateCommitment(blob)
if err != nil {
return nil, err
}
commitments[i] = commitment
}
return commitments, nil
}

// MerkleMountainRangeSizes returns the sizes (number of leaf nodes) of the
// trees in a merkle mountain range constructed for a given totalSize and
// maxTreeSize.
//
// https://docs.grin.mw/wiki/chain-state/merkle-mountain-range/
// https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md
func MerkleMountainRangeSizes(totalSize, maxTreeSize uint64) ([]uint64, error) {
var treeSizes []uint64

for totalSize != 0 {
switch {
case totalSize >= maxTreeSize:
treeSizes = append(treeSizes, maxTreeSize)
totalSize = totalSize - maxTreeSize
case totalSize < maxTreeSize:
treeSize, err := appshares.RoundDownPowerOfTwo(totalSize)
if err != nil {
return treeSizes, err
}
treeSizes = append(treeSizes, treeSize)
totalSize = totalSize - treeSize
}
}

return treeSizes, nil
}
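
For intuition (not part of the commit): MerkleMountainRangeSizes decomposes greedily, emitting maxTreeSize-sized trees while enough leaves remain and then the largest powers of two that fit. For example, totalSize = 11 with maxTreeSize = 4 yields 4 (7 left), 4 (3 left), 2 (1 left), then 1, i.e. [4, 4, 2, 1] — the same sequence exercised in the tests below.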
108 changes: 108 additions & 0 deletions pkg/inclusion/commitment_test.go
@@ -0,0 +1,108 @@
package inclusion_test

import (
"bytes"
"testing"

"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/blob"
"github.com/celestiaorg/celestia-app/pkg/inclusion"
appns "github.com/celestiaorg/celestia-app/pkg/namespace"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func Test_MerkleMountainRangeHeights(t *testing.T) {
type test struct {
totalSize uint64
squareSize uint64
expected []uint64
}
tests := []test{
{
totalSize: 11,
squareSize: 4,
expected: []uint64{4, 4, 2, 1},
},
{
totalSize: 2,
squareSize: 64,
expected: []uint64{2},
},
{
totalSize: 64,
squareSize: 8,
expected: []uint64{8, 8, 8, 8, 8, 8, 8, 8},
},
// Height
// 3 x x
// / \ / \
// / \ / \
// / \ / \
// / \ / \
// 2 x x x x
// / \ / \ / \ / \
// 1 x x x x x x x x x
// / \ / \ / \ / \ / \ / \ / \ / \ / \
// 0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
{
totalSize: 19,
squareSize: 8,
expected: []uint64{8, 8, 2, 1},
},
}
for _, tt := range tests {
res, err := inclusion.MerkleMountainRangeSizes(tt.totalSize, tt.squareSize)
require.NoError(t, err)
assert.Equal(t, tt.expected, res)
}
}

// TestCreateCommitment will fail if a change is made to share encoding or how
// the commitment is calculated. If this is the case, the expected commitment
// bytes will need to be updated.
func TestCreateCommitment(t *testing.T) {
ns1 := appns.MustNewV0(bytes.Repeat([]byte{0x1}, appns.NamespaceVersionZeroIDSize))

type test struct {
name string
namespace appns.Namespace
blob []byte
expected []byte
expectErr bool
shareVersion uint8
}
tests := []test{
{
name: "blob of 3 shares succeeds",
namespace: ns1,
blob: bytes.Repeat([]byte{0xFF}, 3*appconsts.ShareSize),
expected: []byte{0x3b, 0x9e, 0x78, 0xb6, 0x64, 0x8e, 0xc1, 0xa2, 0x41, 0x92, 0x5b, 0x31, 0xda, 0x2e, 0xcb, 0x50, 0xbf, 0xc6, 0xf4, 0xad, 0x55, 0x2d, 0x32, 0x79, 0x92, 0x8c, 0xa1, 0x3e, 0xbe, 0xba, 0x8c, 0x2b},
shareVersion: appconsts.ShareVersionZero,
},
{
name: "blob with unsupported share version should return error",
namespace: ns1,
blob: bytes.Repeat([]byte{0xFF}, 12*appconsts.ShareSize),
expectErr: true,
shareVersion: uint8(1), // unsupported share version
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blob := &blob.Blob{
NamespaceId: tt.namespace.ID,
Data: tt.blob,
ShareVersion: uint32(tt.shareVersion),
NamespaceVersion: uint32(tt.namespace.Version),
}
res, err := inclusion.CreateCommitment(blob)
if tt.expectErr {
assert.Error(t, err)
return
}
assert.NoError(t, err)
assert.Equal(t, tt.expected, res)
})
}
}
6 changes: 2 additions & 4 deletions pkg/inclusion/paths.go
@@ -2,8 +2,6 @@ package inclusion

import (
"math"

"github.com/celestiaorg/celestia-app/pkg/shares"
)

type path struct {
@@ -14,7 +12,7 @@ type path struct {
// calculateCommitmentPaths calculates all of the paths to subtree roots needed to
// create the commitment for a given blob.
func calculateCommitmentPaths(squareSize, start, blobShareLen, subtreeRootThreshold int) []path {
start = shares.NextShareIndex(start, blobShareLen, subtreeRootThreshold)
start = NextShareIndex(start, blobShareLen, subtreeRootThreshold)
startRow, endRow := start/squareSize, (start+blobShareLen-1)/squareSize
normalizedStartIndex := start % squareSize
normalizedEndIndex := (start + blobShareLen) - endRow*squareSize
@@ -32,7 +30,7 @@ // subTreeRootMaxDepth is the maximum depth of a subtree root that was
// subTreeRootMaxDepth is the maximum depth of a subtree root that was
// used to generate the commitment. The height is based on the
// SubtreeRootThreshold. See ADR-013 for more details.
subTreeRootMaxDepth := int(math.Log2(float64(shares.SubTreeWidth(blobShareLen, subtreeRootThreshold))))
subTreeRootMaxDepth := int(math.Log2(float64(SubTreeWidth(blobShareLen, subtreeRootThreshold))))
minDepth := maxDepth - subTreeRootMaxDepth
coords := calculateSubTreeRootCoordinates(maxDepth, minDepth, start, end)
for _, c := range coords {
7 changes: 4 additions & 3 deletions pkg/square/builder.go
@@ -8,6 +8,7 @@ import (

"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/blob"
"github.com/celestiaorg/celestia-app/pkg/inclusion"
"github.com/celestiaorg/celestia-app/pkg/namespace"
"github.com/celestiaorg/celestia-app/pkg/shares"
"github.com/tendermint/tendermint/pkg/consts"
@@ -125,7 +126,7 @@ func (b *Builder) Export() (Square, error) {
// calculate the square size.
// NOTE: A future optimization could be to recalculate the currentSize based on the actual
// interblob padding used when the blobs are correctly ordered instead of using worst case padding.
ss := shares.BlobMinSquareSize(b.currentSize)
ss := inclusion.BlobMinSquareSize(b.currentSize)

// Sort the blobs by namespace. This uses SliceStable to preserve the order
// of blobs within a namespace because b.Blobs are already ordered by tx
@@ -150,7 +151,7 @@ func (b *Builder) Export() (Square, error) {
for i, element := range b.Blobs {
// NextShareIndex returned where the next blob should start so as to comply with the share commitment rules
// We fill out the remaining
cursor = shares.NextShareIndex(cursor, element.NumShares, b.subtreeRootThreshold)
cursor = inclusion.NextShareIndex(cursor, element.NumShares, b.subtreeRootThreshold)
if i == 0 {
nonReservedStart = cursor
}
@@ -400,7 +401,7 @@ func newElement(blob *blob.Blob, pfbIndex, blobIndex, subtreeRootThreshold int)
//
// Note that the padding would actually belong to the namespace of the transaction before it, but
// this makes no difference to the total share size.
MaxPadding: shares.SubTreeWidth(numShares, subtreeRootThreshold) - 1,
MaxPadding: inclusion.SubTreeWidth(numShares, subtreeRootThreshold) - 1,
}
}

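A hypothetical, minimal sketch (not part of this commit) of the inclusion helpers the builder now depends on; the printed values depend on the configured subtree root threshold, so none are asserted here:

```go
package main

import (
	"fmt"

	"github.com/celestiaorg/celestia-app/pkg/appconsts"
	"github.com/celestiaorg/celestia-app/pkg/inclusion"
)

func main() {
	numShares := 12
	threshold := appconsts.DefaultSubtreeRootThreshold

	// Worst-case interblob padding a blob of this size can force (see newElement above).
	fmt.Println(inclusion.SubTreeWidth(numShares, threshold) - 1)

	// First index at which the blob may start given a cursor of 7,
	// per the blob share commitment rules (see Export above).
	fmt.Println(inclusion.NextShareIndex(7, numShares, threshold))

	// Minimum square size that can hold this many shares.
	fmt.Println(inclusion.BlobMinSquareSize(numShares))
}
```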