From ca41676316975db1be541fa9cdf8816fdc5ad28f Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 19 Jan 2024 13:32:17 +0100 Subject: [PATCH 01/55] feat: multiply s1, s2 by alpha --- backend/plonk/bn254/prove.go | 52 ++++++++++++++---------------------- 1 file changed, 20 insertions(+), 32 deletions(-) diff --git a/backend/plonk/bn254/prove.go b/backend/plonk/bn254/prove.go index 2bb6ea33e3..0cccb65bc3 100644 --- a/backend/plonk/bn254/prove.go +++ b/backend/plonk/bn254/prove.go @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Code generated by gnark DO NOT EDIT - package plonk import ( @@ -101,12 +99,12 @@ type Proof struct { // Commitment to Z, the permutation polynomial Z kzg.Digest - // Commitments to h1, h2, h3 such that h = h1 + Xh2 + X**2h3 is the quotient polynomial + // Commitments to h1, h2, h3 such that h = h1 + Xⁿ⁺²*h2 + X²⁽ⁿ⁺²⁾*h3 is the quotient polynomial H [3]kzg.Digest Bsb22Commitments []kzg.Digest - // Batch opening proof of h1 + zeta*h2 + zeta**2h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime + // Batch opening proof of h1 + zetaⁿ⁺²*h2 + zeta²⁽ⁿ⁺²⁾*h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime BatchedProof kzg.BatchOpeningProof // Opening proof of Z at zeta*mu @@ -151,7 +149,7 @@ func Prove(spr *cs.SparseR1CS, pk *ProvingKey, fullWitness witness.Witness, opts g.Go(instance.buildRatioCopyConstraint) // compute h - g.Go(instance.evaluateConstraints) + g.Go(instance.computeQuotient) // open Z (blinded) at ωζ (proof.ZShiftedOpening) g.Go(instance.openZ) @@ -268,8 +266,8 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT --> precomputing the twiddles - // and storing them in memory is costly given its size. --> do a FFT on the fly + // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles + // and storing them in memory is costly given its size. 
→ do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -500,8 +498,8 @@ func (s *instance) deriveZeta() (err error) { return } -// evaluateConstraints computes H -func (s *instance) evaluateConstraints() (err error) { +// computeQuotient computes H +func (s *instance) computeQuotient() (err error) { s.x[id_Ql] = s.trace.Ql s.x[id_Qr] = s.trace.Qr s.x[id_Qm] = s.trace.Qm @@ -1289,16 +1287,17 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // The Linearized polynomial is: // // α²*L₁(ζ)*Z(X) -// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) +// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)**(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) // + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) +// - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { + // TODO @gbotrel rename - // first part: individual constraints + + // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) - // second part: - // Z(μζ)(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*s3(X)-Z(X)(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ) var s1, s2 fr.Element chS1 := make(chan struct{}, 1) go func() { @@ -1306,11 +1305,11 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s1.Mul(&s1, &beta).Add(&s1, &lZeta).Add(&s1, &gamma) // (l(ζ)+β*s1(ζ)+γ) close(chS1) }() - // ps2 := iop.NewPolynomial(&pk.S2Canonical, iop.Form{Basis: iop.Canonical, Layout: iop.Regular}) + tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) tmp.Mul(&tmp, &beta).Add(&tmp, &rZeta).Add(&tmp, &gamma) // (r(ζ)+β*s2(ζ)+γ) <-chS1 - s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*α var uzeta, uuzeta fr.Element uzeta.Mul(&zeta, &pk.Vk.CosetShift) @@ -1321,7 +1320,7 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &uuzeta).Add(&tmp, &oZeta).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - s2.Neg(&s2) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + s2.Neg(&s2).Mul(&s2, &alpha) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)*α // third part L₁(ζ)*α²*Z var lagrangeZeta, one, den, frNbElmt fr.Element @@ -1342,6 +1341,9 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s.trace.Qk.ToCanonical(s.domain0).ToRegular() + // at this stage we have + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) utils.Parallelize(len(blindedZCanonical), func(start, end int) { cql := s.trace.Ql.Coefficients() @@ -1353,35 +1355,21 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var t, t0, t1 fr.Element for i := start; i < end; i++ { - - t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - + t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) if i < len(s3canonical) { - - t0.Mul(&s3canonical[i], &s1) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*β*s3(X) - + t0.Mul(&s3canonical[i], &s1) // 
α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*β*s3(X) t.Add(&t, &t0) } - - t.Mul(&t, &alpha) // α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)) - if i < len(cqm) { - t1.Mul(&cqm[i], &rl) // linPol = linPol + l(ζ)r(ζ)*Qm(X) - t0.Mul(&cql[i], &lZeta) t0.Add(&t0, &t1) - t.Add(&t, &t0) // linPol = linPol + l(ζ)*Ql(X) - t0.Mul(&cqr[i], &rZeta) t.Add(&t, &t0) // linPol = linPol + r(ζ)*Qr(X) - t0.Mul(&cqo[i], &oZeta) t0.Add(&t0, &cqk[i]) - t.Add(&t, &t0) // linPol = linPol + o(ζ)*Qo(X) + Qk(X) - for j := range qcpZeta { t0.Mul(&pi2Canonical[j][i], &qcpZeta[j]) t.Add(&t, &t0) From e4ff8ce1e50d28ab896dead8f260d8b30e43442d Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 19 Jan 2024 16:32:16 +0100 Subject: [PATCH 02/55] feat: add quotient to the linearised polynomial --- backend/plonk/bn254/prove.go | 62 +++++++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/backend/plonk/bn254/prove.go b/backend/plonk/bn254/prove.go index 0cccb65bc3..8b2676fb33 100644 --- a/backend/plonk/bn254/prove.go +++ b/backend/plonk/bn254/prove.go @@ -104,7 +104,7 @@ type Proof struct { Bsb22Commitments []kzg.Digest - // Batch opening proof of h1 + zetaⁿ⁺²*h2 + zeta²⁽ⁿ⁺²⁾*h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime + // Batch opening proof of linearizedPolynomial, l, r, o, s1, s2, qCPrime BatchedProof kzg.BatchOpeningProof // Opening proof of Z at zeta*mu @@ -1298,6 +1298,13 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var rl fr.Element rl.Mul(&rZeta, &lZeta) + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ* + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + // the linearised polynomial is + // α²*L₁(ζ)*Z(X) + + // s1*s3(X)+s1*Z(X) + l(ζ)*Ql(X) + + // l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) - + // Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) var s1, s2 fr.Element chS1 := make(chan struct{}, 1) go func() { @@ -1322,20 +1329,22 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) s2.Neg(&s2).Mul(&s2, &alpha) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)*α - // third part L₁(ζ)*α²*Z - var lagrangeZeta, one, den, frNbElmt fr.Element + // Z_h(ζ), ζⁿ⁺², L₁(ζ)*α²*Z + var zhZeta, zetaNPlusTwo, lagrangeZeta, one, den, frNbElmt fr.Element one.SetOne() nbElmt := int64(s.domain0.Cardinality) lagrangeZeta.Set(&zeta). - Exp(lagrangeZeta, big.NewInt(nbElmt)). - Sub(&lagrangeZeta, &one) + Exp(lagrangeZeta, big.NewInt(nbElmt)) + zetaNPlusTwo.Mul(&lagrangeZeta, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² + lagrangeZeta.Sub(&lagrangeZeta, &one) // ζⁿ - 1 + zhZeta.Set(&lagrangeZeta) // Z_h(ζ) = ζⁿ - 1 frNbElmt.SetUint64(uint64(nbElmt)) den.Sub(&zeta, &one). Inverse(&den) - lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ⁻¹)/(ζ-1) + lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ - 1)/(ζ-1) Mul(&lagrangeZeta, &alpha). Mul(&lagrangeZeta, &alpha). 
- Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // (1/n)*α²*L₁(ζ) + Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // α²*L₁(ζ) s3canonical := s.trace.S3.Coefficients() @@ -1361,23 +1370,38 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, t.Add(&t, &t0) } if i < len(cqm) { - t1.Mul(&cqm[i], &rl) // linPol = linPol + l(ζ)r(ζ)*Qm(X) - t0.Mul(&cql[i], &lZeta) - t0.Add(&t0, &t1) - t.Add(&t, &t0) // linPol = linPol + l(ζ)*Ql(X) - t0.Mul(&cqr[i], &rZeta) - t.Add(&t, &t0) // linPol = linPol + r(ζ)*Qr(X) - t0.Mul(&cqo[i], &oZeta) - t0.Add(&t0, &cqk[i]) - t.Add(&t, &t0) // linPol = linPol + o(ζ)*Qo(X) + Qk(X) - for j := range qcpZeta { + t1.Mul(&cqm[i], &rl) // l(ζ)r(ζ)*Qm(X) + t.Add(&t, &t1) // linPol += l(ζ)r(ζ)*Qm(X) + t0.Mul(&cql[i], &lZeta) // l(ζ)Q_l(X) + t.Add(&t, &t0) // linPol += l(ζ)*Ql(X) + t0.Mul(&cqr[i], &rZeta) //r(ζ)*Qr(X) + t.Add(&t, &t0) // linPol += r(ζ)*Qr(X) + t0.Mul(&cqo[i], &oZeta) // o(ζ)*Qo(X) + t.Add(&t, &t0) // linPol += o(ζ)*Qo(X) + t.Add(&t, &cqk[i]) // linPol += Qk(X) + for j := range qcpZeta { // linPol += ∑ᵢQcp_(ζ)Pi_(X) t0.Mul(&pi2Canonical[j][i], &qcpZeta[j]) t.Add(&t, &t0) } } - t0.Mul(&blindedZCanonical[i], &lagrangeZeta) - blindedZCanonical[i].Add(&t, &t0) // finish the computation + t0.Mul(&blindedZCanonical[i], &lagrangeZeta) // α²L₁(ζ)Z(X) + blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) + + // the hi are all of the same length + h1 := s.h1() + h2 := s.h2() + h3 := s.h3() + if i < len(h1) { + t.Mul(&h3[i], &zetaNPlusTwo). + Add(&t, &h2[i]). + Mul(&t, &zetaNPlusTwo). + Add(&t, &h1[i]). + Mul(&t, &zhZeta) // Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + } + + blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + } }) return blindedZCanonical From 6f39e5e9464b0c5fca0183f542ffc02a78e977de Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 19 Jan 2024 16:59:25 +0100 Subject: [PATCH 03/55] feat: remove foldedHDigest --- backend/plonk/bn254/prove.go | 84 ++++++++---------------------------- 1 file changed, 19 insertions(+), 65 deletions(-) diff --git a/backend/plonk/bn254/prove.go b/backend/plonk/bn254/prove.go index 8b2676fb33..01fd0d767f 100644 --- a/backend/plonk/bn254/prove.go +++ b/backend/plonk/bn254/prove.go @@ -154,9 +154,6 @@ func Prove(spr *cs.SparseR1CS, pk *ProvingKey, fullWitness witness.Witness, opts // open Z (blinded) at ωζ (proof.ZShiftedOpening) g.Go(instance.openZ) - // fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) - g.Go(instance.foldH) - // linearized polynomial g.Go(instance.computeLinearizedPolynomial) @@ -190,9 +187,6 @@ type instance struct { h *iop.Polynomial // h is the quotient polynomial blindedZ []fr.Element // blindedZ is the blinded version of Z - foldedH []fr.Element // foldedH is the folded version of H - foldedHDigest kzg.Digest // foldedHDigest is the kzg commitment of foldedH - linearizedPolynomial []fr.Element linearizedPolynomialDigest kzg.Digest @@ -647,44 +641,6 @@ func (s *instance) h3() []fr.Element { return h3 } -// fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) -func (s *instance) foldH() error { - // wait for H to be committed and zeta to be derived (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chH: - } - var n big.Int - n.SetUint64(s.domain0.Cardinality + 2) - - var zetaPowerNplusTwo fr.Element - zetaPowerNplusTwo.Exp(s.zeta, &n) - zetaPowerNplusTwo.BigInt(&n) - - s.foldedHDigest.ScalarMultiplication(&s.proof.H[2], &n) - 
s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[1]) // ζᵐ⁺²*Comm(h3) - s.foldedHDigest.ScalarMultiplication(&s.foldedHDigest, &n) // ζ²⁽ᵐ⁺²⁾*Comm(h3) + ζᵐ⁺²*Comm(h2) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[0]) - - // fold H (H₀ + ζᵐ⁺²*H₁ + ζ²⁽ᵐ⁺²⁾H₂)) - h1 := s.h1() - h2 := s.h2() - s.foldedH = s.h3() - - for i := 0; i < int(s.domain0.Cardinality)+2; i++ { - s.foldedH[i]. - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h2[i]). - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h1[i]) - } - - close(s.chFoldedH) - - return nil -} - func (s *instance) computeLinearizedPolynomial() error { // wait for H to be committed and zeta to be derived (or ctx.Done()) @@ -779,27 +735,25 @@ func (s *instance) batchOpening() error { } polysQcp := coefficients(s.trace.Qcp) - polysToOpen := make([][]fr.Element, 7+len(polysQcp)) - copy(polysToOpen[7:], polysQcp) - - polysToOpen[0] = s.foldedH - polysToOpen[1] = s.linearizedPolynomial - polysToOpen[2] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) - polysToOpen[3] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) - polysToOpen[4] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) - polysToOpen[5] = s.trace.S1.Coefficients() - polysToOpen[6] = s.trace.S2.Coefficients() - - digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+7) - copy(digestsToOpen[7:], s.pk.Vk.Qcp) - - digestsToOpen[0] = s.foldedHDigest - digestsToOpen[1] = s.linearizedPolynomialDigest - digestsToOpen[2] = s.proof.LRO[0] - digestsToOpen[3] = s.proof.LRO[1] - digestsToOpen[4] = s.proof.LRO[2] - digestsToOpen[5] = s.pk.Vk.S[0] - digestsToOpen[6] = s.pk.Vk.S[1] + polysToOpen := make([][]fr.Element, 6+len(polysQcp)) + copy(polysToOpen[6:], polysQcp) + + polysToOpen[0] = s.linearizedPolynomial + polysToOpen[1] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) + polysToOpen[2] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) + polysToOpen[3] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) + polysToOpen[4] = s.trace.S1.Coefficients() + polysToOpen[5] = s.trace.S2.Coefficients() + + digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+6) + copy(digestsToOpen[6:], s.pk.Vk.Qcp) + + digestsToOpen[0] = s.linearizedPolynomialDigest + digestsToOpen[1] = s.proof.LRO[0] + digestsToOpen[2] = s.proof.LRO[1] + digestsToOpen[3] = s.proof.LRO[2] + digestsToOpen[4] = s.pk.Vk.S[0] + digestsToOpen[5] = s.pk.Vk.S[1] var err error s.proof.BatchedProof, err = kzg.BatchOpenSinglePoint( From 03ddda02a8f78beaa3f107d341daaa73cd6dcf0c Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 19 Jan 2024 19:39:13 +0100 Subject: [PATCH 04/55] feat: using batch inversion --- backend/plonk/bn254/verify.go | 70 +++++++++++++++++------------------ 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/backend/plonk/bn254/verify.go b/backend/plonk/bn254/verify.go index b119a71c9b..51b3a0a28a 100644 --- a/backend/plonk/bn254/verify.go +++ b/backend/plonk/bn254/verify.go @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Code generated by gnark DO NOT EDIT - package plonk import ( @@ -44,6 +42,7 @@ var ( ) func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { + log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) 
@@ -96,37 +95,39 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // evaluation of Z=Xⁿ⁻¹ at ζ + // evaluation of Z=Xⁿ-1 at ζ var zetaPowerM, zzeta fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zzeta.Sub(&zetaPowerM, &one) + zzeta.Sub(&zetaPowerM, &one) // ζⁿ-1 // compute PI = ∑_{i Date: Mon, 22 Jan 2024 12:00:29 +0100 Subject: [PATCH 05/55] feat: opening of h0, h1, h2 ok --- backend/plonk/bn254/prove.go | 63 ++++++-------- backend/plonk/bn254/verify.go | 159 ++++++++++++++-------------------- 2 files changed, 93 insertions(+), 129 deletions(-) diff --git a/backend/plonk/bn254/prove.go b/backend/plonk/bn254/prove.go index 01fd0d767f..de67ff8451 100644 --- a/backend/plonk/bn254/prove.go +++ b/backend/plonk/bn254/prove.go @@ -209,7 +209,6 @@ type instance struct { chRestoreLRO, chZOpening, chLinearizedPolynomial, - chFoldedH, chGammaBeta chan struct{} domain0, domain1 *fft.Domain @@ -240,7 +239,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi chH: make(chan struct{}, 1), chZOpening: make(chan struct{}, 1), chLinearizedPolynomial: make(chan struct{}, 1), - chFoldedH: make(chan struct{}, 1), chRestoreLRO: make(chan struct{}, 1), } s.initBSB22Commitments() @@ -720,13 +718,6 @@ func (s *instance) batchOpening() error { case <-s.chLRO: } - // wait for foldedH to be computed (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chFoldedH: - } - // wait for linearizedPolynomial to be computed (or ctx.Done()) select { case <-s.ctx.Done(): @@ -1241,8 +1232,8 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // The Linearized polynomial is: // // α²*L₁(ζ)*Z(X) -// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)**(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) -// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) +// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) +// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) // - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { @@ -1252,11 +1243,11 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var rl fr.Element rl.Mul(&rZeta, &lZeta) - // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ* + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // the linearised polynomial is // α²*L₁(ζ)*Z(X) + - // s1*s3(X)+s1*Z(X) + l(ζ)*Ql(X) + + // s1*s3(X)+s2*Z(X) + l(ζ)*Ql(X) + // l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) - // Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) var s1, s2 fr.Element @@ -1270,7 +1261,7 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) tmp.Mul(&tmp, &beta).Add(&tmp, &rZeta).Add(&tmp, &gamma) // (r(ζ)+β*s2(ζ)+γ) <-chS1 - s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*α + s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ)*α var uzeta, uuzeta fr.Element uzeta.Mul(&zeta, &pk.Vk.CosetShift) @@ -1281,29 
+1272,32 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &uuzeta).Add(&tmp, &oZeta).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - s2.Neg(&s2).Mul(&s2, &alpha) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)*α + s2.Neg(&s2).Mul(&s2, &alpha) // Z_h(ζ), ζⁿ⁺², L₁(ζ)*α²*Z - var zhZeta, zetaNPlusTwo, lagrangeZeta, one, den, frNbElmt fr.Element + var zhZeta, zetaNPlusTwo, alphaSquareLagrangeOne, one, den, frNbElmt fr.Element one.SetOne() nbElmt := int64(s.domain0.Cardinality) - lagrangeZeta.Set(&zeta). - Exp(lagrangeZeta, big.NewInt(nbElmt)) - zetaNPlusTwo.Mul(&lagrangeZeta, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² - lagrangeZeta.Sub(&lagrangeZeta, &one) // ζⁿ - 1 - zhZeta.Set(&lagrangeZeta) // Z_h(ζ) = ζⁿ - 1 + alphaSquareLagrangeOne.Set(&zeta).Exp(alphaSquareLagrangeOne, big.NewInt(nbElmt)) // ζⁿ + zetaNPlusTwo.Mul(&alphaSquareLagrangeOne, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² + alphaSquareLagrangeOne.Sub(&alphaSquareLagrangeOne, &one) // ζⁿ - 1 + zhZeta.Set(&alphaSquareLagrangeOne) // Z_h(ζ) = ζⁿ - 1 frNbElmt.SetUint64(uint64(nbElmt)) - den.Sub(&zeta, &one). - Inverse(&den) - lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ - 1)/(ζ-1) - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // α²*L₁(ζ) + den.Sub(&zeta, &one).Inverse(&den) // 1/(ζ-1) + alphaSquareLagrangeOne.Mul(&alphaSquareLagrangeOne, &den). // L₁ = (ζⁿ - 1)/(ζ-1) + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &s.domain0.CardinalityInv) // α²*L₁(ζ) s3canonical := s.trace.S3.Coefficients() s.trace.Qk.ToCanonical(s.domain0).ToRegular() + // the hi are all of the same length + h1 := s.h1() + h2 := s.h2() + h3 := s.h3() + // at this stage we have // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) @@ -1339,23 +1333,18 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, } } - t0.Mul(&blindedZCanonical[i], &lagrangeZeta) // α²L₁(ζ)Z(X) - blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) + t0.Mul(&blindedZCanonical[i], &alphaSquareLagrangeOne) // α²L₁(ζ)Z(X) + blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) - // the hi are all of the same length - h1 := s.h1() - h2 := s.h2() - h3 := s.h3() if i < len(h1) { t.Mul(&h3[i], &zetaNPlusTwo). Add(&t, &h2[i]). Mul(&t, &zetaNPlusTwo). - Add(&t, &h1[i]). 
- Mul(&t, &zhZeta) // Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + Add(&t, &h1[i]) + t.Mul(&t, &zhZeta) + blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) } - blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) - } }) return blindedZCanonical diff --git a/backend/plonk/bn254/verify.go b/backend/plonk/bn254/verify.go index 51b3a0a28a..f4639e01e8 100644 --- a/backend/plonk/bn254/verify.go +++ b/backend/plonk/bn254/verify.go @@ -23,7 +23,6 @@ import ( "time" "github.com/consensys/gnark-crypto/ecc" - curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fp" @@ -96,16 +95,19 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac } // evaluation of Z=Xⁿ-1 at ζ - var zetaPowerM, zzeta fr.Element + var zetaPowerM, zetaPowerNMinusOne, lagrangeOne fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zzeta.Sub(&zetaPowerM, &one) // ζⁿ-1 + zetaPowerNMinusOne.Sub(&zetaPowerM, &one) // ζⁿ-1 + lagrangeOne.Sub(&zeta, &one). // ζ-1 + Inverse(&lagrangeOne). // 1/(ζ-1) + Mul(&lagrangeOne, &zetaPowerNMinusOne). // (ζ^n-1)/(ζ-1) + Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) // compute PI = ∑_{i Date: Mon, 22 Jan 2024 15:11:46 +0100 Subject: [PATCH 06/55] feat: renaming zhZeta --- backend/plonk/bn254/verify.go | 24 ++++++++++++------------ std/recursion/plonk/verifier_test.go | 16 ++++++++-------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/backend/plonk/bn254/verify.go b/backend/plonk/bn254/verify.go index f4639e01e8..6d85371a48 100644 --- a/backend/plonk/bn254/verify.go +++ b/backend/plonk/bn254/verify.go @@ -94,17 +94,17 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // evaluation of Z=Xⁿ-1 at ζ - var zetaPowerM, zetaPowerNMinusOne, lagrangeOne fr.Element + // evaluation of zhZeta=ζⁿ-1 + var zetaPowerM, zhZeta, lagrangeOne fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zetaPowerNMinusOne.Sub(&zetaPowerM, &one) // ζⁿ-1 - lagrangeOne.Sub(&zeta, &one). // ζ-1 - Inverse(&lagrangeOne). // 1/(ζ-1) - Mul(&lagrangeOne, &zetaPowerNMinusOne). // (ζ^n-1)/(ζ-1) - Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) + zhZeta.Sub(&zetaPowerM, &one) // ζⁿ-1 + lagrangeOne.Sub(&zeta, &one). // ζ-1 + Inverse(&lagrangeOne). // 1/(ζ-1) + Mul(&lagrangeOne, &zhZeta). 
// (ζ^n-1)/(ζ-1) + Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) // compute PI = ∑_{i Date: Tue, 23 Jan 2024 14:57:47 +0100 Subject: [PATCH 07/55] fix: verifier ok --- backend/plonk/bn254/verify.go | 54 ++++++++++++++++++++++++----------- 1 file changed, 38 insertions(+), 16 deletions(-) diff --git a/backend/plonk/bn254/verify.go b/backend/plonk/bn254/verify.go index 6d85371a48..20efb90ba2 100644 --- a/backend/plonk/bn254/verify.go +++ b/backend/plonk/bn254/verify.go @@ -36,8 +36,8 @@ import ( ) var ( - errWrongClaimedQuotient = errors.New("claimed quotient is not as expected") - errInvalidWitness = errors.New("witness length is invalid") + errAlgebraicRelation = errors.New("algebraic relation does not hold") + errInvalidWitness = errors.New("witness length is invalid") ) func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { @@ -161,21 +161,48 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac } } + var _s1, _s2, tmp fr.Element + l := proof.BatchedProof.ClaimedValues[1] + r := proof.BatchedProof.ClaimedValues[2] + o := proof.BatchedProof.ClaimedValues[3] + s1 := proof.BatchedProof.ClaimedValues[4] + s2 := proof.BatchedProof.ClaimedValues[5] + + // Z(ωζ) + zu := proof.ZShiftedOpening.ClaimedValue + + // α²*L₁(ζ) + var alphaSquareLagrangeOne fr.Element + alphaSquareLagrangeOne.Mul(&lagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &alpha) // α²*L₁(ζ) + + // computing the constant coefficient of the full algebraic relation + // , corresponding to the value of the linearisation polynomiat at ζ + // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + var constLin fr.Element + constLin.Mul(&beta, &s1).Add(&constLin, &gamma).Add(&constLin, &l) // (l(ζ)+β*s1(ζ)+γ) + tmp.Mul(&s2, &beta).Add(&tmp, &gamma).Add(&tmp, &r) // (r(ζ)+β*s2(ζ)+γ) + constLin.Mul(&constLin, &tmp) // (l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ) + tmp.Add(&o, &gamma) // (o(ζ)+γ) + constLin.Mul(&tmp, &constLin).Mul(&constLin, &alpha).Mul(&constLin, &zu) // α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + + constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] + + // check that the opening of the linearised polynomial is equal to -constLin + openingLinPol := proof.BatchedProof.ClaimedValues[0] + if !constLin.Equal(&openingLinPol) { + return errAlgebraicRelation + } + // computing the linearised polynomial digest // α²*L₁(ζ)*[Z] + // _s1*[s3]+_s2*[Z] + l(ζ)*[Ql] + // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) // where - // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - var _s1, _s2, tmp, zu fr.Element - l := proof.BatchedProof.ClaimedValues[1] - r := proof.BatchedProof.ClaimedValues[2] - o := proof.BatchedProof.ClaimedValues[3] - s1 := proof.BatchedProof.ClaimedValues[4] - s2 := proof.BatchedProof.ClaimedValues[5] - zu = proof.ZShiftedOpening.ClaimedValue // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) _s1.Mul(&beta, &s1).Add(&_s1, &l).Add(&_s1, &gamma) // (l(ζ)+β*s1(β)+γ) @@ -187,12 +214,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac tmp.Mul(&beta, &vk.CosetShift).Mul(&tmp, &zeta).Add(&tmp, &gamma).Add(&tmp, &r) // 
(r(ζ)+β*u*ζ+γ) _s2.Mul(&_s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &vk.CosetShift).Mul(&tmp, &vk.CosetShift).Mul(&tmp, &zeta).Add(&tmp, &o).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) - _s2.Mul(&_s2, &tmp).Mul(&_s2, &alpha).Neg(&_s2) // -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // -(ζⁿ-1) - - // α²*L₁(ζ) - var alphaSquareLagrangeOne fr.Element - alphaSquareLagrangeOne.Mul(&lagrangeOne, &alpha). - Mul(&alphaSquareLagrangeOne, &alpha) // α²*L₁(ζ) + _s2.Mul(&_s2, &tmp).Mul(&_s2, &alpha).Neg(&_s2) // -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // α²*L₁(ζ) - α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) var coeffZ fr.Element From 5ec808d38c8b8cd8c11e9fb77301a248483331a8 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Tue, 23 Jan 2024 15:12:46 +0100 Subject: [PATCH 08/55] feat: code gen plonk upgrade --- backend/plonk/bls12-377/prove.go | 218 ++++++---------- backend/plonk/bls12-377/verify.go | 232 ++++++++--------- backend/plonk/bls12-381/prove.go | 218 ++++++---------- backend/plonk/bls12-381/verify.go | 232 ++++++++--------- backend/plonk/bls24-315/prove.go | 218 ++++++---------- backend/plonk/bls24-315/verify.go | 232 ++++++++--------- backend/plonk/bls24-317/prove.go | 218 ++++++---------- backend/plonk/bls24-317/verify.go | 232 ++++++++--------- backend/plonk/bn254/prove.go | 7 +- backend/plonk/bn254/verify.go | 3 + backend/plonk/bw6-633/prove.go | 218 ++++++---------- backend/plonk/bw6-633/verify.go | 232 ++++++++--------- backend/plonk/bw6-761/prove.go | 218 ++++++---------- backend/plonk/bw6-761/verify.go | 232 ++++++++--------- .../zkpschemes/plonk/plonk.prove.go.tmpl | 245 +++++++----------- .../zkpschemes/plonk/plonk.verify.go.tmpl | 233 +++++++++-------- 16 files changed, 1424 insertions(+), 1764 deletions(-) diff --git a/backend/plonk/bls12-377/prove.go b/backend/plonk/bls12-377/prove.go index b720e79af9..4c624d057a 100644 --- a/backend/plonk/bls12-377/prove.go +++ b/backend/plonk/bls12-377/prove.go @@ -52,11 +52,6 @@ import ( "github.com/consensys/gnark/logger" ) -// TODO in gnark-crypto: -// * remove everything linked to the blinding -// * add SetCoeff method -// * modify GetCoeff -> if the poly is shifted and in canonical form the index is computed differently - const ( id_L int = iota id_R @@ -101,12 +96,12 @@ type Proof struct { // Commitment to Z, the permutation polynomial Z kzg.Digest - // Commitments to h1, h2, h3 such that h = h1 + Xh2 + X**2h3 is the quotient polynomial + // Commitments to h1, h2, h3 such that h = h1 + Xⁿ⁺²*h2 + X²⁽ⁿ⁺²⁾*h3 is the quotient polynomial H [3]kzg.Digest Bsb22Commitments []kzg.Digest - // Batch opening proof of h1 + zeta*h2 + zeta**2h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime + // Batch opening proof of linearizedPolynomial, l, r, o, s1, s2, qCPrime BatchedProof kzg.BatchOpeningProof // Opening proof of Z at zeta*mu @@ -151,14 +146,11 @@ func Prove(spr *cs.SparseR1CS, pk *ProvingKey, fullWitness witness.Witness, opts g.Go(instance.buildRatioCopyConstraint) // compute h - g.Go(instance.evaluateConstraints) + g.Go(instance.computeQuotient) // open Z (blinded) at ωζ (proof.ZShiftedOpening) g.Go(instance.openZ) - // fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) - g.Go(instance.foldH) - // linearized polynomial g.Go(instance.computeLinearizedPolynomial) @@ -192,9 +184,6 @@ type instance struct { h *iop.Polynomial // h is the quotient polynomial blindedZ []fr.Element // blindedZ is the blinded version of Z - foldedH []fr.Element // foldedH is the folded version of H - foldedHDigest kzg.Digest // foldedHDigest 
is the kzg commitment of foldedH - linearizedPolynomial []fr.Element linearizedPolynomialDigest kzg.Digest @@ -217,7 +206,6 @@ type instance struct { chRestoreLRO, chZOpening, chLinearizedPolynomial, - chFoldedH, chGammaBeta chan struct{} domain0, domain1 *fft.Domain @@ -248,7 +236,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi chH: make(chan struct{}, 1), chZOpening: make(chan struct{}, 1), chLinearizedPolynomial: make(chan struct{}, 1), - chFoldedH: make(chan struct{}, 1), chRestoreLRO: make(chan struct{}, 1), } s.initBSB22Commitments() @@ -268,8 +255,8 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT --> precomputing the twiddles - // and storing them in memory is costly given its size. --> do a FFT on the fly + // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles + // and storing them in memory is costly given its size. → do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -500,8 +487,8 @@ func (s *instance) deriveZeta() (err error) { return } -// evaluateConstraints computes H -func (s *instance) evaluateConstraints() (err error) { +// computeQuotient computes H +func (s *instance) computeQuotient() (err error) { s.x[id_Ql] = s.trace.Ql s.x[id_Qr] = s.trace.Qr s.x[id_Qm] = s.trace.Qm @@ -649,44 +636,6 @@ func (s *instance) h3() []fr.Element { return h3 } -// fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) -func (s *instance) foldH() error { - // wait for H to be committed and zeta to be derived (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chH: - } - var n big.Int - n.SetUint64(s.domain0.Cardinality + 2) - - var zetaPowerNplusTwo fr.Element - zetaPowerNplusTwo.Exp(s.zeta, &n) - zetaPowerNplusTwo.BigInt(&n) - - s.foldedHDigest.ScalarMultiplication(&s.proof.H[2], &n) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[1]) // ζᵐ⁺²*Comm(h3) - s.foldedHDigest.ScalarMultiplication(&s.foldedHDigest, &n) // ζ²⁽ᵐ⁺²⁾*Comm(h3) + ζᵐ⁺²*Comm(h2) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[0]) - - // fold H (H₀ + ζᵐ⁺²*H₁ + ζ²⁽ᵐ⁺²⁾H₂)) - h1 := s.h1() - h2 := s.h2() - s.foldedH = s.h3() - - for i := 0; i < int(s.domain0.Cardinality)+2; i++ { - s.foldedH[i]. - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h2[i]). - Mul(&s.foldedH[i], &zetaPowerNplusTwo). 
- Add(&s.foldedH[i], &h1[i]) - } - - close(s.chFoldedH) - - return nil -} - func (s *instance) computeLinearizedPolynomial() error { // wait for H to be committed and zeta to be derived (or ctx.Done()) @@ -766,13 +715,6 @@ func (s *instance) batchOpening() error { case <-s.chLRO: } - // wait for foldedH to be computed (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chFoldedH: - } - // wait for linearizedPolynomial to be computed (or ctx.Done()) select { case <-s.ctx.Done(): @@ -781,27 +723,25 @@ func (s *instance) batchOpening() error { } polysQcp := coefficients(s.trace.Qcp) - polysToOpen := make([][]fr.Element, 7+len(polysQcp)) - copy(polysToOpen[7:], polysQcp) - - polysToOpen[0] = s.foldedH - polysToOpen[1] = s.linearizedPolynomial - polysToOpen[2] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) - polysToOpen[3] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) - polysToOpen[4] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) - polysToOpen[5] = s.trace.S1.Coefficients() - polysToOpen[6] = s.trace.S2.Coefficients() - - digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+7) - copy(digestsToOpen[7:], s.pk.Vk.Qcp) - - digestsToOpen[0] = s.foldedHDigest - digestsToOpen[1] = s.linearizedPolynomialDigest - digestsToOpen[2] = s.proof.LRO[0] - digestsToOpen[3] = s.proof.LRO[1] - digestsToOpen[4] = s.proof.LRO[2] - digestsToOpen[5] = s.pk.Vk.S[0] - digestsToOpen[6] = s.pk.Vk.S[1] + polysToOpen := make([][]fr.Element, 6+len(polysQcp)) + copy(polysToOpen[6:], polysQcp) + + polysToOpen[0] = s.linearizedPolynomial + polysToOpen[1] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) + polysToOpen[2] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) + polysToOpen[3] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) + polysToOpen[4] = s.trace.S1.Coefficients() + polysToOpen[5] = s.trace.S2.Coefficients() + + digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+6) + copy(digestsToOpen[6:], s.pk.Vk.Qcp) + + digestsToOpen[0] = s.linearizedPolynomialDigest + digestsToOpen[1] = s.proof.LRO[0] + digestsToOpen[2] = s.proof.LRO[1] + digestsToOpen[3] = s.proof.LRO[2] + digestsToOpen[4] = s.pk.Vk.S[0] + digestsToOpen[5] = s.pk.Vk.S[1] var err error s.proof.BatchedProof, err = kzg.BatchOpenSinglePoint( @@ -1289,16 +1229,24 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // The Linearized polynomial is: // // α²*L₁(ζ)*Z(X) -// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) -// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) +// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) +// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) +// - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { + // TODO @gbotrel rename - // first part: individual constraints + + // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) - // second part: - // Z(μζ)(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*s3(X)-Z(X)(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ) + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + // the linearised polynomial is + // α²*L₁(ζ)*Z(X) + + // s1*s3(X)+s2*Z(X) + l(ζ)*Ql(X) + + // l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) 
+ o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) - + // Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) var s1, s2 fr.Element chS1 := make(chan struct{}, 1) go func() { @@ -1306,11 +1254,11 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s1.Mul(&s1, &beta).Add(&s1, &lZeta).Add(&s1, &gamma) // (l(ζ)+β*s1(ζ)+γ) close(chS1) }() - // ps2 := iop.NewPolynomial(&pk.S2Canonical, iop.Form{Basis: iop.Canonical, Layout: iop.Regular}) + tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) tmp.Mul(&tmp, &beta).Add(&tmp, &rZeta).Add(&tmp, &gamma) // (r(ζ)+β*s2(ζ)+γ) <-chS1 - s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ)*α var uzeta, uuzeta fr.Element uzeta.Mul(&zeta, &pk.Vk.CosetShift) @@ -1321,27 +1269,35 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &uuzeta).Add(&tmp, &oZeta).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - s2.Neg(&s2) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + s2.Neg(&s2).Mul(&s2, &alpha) - // third part L₁(ζ)*α²*Z - var lagrangeZeta, one, den, frNbElmt fr.Element + // Z_h(ζ), ζⁿ⁺², L₁(ζ)*α²*Z + var zhZeta, zetaNPlusTwo, alphaSquareLagrangeOne, one, den, frNbElmt fr.Element one.SetOne() nbElmt := int64(s.domain0.Cardinality) - lagrangeZeta.Set(&zeta). - Exp(lagrangeZeta, big.NewInt(nbElmt)). - Sub(&lagrangeZeta, &one) + alphaSquareLagrangeOne.Set(&zeta).Exp(alphaSquareLagrangeOne, big.NewInt(nbElmt)) // ζⁿ + zetaNPlusTwo.Mul(&alphaSquareLagrangeOne, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² + alphaSquareLagrangeOne.Sub(&alphaSquareLagrangeOne, &one) // ζⁿ - 1 + zhZeta.Set(&alphaSquareLagrangeOne) // Z_h(ζ) = ζⁿ - 1 frNbElmt.SetUint64(uint64(nbElmt)) - den.Sub(&zeta, &one). - Inverse(&den) - lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ⁻¹)/(ζ-1) - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // (1/n)*α²*L₁(ζ) + den.Sub(&zeta, &one).Inverse(&den) // 1/(ζ-1) + alphaSquareLagrangeOne.Mul(&alphaSquareLagrangeOne, &den). // L₁ = (ζⁿ - 1)/(ζ-1) + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &alpha). 
+ Mul(&alphaSquareLagrangeOne, &s.domain0.CardinalityInv) // α²*L₁(ζ) s3canonical := s.trace.S3.Coefficients() s.trace.Qk.ToCanonical(s.domain0).ToRegular() + // the hi are all of the same length + h1 := s.h1() + h2 := s.h2() + h3 := s.h3() + + // at this stage we have + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) utils.Parallelize(len(blindedZCanonical), func(start, end int) { cql := s.trace.Ql.Coefficients() @@ -1353,43 +1309,39 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var t, t0, t1 fr.Element for i := start; i < end; i++ { - - t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - + t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) if i < len(s3canonical) { - - t0.Mul(&s3canonical[i], &s1) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*β*s3(X) - + t0.Mul(&s3canonical[i], &s1) // α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*β*s3(X) t.Add(&t, &t0) } - - t.Mul(&t, &alpha) // α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)) - if i < len(cqm) { - - t1.Mul(&cqm[i], &rl) // linPol = linPol + l(ζ)r(ζ)*Qm(X) - - t0.Mul(&cql[i], &lZeta) - t0.Add(&t0, &t1) - - t.Add(&t, &t0) // linPol = linPol + l(ζ)*Ql(X) - - t0.Mul(&cqr[i], &rZeta) - t.Add(&t, &t0) // linPol = linPol + r(ζ)*Qr(X) - - t0.Mul(&cqo[i], &oZeta) - t0.Add(&t0, &cqk[i]) - - t.Add(&t, &t0) // linPol = linPol + o(ζ)*Qo(X) + Qk(X) - - for j := range qcpZeta { + t1.Mul(&cqm[i], &rl) // l(ζ)r(ζ)*Qm(X) + t.Add(&t, &t1) // linPol += l(ζ)r(ζ)*Qm(X) + t0.Mul(&cql[i], &lZeta) // l(ζ)Q_l(X) + t.Add(&t, &t0) // linPol += l(ζ)*Ql(X) + t0.Mul(&cqr[i], &rZeta) //r(ζ)*Qr(X) + t.Add(&t, &t0) // linPol += r(ζ)*Qr(X) + t0.Mul(&cqo[i], &oZeta) // o(ζ)*Qo(X) + t.Add(&t, &t0) // linPol += o(ζ)*Qo(X) + t.Add(&t, &cqk[i]) // linPol += Qk(X) + for j := range qcpZeta { // linPol += ∑ᵢQcp_(ζ)Pi_(X) t0.Mul(&pi2Canonical[j][i], &qcpZeta[j]) t.Add(&t, &t0) } } - t0.Mul(&blindedZCanonical[i], &lagrangeZeta) - blindedZCanonical[i].Add(&t, &t0) // finish the computation + t0.Mul(&blindedZCanonical[i], &alphaSquareLagrangeOne) // α²L₁(ζ)Z(X) + blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) + + if i < len(h1) { + t.Mul(&h3[i], &zetaNPlusTwo). + Add(&t, &h2[i]). + Mul(&t, &zetaNPlusTwo). + Add(&t, &h1[i]) + t.Mul(&t, &zhZeta) + blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + } + } }) return blindedZCanonical diff --git a/backend/plonk/bls12-377/verify.go b/backend/plonk/bls12-377/verify.go index 6234beb7a3..dba02ac50a 100644 --- a/backend/plonk/bls12-377/verify.go +++ b/backend/plonk/bls12-377/verify.go @@ -39,12 +39,13 @@ import ( ) var ( - errWrongClaimedQuotient = errors.New("claimed quotient is not as expected") - errInvalidWitness = errors.New("witness length is invalid") + errAlgebraicRelation = errors.New("algebraic relation does not hold") + errInvalidWitness = errors.New("witness length is invalid") ) func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bls12-377").Str("backend", "plonk").Logger() + + log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) 
if err != nil { @@ -96,37 +97,42 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // evaluation of Z=Xⁿ⁻¹ at ζ - var zetaPowerM, zzeta fr.Element + // evaluation of zhZeta=ζⁿ-1 + var zetaPowerM, zhZeta, lagrangeOne fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zzeta.Sub(&zetaPowerM, &one) + zhZeta.Sub(&zetaPowerM, &one) // ζⁿ-1 + lagrangeOne.Sub(&zeta, &one). // ζ-1 + Inverse(&lagrangeOne). // 1/(ζ-1) + Mul(&lagrangeOne, &zhZeta). // (ζ^n-1)/(ζ-1) + Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) // compute PI = ∑_{i if the poly is shifted and in canonical form the index is computed differently - const ( id_L int = iota id_R @@ -101,12 +96,12 @@ type Proof struct { // Commitment to Z, the permutation polynomial Z kzg.Digest - // Commitments to h1, h2, h3 such that h = h1 + Xh2 + X**2h3 is the quotient polynomial + // Commitments to h1, h2, h3 such that h = h1 + Xⁿ⁺²*h2 + X²⁽ⁿ⁺²⁾*h3 is the quotient polynomial H [3]kzg.Digest Bsb22Commitments []kzg.Digest - // Batch opening proof of h1 + zeta*h2 + zeta**2h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime + // Batch opening proof of linearizedPolynomial, l, r, o, s1, s2, qCPrime BatchedProof kzg.BatchOpeningProof // Opening proof of Z at zeta*mu @@ -151,14 +146,11 @@ func Prove(spr *cs.SparseR1CS, pk *ProvingKey, fullWitness witness.Witness, opts g.Go(instance.buildRatioCopyConstraint) // compute h - g.Go(instance.evaluateConstraints) + g.Go(instance.computeQuotient) // open Z (blinded) at ωζ (proof.ZShiftedOpening) g.Go(instance.openZ) - // fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) - g.Go(instance.foldH) - // linearized polynomial g.Go(instance.computeLinearizedPolynomial) @@ -192,9 +184,6 @@ type instance struct { h *iop.Polynomial // h is the quotient polynomial blindedZ []fr.Element // blindedZ is the blinded version of Z - foldedH []fr.Element // foldedH is the folded version of H - foldedHDigest kzg.Digest // foldedHDigest is the kzg commitment of foldedH - linearizedPolynomial []fr.Element linearizedPolynomialDigest kzg.Digest @@ -217,7 +206,6 @@ type instance struct { chRestoreLRO, chZOpening, chLinearizedPolynomial, - chFoldedH, chGammaBeta chan struct{} domain0, domain1 *fft.Domain @@ -248,7 +236,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi chH: make(chan struct{}, 1), chZOpening: make(chan struct{}, 1), chLinearizedPolynomial: make(chan struct{}, 1), - chFoldedH: make(chan struct{}, 1), chRestoreLRO: make(chan struct{}, 1), } s.initBSB22Commitments() @@ -268,8 +255,8 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT --> precomputing the twiddles - // and storing them in memory is costly given its size. --> do a FFT on the fly + // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles + // and storing them in memory is costly given its size. 
→ do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -500,8 +487,8 @@ func (s *instance) deriveZeta() (err error) { return } -// evaluateConstraints computes H -func (s *instance) evaluateConstraints() (err error) { +// computeQuotient computes H +func (s *instance) computeQuotient() (err error) { s.x[id_Ql] = s.trace.Ql s.x[id_Qr] = s.trace.Qr s.x[id_Qm] = s.trace.Qm @@ -649,44 +636,6 @@ func (s *instance) h3() []fr.Element { return h3 } -// fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) -func (s *instance) foldH() error { - // wait for H to be committed and zeta to be derived (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chH: - } - var n big.Int - n.SetUint64(s.domain0.Cardinality + 2) - - var zetaPowerNplusTwo fr.Element - zetaPowerNplusTwo.Exp(s.zeta, &n) - zetaPowerNplusTwo.BigInt(&n) - - s.foldedHDigest.ScalarMultiplication(&s.proof.H[2], &n) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[1]) // ζᵐ⁺²*Comm(h3) - s.foldedHDigest.ScalarMultiplication(&s.foldedHDigest, &n) // ζ²⁽ᵐ⁺²⁾*Comm(h3) + ζᵐ⁺²*Comm(h2) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[0]) - - // fold H (H₀ + ζᵐ⁺²*H₁ + ζ²⁽ᵐ⁺²⁾H₂)) - h1 := s.h1() - h2 := s.h2() - s.foldedH = s.h3() - - for i := 0; i < int(s.domain0.Cardinality)+2; i++ { - s.foldedH[i]. - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h2[i]). - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h1[i]) - } - - close(s.chFoldedH) - - return nil -} - func (s *instance) computeLinearizedPolynomial() error { // wait for H to be committed and zeta to be derived (or ctx.Done()) @@ -766,13 +715,6 @@ func (s *instance) batchOpening() error { case <-s.chLRO: } - // wait for foldedH to be computed (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chFoldedH: - } - // wait for linearizedPolynomial to be computed (or ctx.Done()) select { case <-s.ctx.Done(): @@ -781,27 +723,25 @@ func (s *instance) batchOpening() error { } polysQcp := coefficients(s.trace.Qcp) - polysToOpen := make([][]fr.Element, 7+len(polysQcp)) - copy(polysToOpen[7:], polysQcp) - - polysToOpen[0] = s.foldedH - polysToOpen[1] = s.linearizedPolynomial - polysToOpen[2] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) - polysToOpen[3] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) - polysToOpen[4] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) - polysToOpen[5] = s.trace.S1.Coefficients() - polysToOpen[6] = s.trace.S2.Coefficients() - - digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+7) - copy(digestsToOpen[7:], s.pk.Vk.Qcp) - - digestsToOpen[0] = s.foldedHDigest - digestsToOpen[1] = s.linearizedPolynomialDigest - digestsToOpen[2] = s.proof.LRO[0] - digestsToOpen[3] = s.proof.LRO[1] - digestsToOpen[4] = s.proof.LRO[2] - digestsToOpen[5] = s.pk.Vk.S[0] - digestsToOpen[6] = s.pk.Vk.S[1] + polysToOpen := make([][]fr.Element, 6+len(polysQcp)) + copy(polysToOpen[6:], polysQcp) + + polysToOpen[0] = s.linearizedPolynomial + polysToOpen[1] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) + polysToOpen[2] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) + polysToOpen[3] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) + polysToOpen[4] = s.trace.S1.Coefficients() + polysToOpen[5] = s.trace.S2.Coefficients() + + digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+6) + copy(digestsToOpen[6:], s.pk.Vk.Qcp) + + digestsToOpen[0] = s.linearizedPolynomialDigest + digestsToOpen[1] = s.proof.LRO[0] + digestsToOpen[2] = s.proof.LRO[1] + digestsToOpen[3] = 
s.proof.LRO[2] + digestsToOpen[4] = s.pk.Vk.S[0] + digestsToOpen[5] = s.pk.Vk.S[1] var err error s.proof.BatchedProof, err = kzg.BatchOpenSinglePoint( @@ -1289,16 +1229,24 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // The Linearized polynomial is: // // α²*L₁(ζ)*Z(X) -// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) -// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) +// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) +// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) +// - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { + // TODO @gbotrel rename - // first part: individual constraints + + // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) - // second part: - // Z(μζ)(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*s3(X)-Z(X)(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ) + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + // the linearised polynomial is + // α²*L₁(ζ)*Z(X) + + // s1*s3(X)+s2*Z(X) + l(ζ)*Ql(X) + + // l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) - + // Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) var s1, s2 fr.Element chS1 := make(chan struct{}, 1) go func() { @@ -1306,11 +1254,11 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s1.Mul(&s1, &beta).Add(&s1, &lZeta).Add(&s1, &gamma) // (l(ζ)+β*s1(ζ)+γ) close(chS1) }() - // ps2 := iop.NewPolynomial(&pk.S2Canonical, iop.Form{Basis: iop.Canonical, Layout: iop.Regular}) + tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) tmp.Mul(&tmp, &beta).Add(&tmp, &rZeta).Add(&tmp, &gamma) // (r(ζ)+β*s2(ζ)+γ) <-chS1 - s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ)*α var uzeta, uuzeta fr.Element uzeta.Mul(&zeta, &pk.Vk.CosetShift) @@ -1321,27 +1269,35 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &uuzeta).Add(&tmp, &oZeta).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - s2.Neg(&s2) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + s2.Neg(&s2).Mul(&s2, &alpha) - // third part L₁(ζ)*α²*Z - var lagrangeZeta, one, den, frNbElmt fr.Element + // Z_h(ζ), ζⁿ⁺², L₁(ζ)*α²*Z + var zhZeta, zetaNPlusTwo, alphaSquareLagrangeOne, one, den, frNbElmt fr.Element one.SetOne() nbElmt := int64(s.domain0.Cardinality) - lagrangeZeta.Set(&zeta). - Exp(lagrangeZeta, big.NewInt(nbElmt)). - Sub(&lagrangeZeta, &one) + alphaSquareLagrangeOne.Set(&zeta).Exp(alphaSquareLagrangeOne, big.NewInt(nbElmt)) // ζⁿ + zetaNPlusTwo.Mul(&alphaSquareLagrangeOne, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² + alphaSquareLagrangeOne.Sub(&alphaSquareLagrangeOne, &one) // ζⁿ - 1 + zhZeta.Set(&alphaSquareLagrangeOne) // Z_h(ζ) = ζⁿ - 1 frNbElmt.SetUint64(uint64(nbElmt)) - den.Sub(&zeta, &one). - Inverse(&den) - lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ⁻¹)/(ζ-1) - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &alpha). 
- Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // (1/n)*α²*L₁(ζ) + den.Sub(&zeta, &one).Inverse(&den) // 1/(ζ-1) + alphaSquareLagrangeOne.Mul(&alphaSquareLagrangeOne, &den). // L₁ = (ζⁿ - 1)/(ζ-1) + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &s.domain0.CardinalityInv) // α²*L₁(ζ) s3canonical := s.trace.S3.Coefficients() s.trace.Qk.ToCanonical(s.domain0).ToRegular() + // the hi are all of the same length + h1 := s.h1() + h2 := s.h2() + h3 := s.h3() + + // at this stage we have + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) utils.Parallelize(len(blindedZCanonical), func(start, end int) { cql := s.trace.Ql.Coefficients() @@ -1353,43 +1309,39 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var t, t0, t1 fr.Element for i := start; i < end; i++ { - - t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - + t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) if i < len(s3canonical) { - - t0.Mul(&s3canonical[i], &s1) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*β*s3(X) - + t0.Mul(&s3canonical[i], &s1) // α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*β*s3(X) t.Add(&t, &t0) } - - t.Mul(&t, &alpha) // α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)) - if i < len(cqm) { - - t1.Mul(&cqm[i], &rl) // linPol = linPol + l(ζ)r(ζ)*Qm(X) - - t0.Mul(&cql[i], &lZeta) - t0.Add(&t0, &t1) - - t.Add(&t, &t0) // linPol = linPol + l(ζ)*Ql(X) - - t0.Mul(&cqr[i], &rZeta) - t.Add(&t, &t0) // linPol = linPol + r(ζ)*Qr(X) - - t0.Mul(&cqo[i], &oZeta) - t0.Add(&t0, &cqk[i]) - - t.Add(&t, &t0) // linPol = linPol + o(ζ)*Qo(X) + Qk(X) - - for j := range qcpZeta { + t1.Mul(&cqm[i], &rl) // l(ζ)r(ζ)*Qm(X) + t.Add(&t, &t1) // linPol += l(ζ)r(ζ)*Qm(X) + t0.Mul(&cql[i], &lZeta) // l(ζ)Q_l(X) + t.Add(&t, &t0) // linPol += l(ζ)*Ql(X) + t0.Mul(&cqr[i], &rZeta) //r(ζ)*Qr(X) + t.Add(&t, &t0) // linPol += r(ζ)*Qr(X) + t0.Mul(&cqo[i], &oZeta) // o(ζ)*Qo(X) + t.Add(&t, &t0) // linPol += o(ζ)*Qo(X) + t.Add(&t, &cqk[i]) // linPol += Qk(X) + for j := range qcpZeta { // linPol += ∑ᵢQcp_(ζ)Pi_(X) t0.Mul(&pi2Canonical[j][i], &qcpZeta[j]) t.Add(&t, &t0) } } - t0.Mul(&blindedZCanonical[i], &lagrangeZeta) - blindedZCanonical[i].Add(&t, &t0) // finish the computation + t0.Mul(&blindedZCanonical[i], &alphaSquareLagrangeOne) // α²L₁(ζ)Z(X) + blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) + + if i < len(h1) { + t.Mul(&h3[i], &zetaNPlusTwo). + Add(&t, &h2[i]). + Mul(&t, &zetaNPlusTwo). 
+ Add(&t, &h1[i]) + t.Mul(&t, &zhZeta) + blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + } + } }) return blindedZCanonical diff --git a/backend/plonk/bls12-381/verify.go b/backend/plonk/bls12-381/verify.go index ea78e72096..559f4cb6e3 100644 --- a/backend/plonk/bls12-381/verify.go +++ b/backend/plonk/bls12-381/verify.go @@ -39,12 +39,13 @@ import ( ) var ( - errWrongClaimedQuotient = errors.New("claimed quotient is not as expected") - errInvalidWitness = errors.New("witness length is invalid") + errAlgebraicRelation = errors.New("algebraic relation does not hold") + errInvalidWitness = errors.New("witness length is invalid") ) func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bls12-381").Str("backend", "plonk").Logger() + + log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { @@ -96,37 +97,42 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // evaluation of Z=Xⁿ⁻¹ at ζ - var zetaPowerM, zzeta fr.Element + // evaluation of zhZeta=ζⁿ-1 + var zetaPowerM, zhZeta, lagrangeOne fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zzeta.Sub(&zetaPowerM, &one) + zhZeta.Sub(&zetaPowerM, &one) // ζⁿ-1 + lagrangeOne.Sub(&zeta, &one). // ζ-1 + Inverse(&lagrangeOne). // 1/(ζ-1) + Mul(&lagrangeOne, &zhZeta). // (ζ^n-1)/(ζ-1) + Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) // compute PI = ∑_{i if the poly is shifted and in canonical form the index is computed differently - const ( id_L int = iota id_R @@ -101,12 +96,12 @@ type Proof struct { // Commitment to Z, the permutation polynomial Z kzg.Digest - // Commitments to h1, h2, h3 such that h = h1 + Xh2 + X**2h3 is the quotient polynomial + // Commitments to h1, h2, h3 such that h = h1 + Xⁿ⁺²*h2 + X²⁽ⁿ⁺²⁾*h3 is the quotient polynomial H [3]kzg.Digest Bsb22Commitments []kzg.Digest - // Batch opening proof of h1 + zeta*h2 + zeta**2h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime + // Batch opening proof of linearizedPolynomial, l, r, o, s1, s2, qCPrime BatchedProof kzg.BatchOpeningProof // Opening proof of Z at zeta*mu @@ -151,14 +146,11 @@ func Prove(spr *cs.SparseR1CS, pk *ProvingKey, fullWitness witness.Witness, opts g.Go(instance.buildRatioCopyConstraint) // compute h - g.Go(instance.evaluateConstraints) + g.Go(instance.computeQuotient) // open Z (blinded) at ωζ (proof.ZShiftedOpening) g.Go(instance.openZ) - // fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) - g.Go(instance.foldH) - // linearized polynomial g.Go(instance.computeLinearizedPolynomial) @@ -192,9 +184,6 @@ type instance struct { h *iop.Polynomial // h is the quotient polynomial blindedZ []fr.Element // blindedZ is the blinded version of Z - foldedH []fr.Element // foldedH is the folded version of H - foldedHDigest kzg.Digest // foldedHDigest is the kzg commitment of foldedH - linearizedPolynomial []fr.Element linearizedPolynomialDigest kzg.Digest @@ -217,7 +206,6 @@ type instance struct { chRestoreLRO, chZOpening, chLinearizedPolynomial, - chFoldedH, chGammaBeta chan struct{} domain0, domain1 *fft.Domain @@ -248,7 +236,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi chH: make(chan struct{}, 1), chZOpening: make(chan struct{}, 1), chLinearizedPolynomial: 
make(chan struct{}, 1), - chFoldedH: make(chan struct{}, 1), chRestoreLRO: make(chan struct{}, 1), } s.initBSB22Commitments() @@ -268,8 +255,8 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT --> precomputing the twiddles - // and storing them in memory is costly given its size. --> do a FFT on the fly + // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles + // and storing them in memory is costly given its size. → do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -500,8 +487,8 @@ func (s *instance) deriveZeta() (err error) { return } -// evaluateConstraints computes H -func (s *instance) evaluateConstraints() (err error) { +// computeQuotient computes H +func (s *instance) computeQuotient() (err error) { s.x[id_Ql] = s.trace.Ql s.x[id_Qr] = s.trace.Qr s.x[id_Qm] = s.trace.Qm @@ -649,44 +636,6 @@ func (s *instance) h3() []fr.Element { return h3 } -// fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) -func (s *instance) foldH() error { - // wait for H to be committed and zeta to be derived (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chH: - } - var n big.Int - n.SetUint64(s.domain0.Cardinality + 2) - - var zetaPowerNplusTwo fr.Element - zetaPowerNplusTwo.Exp(s.zeta, &n) - zetaPowerNplusTwo.BigInt(&n) - - s.foldedHDigest.ScalarMultiplication(&s.proof.H[2], &n) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[1]) // ζᵐ⁺²*Comm(h3) - s.foldedHDigest.ScalarMultiplication(&s.foldedHDigest, &n) // ζ²⁽ᵐ⁺²⁾*Comm(h3) + ζᵐ⁺²*Comm(h2) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[0]) - - // fold H (H₀ + ζᵐ⁺²*H₁ + ζ²⁽ᵐ⁺²⁾H₂)) - h1 := s.h1() - h2 := s.h2() - s.foldedH = s.h3() - - for i := 0; i < int(s.domain0.Cardinality)+2; i++ { - s.foldedH[i]. - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h2[i]). - Mul(&s.foldedH[i], &zetaPowerNplusTwo). 
- Add(&s.foldedH[i], &h1[i]) - } - - close(s.chFoldedH) - - return nil -} - func (s *instance) computeLinearizedPolynomial() error { // wait for H to be committed and zeta to be derived (or ctx.Done()) @@ -766,13 +715,6 @@ func (s *instance) batchOpening() error { case <-s.chLRO: } - // wait for foldedH to be computed (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chFoldedH: - } - // wait for linearizedPolynomial to be computed (or ctx.Done()) select { case <-s.ctx.Done(): @@ -781,27 +723,25 @@ func (s *instance) batchOpening() error { } polysQcp := coefficients(s.trace.Qcp) - polysToOpen := make([][]fr.Element, 7+len(polysQcp)) - copy(polysToOpen[7:], polysQcp) - - polysToOpen[0] = s.foldedH - polysToOpen[1] = s.linearizedPolynomial - polysToOpen[2] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) - polysToOpen[3] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) - polysToOpen[4] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) - polysToOpen[5] = s.trace.S1.Coefficients() - polysToOpen[6] = s.trace.S2.Coefficients() - - digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+7) - copy(digestsToOpen[7:], s.pk.Vk.Qcp) - - digestsToOpen[0] = s.foldedHDigest - digestsToOpen[1] = s.linearizedPolynomialDigest - digestsToOpen[2] = s.proof.LRO[0] - digestsToOpen[3] = s.proof.LRO[1] - digestsToOpen[4] = s.proof.LRO[2] - digestsToOpen[5] = s.pk.Vk.S[0] - digestsToOpen[6] = s.pk.Vk.S[1] + polysToOpen := make([][]fr.Element, 6+len(polysQcp)) + copy(polysToOpen[6:], polysQcp) + + polysToOpen[0] = s.linearizedPolynomial + polysToOpen[1] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) + polysToOpen[2] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) + polysToOpen[3] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) + polysToOpen[4] = s.trace.S1.Coefficients() + polysToOpen[5] = s.trace.S2.Coefficients() + + digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+6) + copy(digestsToOpen[6:], s.pk.Vk.Qcp) + + digestsToOpen[0] = s.linearizedPolynomialDigest + digestsToOpen[1] = s.proof.LRO[0] + digestsToOpen[2] = s.proof.LRO[1] + digestsToOpen[3] = s.proof.LRO[2] + digestsToOpen[4] = s.pk.Vk.S[0] + digestsToOpen[5] = s.pk.Vk.S[1] var err error s.proof.BatchedProof, err = kzg.BatchOpenSinglePoint( @@ -1289,16 +1229,24 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // The Linearized polynomial is: // // α²*L₁(ζ)*Z(X) -// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) -// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) +// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) +// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) +// - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { + // TODO @gbotrel rename - // first part: individual constraints + + // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) - // second part: - // Z(μζ)(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*s3(X)-Z(X)(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ) + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + // the linearised polynomial is + // α²*L₁(ζ)*Z(X) + + // s1*s3(X)+s2*Z(X) + l(ζ)*Ql(X) + + // l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) 
+ o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) - + // Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) var s1, s2 fr.Element chS1 := make(chan struct{}, 1) go func() { @@ -1306,11 +1254,11 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s1.Mul(&s1, &beta).Add(&s1, &lZeta).Add(&s1, &gamma) // (l(ζ)+β*s1(ζ)+γ) close(chS1) }() - // ps2 := iop.NewPolynomial(&pk.S2Canonical, iop.Form{Basis: iop.Canonical, Layout: iop.Regular}) + tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) tmp.Mul(&tmp, &beta).Add(&tmp, &rZeta).Add(&tmp, &gamma) // (r(ζ)+β*s2(ζ)+γ) <-chS1 - s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ)*α var uzeta, uuzeta fr.Element uzeta.Mul(&zeta, &pk.Vk.CosetShift) @@ -1321,27 +1269,35 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &uuzeta).Add(&tmp, &oZeta).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - s2.Neg(&s2) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + s2.Neg(&s2).Mul(&s2, &alpha) - // third part L₁(ζ)*α²*Z - var lagrangeZeta, one, den, frNbElmt fr.Element + // Z_h(ζ), ζⁿ⁺², L₁(ζ)*α²*Z + var zhZeta, zetaNPlusTwo, alphaSquareLagrangeOne, one, den, frNbElmt fr.Element one.SetOne() nbElmt := int64(s.domain0.Cardinality) - lagrangeZeta.Set(&zeta). - Exp(lagrangeZeta, big.NewInt(nbElmt)). - Sub(&lagrangeZeta, &one) + alphaSquareLagrangeOne.Set(&zeta).Exp(alphaSquareLagrangeOne, big.NewInt(nbElmt)) // ζⁿ + zetaNPlusTwo.Mul(&alphaSquareLagrangeOne, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² + alphaSquareLagrangeOne.Sub(&alphaSquareLagrangeOne, &one) // ζⁿ - 1 + zhZeta.Set(&alphaSquareLagrangeOne) // Z_h(ζ) = ζⁿ - 1 frNbElmt.SetUint64(uint64(nbElmt)) - den.Sub(&zeta, &one). - Inverse(&den) - lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ⁻¹)/(ζ-1) - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // (1/n)*α²*L₁(ζ) + den.Sub(&zeta, &one).Inverse(&den) // 1/(ζ-1) + alphaSquareLagrangeOne.Mul(&alphaSquareLagrangeOne, &den). // L₁ = (ζⁿ - 1)/(ζ-1) + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &alpha). 
+ Mul(&alphaSquareLagrangeOne, &s.domain0.CardinalityInv) // α²*L₁(ζ) s3canonical := s.trace.S3.Coefficients() s.trace.Qk.ToCanonical(s.domain0).ToRegular() + // the hi are all of the same length + h1 := s.h1() + h2 := s.h2() + h3 := s.h3() + + // at this stage we have + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) utils.Parallelize(len(blindedZCanonical), func(start, end int) { cql := s.trace.Ql.Coefficients() @@ -1353,43 +1309,39 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var t, t0, t1 fr.Element for i := start; i < end; i++ { - - t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - + t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) if i < len(s3canonical) { - - t0.Mul(&s3canonical[i], &s1) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*β*s3(X) - + t0.Mul(&s3canonical[i], &s1) // α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*β*s3(X) t.Add(&t, &t0) } - - t.Mul(&t, &alpha) // α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)) - if i < len(cqm) { - - t1.Mul(&cqm[i], &rl) // linPol = linPol + l(ζ)r(ζ)*Qm(X) - - t0.Mul(&cql[i], &lZeta) - t0.Add(&t0, &t1) - - t.Add(&t, &t0) // linPol = linPol + l(ζ)*Ql(X) - - t0.Mul(&cqr[i], &rZeta) - t.Add(&t, &t0) // linPol = linPol + r(ζ)*Qr(X) - - t0.Mul(&cqo[i], &oZeta) - t0.Add(&t0, &cqk[i]) - - t.Add(&t, &t0) // linPol = linPol + o(ζ)*Qo(X) + Qk(X) - - for j := range qcpZeta { + t1.Mul(&cqm[i], &rl) // l(ζ)r(ζ)*Qm(X) + t.Add(&t, &t1) // linPol += l(ζ)r(ζ)*Qm(X) + t0.Mul(&cql[i], &lZeta) // l(ζ)Q_l(X) + t.Add(&t, &t0) // linPol += l(ζ)*Ql(X) + t0.Mul(&cqr[i], &rZeta) //r(ζ)*Qr(X) + t.Add(&t, &t0) // linPol += r(ζ)*Qr(X) + t0.Mul(&cqo[i], &oZeta) // o(ζ)*Qo(X) + t.Add(&t, &t0) // linPol += o(ζ)*Qo(X) + t.Add(&t, &cqk[i]) // linPol += Qk(X) + for j := range qcpZeta { // linPol += ∑ᵢQcp_(ζ)Pi_(X) t0.Mul(&pi2Canonical[j][i], &qcpZeta[j]) t.Add(&t, &t0) } } - t0.Mul(&blindedZCanonical[i], &lagrangeZeta) - blindedZCanonical[i].Add(&t, &t0) // finish the computation + t0.Mul(&blindedZCanonical[i], &alphaSquareLagrangeOne) // α²L₁(ζ)Z(X) + blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) + + if i < len(h1) { + t.Mul(&h3[i], &zetaNPlusTwo). + Add(&t, &h2[i]). + Mul(&t, &zetaNPlusTwo). + Add(&t, &h1[i]) + t.Mul(&t, &zhZeta) + blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + } + } }) return blindedZCanonical diff --git a/backend/plonk/bls24-315/verify.go b/backend/plonk/bls24-315/verify.go index 64076b69b5..64c8f0a0ea 100644 --- a/backend/plonk/bls24-315/verify.go +++ b/backend/plonk/bls24-315/verify.go @@ -39,12 +39,13 @@ import ( ) var ( - errWrongClaimedQuotient = errors.New("claimed quotient is not as expected") - errInvalidWitness = errors.New("witness length is invalid") + errAlgebraicRelation = errors.New("algebraic relation does not hold") + errInvalidWitness = errors.New("witness length is invalid") ) func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bls24-315").Str("backend", "plonk").Logger() + + log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) 
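Across the prove.go hunks, the standalone foldH step is removed: the quotient slices h1, h2, h3 are now folded by Horner in ζⁿ⁺² inside the linearization loop itself, and Z_H(ζ) times the result is subtracted from the linearized polynomial. A minimal standalone sketch of that per-coefficient fold, assuming three equal-length slices and precomputed ζⁿ⁺² and Z_H(ζ) = ζⁿ − 1 (the function name and signature below are illustrative, not gnark's API):

package sketch

import "github.com/consensys/gnark-crypto/ecc/bn254/fr"

// subtractFoldedQuotient updates lin in place:
//
//	lin(X) -= Z_H(ζ) * (h1(X) + ζⁿ⁺²*h2(X) + ζ²⁽ⁿ⁺²⁾*h3(X))
//
// h1, h2, h3 are assumed to hold the same number of coefficients (n+2 each),
// and lin to be at least that long.
func subtractFoldedQuotient(lin, h1, h2, h3 []fr.Element, zetaNPlusTwo, zhZeta fr.Element) {
	var t fr.Element
	for i := range h1 {
		t.Mul(&h3[i], &zetaNPlusTwo). // ζⁿ⁺²*h3[i]
			Add(&t, &h2[i]).      // + h2[i]
			Mul(&t, &zetaNPlusTwo). // ζⁿ⁺²*(ζⁿ⁺²*h3[i] + h2[i])
			Add(&t, &h1[i])       // + h1[i]
		t.Mul(&t, &zhZeta)      // Z_H(ζ)*(…)
		lin[i].Sub(&lin[i], &t) // lin[i] -= Z_H(ζ)*(…)
	}
}

Subtracting the folded quotient here is what lets the prover drop the separate foldedH polynomial and its KZG digest from the batch opening, as the polysToOpen/digestsToOpen hunks show.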
if err != nil { @@ -96,37 +97,42 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // evaluation of Z=Xⁿ⁻¹ at ζ - var zetaPowerM, zzeta fr.Element + // evaluation of zhZeta=ζⁿ-1 + var zetaPowerM, zhZeta, lagrangeOne fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zzeta.Sub(&zetaPowerM, &one) + zhZeta.Sub(&zetaPowerM, &one) // ζⁿ-1 + lagrangeOne.Sub(&zeta, &one). // ζ-1 + Inverse(&lagrangeOne). // 1/(ζ-1) + Mul(&lagrangeOne, &zhZeta). // (ζ^n-1)/(ζ-1) + Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) // compute PI = ∑_{i if the poly is shifted and in canonical form the index is computed differently - const ( id_L int = iota id_R @@ -101,12 +96,12 @@ type Proof struct { // Commitment to Z, the permutation polynomial Z kzg.Digest - // Commitments to h1, h2, h3 such that h = h1 + Xh2 + X**2h3 is the quotient polynomial + // Commitments to h1, h2, h3 such that h = h1 + Xⁿ⁺²*h2 + X²⁽ⁿ⁺²⁾*h3 is the quotient polynomial H [3]kzg.Digest Bsb22Commitments []kzg.Digest - // Batch opening proof of h1 + zeta*h2 + zeta**2h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime + // Batch opening proof of linearizedPolynomial, l, r, o, s1, s2, qCPrime BatchedProof kzg.BatchOpeningProof // Opening proof of Z at zeta*mu @@ -151,14 +146,11 @@ func Prove(spr *cs.SparseR1CS, pk *ProvingKey, fullWitness witness.Witness, opts g.Go(instance.buildRatioCopyConstraint) // compute h - g.Go(instance.evaluateConstraints) + g.Go(instance.computeQuotient) // open Z (blinded) at ωζ (proof.ZShiftedOpening) g.Go(instance.openZ) - // fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) - g.Go(instance.foldH) - // linearized polynomial g.Go(instance.computeLinearizedPolynomial) @@ -192,9 +184,6 @@ type instance struct { h *iop.Polynomial // h is the quotient polynomial blindedZ []fr.Element // blindedZ is the blinded version of Z - foldedH []fr.Element // foldedH is the folded version of H - foldedHDigest kzg.Digest // foldedHDigest is the kzg commitment of foldedH - linearizedPolynomial []fr.Element linearizedPolynomialDigest kzg.Digest @@ -217,7 +206,6 @@ type instance struct { chRestoreLRO, chZOpening, chLinearizedPolynomial, - chFoldedH, chGammaBeta chan struct{} domain0, domain1 *fft.Domain @@ -248,7 +236,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi chH: make(chan struct{}, 1), chZOpening: make(chan struct{}, 1), chLinearizedPolynomial: make(chan struct{}, 1), - chFoldedH: make(chan struct{}, 1), chRestoreLRO: make(chan struct{}, 1), } s.initBSB22Commitments() @@ -268,8 +255,8 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT --> precomputing the twiddles - // and storing them in memory is costly given its size. --> do a FFT on the fly + // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles + // and storing them in memory is costly given its size. 
→ do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -500,8 +487,8 @@ func (s *instance) deriveZeta() (err error) { return } -// evaluateConstraints computes H -func (s *instance) evaluateConstraints() (err error) { +// computeQuotient computes H +func (s *instance) computeQuotient() (err error) { s.x[id_Ql] = s.trace.Ql s.x[id_Qr] = s.trace.Qr s.x[id_Qm] = s.trace.Qm @@ -649,44 +636,6 @@ func (s *instance) h3() []fr.Element { return h3 } -// fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) -func (s *instance) foldH() error { - // wait for H to be committed and zeta to be derived (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chH: - } - var n big.Int - n.SetUint64(s.domain0.Cardinality + 2) - - var zetaPowerNplusTwo fr.Element - zetaPowerNplusTwo.Exp(s.zeta, &n) - zetaPowerNplusTwo.BigInt(&n) - - s.foldedHDigest.ScalarMultiplication(&s.proof.H[2], &n) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[1]) // ζᵐ⁺²*Comm(h3) - s.foldedHDigest.ScalarMultiplication(&s.foldedHDigest, &n) // ζ²⁽ᵐ⁺²⁾*Comm(h3) + ζᵐ⁺²*Comm(h2) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[0]) - - // fold H (H₀ + ζᵐ⁺²*H₁ + ζ²⁽ᵐ⁺²⁾H₂)) - h1 := s.h1() - h2 := s.h2() - s.foldedH = s.h3() - - for i := 0; i < int(s.domain0.Cardinality)+2; i++ { - s.foldedH[i]. - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h2[i]). - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h1[i]) - } - - close(s.chFoldedH) - - return nil -} - func (s *instance) computeLinearizedPolynomial() error { // wait for H to be committed and zeta to be derived (or ctx.Done()) @@ -766,13 +715,6 @@ func (s *instance) batchOpening() error { case <-s.chLRO: } - // wait for foldedH to be computed (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chFoldedH: - } - // wait for linearizedPolynomial to be computed (or ctx.Done()) select { case <-s.ctx.Done(): @@ -781,27 +723,25 @@ func (s *instance) batchOpening() error { } polysQcp := coefficients(s.trace.Qcp) - polysToOpen := make([][]fr.Element, 7+len(polysQcp)) - copy(polysToOpen[7:], polysQcp) - - polysToOpen[0] = s.foldedH - polysToOpen[1] = s.linearizedPolynomial - polysToOpen[2] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) - polysToOpen[3] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) - polysToOpen[4] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) - polysToOpen[5] = s.trace.S1.Coefficients() - polysToOpen[6] = s.trace.S2.Coefficients() - - digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+7) - copy(digestsToOpen[7:], s.pk.Vk.Qcp) - - digestsToOpen[0] = s.foldedHDigest - digestsToOpen[1] = s.linearizedPolynomialDigest - digestsToOpen[2] = s.proof.LRO[0] - digestsToOpen[3] = s.proof.LRO[1] - digestsToOpen[4] = s.proof.LRO[2] - digestsToOpen[5] = s.pk.Vk.S[0] - digestsToOpen[6] = s.pk.Vk.S[1] + polysToOpen := make([][]fr.Element, 6+len(polysQcp)) + copy(polysToOpen[6:], polysQcp) + + polysToOpen[0] = s.linearizedPolynomial + polysToOpen[1] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) + polysToOpen[2] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) + polysToOpen[3] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) + polysToOpen[4] = s.trace.S1.Coefficients() + polysToOpen[5] = s.trace.S2.Coefficients() + + digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+6) + copy(digestsToOpen[6:], s.pk.Vk.Qcp) + + digestsToOpen[0] = s.linearizedPolynomialDigest + digestsToOpen[1] = s.proof.LRO[0] + digestsToOpen[2] = s.proof.LRO[1] + digestsToOpen[3] = 
s.proof.LRO[2] + digestsToOpen[4] = s.pk.Vk.S[0] + digestsToOpen[5] = s.pk.Vk.S[1] var err error s.proof.BatchedProof, err = kzg.BatchOpenSinglePoint( @@ -1289,16 +1229,24 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // The Linearized polynomial is: // // α²*L₁(ζ)*Z(X) -// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) -// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) +// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) +// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) +// - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { + // TODO @gbotrel rename - // first part: individual constraints + + // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) - // second part: - // Z(μζ)(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*s3(X)-Z(X)(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ) + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + // the linearised polynomial is + // α²*L₁(ζ)*Z(X) + + // s1*s3(X)+s2*Z(X) + l(ζ)*Ql(X) + + // l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) - + // Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) var s1, s2 fr.Element chS1 := make(chan struct{}, 1) go func() { @@ -1306,11 +1254,11 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s1.Mul(&s1, &beta).Add(&s1, &lZeta).Add(&s1, &gamma) // (l(ζ)+β*s1(ζ)+γ) close(chS1) }() - // ps2 := iop.NewPolynomial(&pk.S2Canonical, iop.Form{Basis: iop.Canonical, Layout: iop.Regular}) + tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) tmp.Mul(&tmp, &beta).Add(&tmp, &rZeta).Add(&tmp, &gamma) // (r(ζ)+β*s2(ζ)+γ) <-chS1 - s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ)*α var uzeta, uuzeta fr.Element uzeta.Mul(&zeta, &pk.Vk.CosetShift) @@ -1321,27 +1269,35 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &uuzeta).Add(&tmp, &oZeta).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - s2.Neg(&s2) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + s2.Neg(&s2).Mul(&s2, &alpha) - // third part L₁(ζ)*α²*Z - var lagrangeZeta, one, den, frNbElmt fr.Element + // Z_h(ζ), ζⁿ⁺², L₁(ζ)*α²*Z + var zhZeta, zetaNPlusTwo, alphaSquareLagrangeOne, one, den, frNbElmt fr.Element one.SetOne() nbElmt := int64(s.domain0.Cardinality) - lagrangeZeta.Set(&zeta). - Exp(lagrangeZeta, big.NewInt(nbElmt)). - Sub(&lagrangeZeta, &one) + alphaSquareLagrangeOne.Set(&zeta).Exp(alphaSquareLagrangeOne, big.NewInt(nbElmt)) // ζⁿ + zetaNPlusTwo.Mul(&alphaSquareLagrangeOne, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² + alphaSquareLagrangeOne.Sub(&alphaSquareLagrangeOne, &one) // ζⁿ - 1 + zhZeta.Set(&alphaSquareLagrangeOne) // Z_h(ζ) = ζⁿ - 1 frNbElmt.SetUint64(uint64(nbElmt)) - den.Sub(&zeta, &one). - Inverse(&den) - lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ⁻¹)/(ζ-1) - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &alpha). 
- Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // (1/n)*α²*L₁(ζ) + den.Sub(&zeta, &one).Inverse(&den) // 1/(ζ-1) + alphaSquareLagrangeOne.Mul(&alphaSquareLagrangeOne, &den). // L₁ = (ζⁿ - 1)/(ζ-1) + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &s.domain0.CardinalityInv) // α²*L₁(ζ) s3canonical := s.trace.S3.Coefficients() s.trace.Qk.ToCanonical(s.domain0).ToRegular() + // the hi are all of the same length + h1 := s.h1() + h2 := s.h2() + h3 := s.h3() + + // at this stage we have + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) utils.Parallelize(len(blindedZCanonical), func(start, end int) { cql := s.trace.Ql.Coefficients() @@ -1353,43 +1309,39 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var t, t0, t1 fr.Element for i := start; i < end; i++ { - - t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - + t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) if i < len(s3canonical) { - - t0.Mul(&s3canonical[i], &s1) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*β*s3(X) - + t0.Mul(&s3canonical[i], &s1) // α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*β*s3(X) t.Add(&t, &t0) } - - t.Mul(&t, &alpha) // α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)) - if i < len(cqm) { - - t1.Mul(&cqm[i], &rl) // linPol = linPol + l(ζ)r(ζ)*Qm(X) - - t0.Mul(&cql[i], &lZeta) - t0.Add(&t0, &t1) - - t.Add(&t, &t0) // linPol = linPol + l(ζ)*Ql(X) - - t0.Mul(&cqr[i], &rZeta) - t.Add(&t, &t0) // linPol = linPol + r(ζ)*Qr(X) - - t0.Mul(&cqo[i], &oZeta) - t0.Add(&t0, &cqk[i]) - - t.Add(&t, &t0) // linPol = linPol + o(ζ)*Qo(X) + Qk(X) - - for j := range qcpZeta { + t1.Mul(&cqm[i], &rl) // l(ζ)r(ζ)*Qm(X) + t.Add(&t, &t1) // linPol += l(ζ)r(ζ)*Qm(X) + t0.Mul(&cql[i], &lZeta) // l(ζ)Q_l(X) + t.Add(&t, &t0) // linPol += l(ζ)*Ql(X) + t0.Mul(&cqr[i], &rZeta) //r(ζ)*Qr(X) + t.Add(&t, &t0) // linPol += r(ζ)*Qr(X) + t0.Mul(&cqo[i], &oZeta) // o(ζ)*Qo(X) + t.Add(&t, &t0) // linPol += o(ζ)*Qo(X) + t.Add(&t, &cqk[i]) // linPol += Qk(X) + for j := range qcpZeta { // linPol += ∑ᵢQcp_(ζ)Pi_(X) t0.Mul(&pi2Canonical[j][i], &qcpZeta[j]) t.Add(&t, &t0) } } - t0.Mul(&blindedZCanonical[i], &lagrangeZeta) - blindedZCanonical[i].Add(&t, &t0) // finish the computation + t0.Mul(&blindedZCanonical[i], &alphaSquareLagrangeOne) // α²L₁(ζ)Z(X) + blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) + + if i < len(h1) { + t.Mul(&h3[i], &zetaNPlusTwo). + Add(&t, &h2[i]). + Mul(&t, &zetaNPlusTwo). 
+ Add(&t, &h1[i]) + t.Mul(&t, &zhZeta) + blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + } + } }) return blindedZCanonical diff --git a/backend/plonk/bls24-317/verify.go b/backend/plonk/bls24-317/verify.go index ceda45d811..5640be8848 100644 --- a/backend/plonk/bls24-317/verify.go +++ b/backend/plonk/bls24-317/verify.go @@ -39,12 +39,13 @@ import ( ) var ( - errWrongClaimedQuotient = errors.New("claimed quotient is not as expected") - errInvalidWitness = errors.New("witness length is invalid") + errAlgebraicRelation = errors.New("algebraic relation does not hold") + errInvalidWitness = errors.New("witness length is invalid") ) func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bls24-317").Str("backend", "plonk").Logger() + + log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { @@ -96,37 +97,42 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // evaluation of Z=Xⁿ⁻¹ at ζ - var zetaPowerM, zzeta fr.Element + // evaluation of zhZeta=ζⁿ-1 + var zetaPowerM, zhZeta, lagrangeOne fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zzeta.Sub(&zetaPowerM, &one) + zhZeta.Sub(&zetaPowerM, &one) // ζⁿ-1 + lagrangeOne.Sub(&zeta, &one). // ζ-1 + Inverse(&lagrangeOne). // 1/(ζ-1) + Mul(&lagrangeOne, &zhZeta). // (ζ^n-1)/(ζ-1) + Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) // compute PI = ∑_{i if the poly is shifted and in canonical form the index is computed differently - const ( id_L int = iota id_R diff --git a/backend/plonk/bn254/verify.go b/backend/plonk/bn254/verify.go index 20efb90ba2..4ec7c7d16e 100644 --- a/backend/plonk/bn254/verify.go +++ b/backend/plonk/bn254/verify.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
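Each verify.go hunk above replaces the single ζⁿ − 1 evaluation with the two quantities the updated verifier needs: Z_H(ζ) = ζⁿ − 1 and L₁(ζ) = (ζⁿ − 1)/(n·(ζ − 1)). A sketch of that computation, with n and 1/n passed in explicitly (in the patched code they come from vk.Size and vk.SizeInv):

package sketch

import (
	"math/big"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

// zhAndLagrangeOne returns Z_H(ζ) = ζⁿ-1 and L₁(ζ) = (ζⁿ-1)/(n(ζ-1)),
// mirroring the zhZeta / lagrangeOne computation in the verify.go hunks.
func zhAndLagrangeOne(zeta fr.Element, n uint64, nInv fr.Element) (zhZeta, lagrangeOne fr.Element) {
	one := fr.One()
	var bExpo big.Int
	bExpo.SetUint64(n)

	zhZeta.Exp(zeta, &bExpo)  // ζⁿ
	zhZeta.Sub(&zhZeta, &one) // ζⁿ - 1

	lagrangeOne.Sub(&zeta, &one).       // ζ - 1
		Inverse(&lagrangeOne).      // 1/(ζ-1)
		Mul(&lagrangeOne, &zhZeta). // (ζⁿ-1)/(ζ-1)
		Mul(&lagrangeOne, &nInv)    // (1/n)*(ζⁿ-1)/(ζ-1) = L₁(ζ)
	return
}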
+// Code generated by gnark DO NOT EDIT + package plonk import ( @@ -23,6 +25,7 @@ import ( "time" "github.com/consensys/gnark-crypto/ecc" + curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fp" diff --git a/backend/plonk/bw6-633/prove.go b/backend/plonk/bw6-633/prove.go index fc25a75518..a23b9cd390 100644 --- a/backend/plonk/bw6-633/prove.go +++ b/backend/plonk/bw6-633/prove.go @@ -52,11 +52,6 @@ import ( "github.com/consensys/gnark/logger" ) -// TODO in gnark-crypto: -// * remove everything linked to the blinding -// * add SetCoeff method -// * modify GetCoeff -> if the poly is shifted and in canonical form the index is computed differently - const ( id_L int = iota id_R @@ -101,12 +96,12 @@ type Proof struct { // Commitment to Z, the permutation polynomial Z kzg.Digest - // Commitments to h1, h2, h3 such that h = h1 + Xh2 + X**2h3 is the quotient polynomial + // Commitments to h1, h2, h3 such that h = h1 + Xⁿ⁺²*h2 + X²⁽ⁿ⁺²⁾*h3 is the quotient polynomial H [3]kzg.Digest Bsb22Commitments []kzg.Digest - // Batch opening proof of h1 + zeta*h2 + zeta**2h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime + // Batch opening proof of linearizedPolynomial, l, r, o, s1, s2, qCPrime BatchedProof kzg.BatchOpeningProof // Opening proof of Z at zeta*mu @@ -151,14 +146,11 @@ func Prove(spr *cs.SparseR1CS, pk *ProvingKey, fullWitness witness.Witness, opts g.Go(instance.buildRatioCopyConstraint) // compute h - g.Go(instance.evaluateConstraints) + g.Go(instance.computeQuotient) // open Z (blinded) at ωζ (proof.ZShiftedOpening) g.Go(instance.openZ) - // fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) - g.Go(instance.foldH) - // linearized polynomial g.Go(instance.computeLinearizedPolynomial) @@ -192,9 +184,6 @@ type instance struct { h *iop.Polynomial // h is the quotient polynomial blindedZ []fr.Element // blindedZ is the blinded version of Z - foldedH []fr.Element // foldedH is the folded version of H - foldedHDigest kzg.Digest // foldedHDigest is the kzg commitment of foldedH - linearizedPolynomial []fr.Element linearizedPolynomialDigest kzg.Digest @@ -217,7 +206,6 @@ type instance struct { chRestoreLRO, chZOpening, chLinearizedPolynomial, - chFoldedH, chGammaBeta chan struct{} domain0, domain1 *fft.Domain @@ -248,7 +236,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi chH: make(chan struct{}, 1), chZOpening: make(chan struct{}, 1), chLinearizedPolynomial: make(chan struct{}, 1), - chFoldedH: make(chan struct{}, 1), chRestoreLRO: make(chan struct{}, 1), } s.initBSB22Commitments() @@ -268,8 +255,8 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT --> precomputing the twiddles - // and storing them in memory is costly given its size. --> do a FFT on the fly + // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles + // and storing them in memory is costly given its size. 
→ do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -500,8 +487,8 @@ func (s *instance) deriveZeta() (err error) { return } -// evaluateConstraints computes H -func (s *instance) evaluateConstraints() (err error) { +// computeQuotient computes H +func (s *instance) computeQuotient() (err error) { s.x[id_Ql] = s.trace.Ql s.x[id_Qr] = s.trace.Qr s.x[id_Qm] = s.trace.Qm @@ -649,44 +636,6 @@ func (s *instance) h3() []fr.Element { return h3 } -// fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) -func (s *instance) foldH() error { - // wait for H to be committed and zeta to be derived (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chH: - } - var n big.Int - n.SetUint64(s.domain0.Cardinality + 2) - - var zetaPowerNplusTwo fr.Element - zetaPowerNplusTwo.Exp(s.zeta, &n) - zetaPowerNplusTwo.BigInt(&n) - - s.foldedHDigest.ScalarMultiplication(&s.proof.H[2], &n) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[1]) // ζᵐ⁺²*Comm(h3) - s.foldedHDigest.ScalarMultiplication(&s.foldedHDigest, &n) // ζ²⁽ᵐ⁺²⁾*Comm(h3) + ζᵐ⁺²*Comm(h2) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[0]) - - // fold H (H₀ + ζᵐ⁺²*H₁ + ζ²⁽ᵐ⁺²⁾H₂)) - h1 := s.h1() - h2 := s.h2() - s.foldedH = s.h3() - - for i := 0; i < int(s.domain0.Cardinality)+2; i++ { - s.foldedH[i]. - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h2[i]). - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h1[i]) - } - - close(s.chFoldedH) - - return nil -} - func (s *instance) computeLinearizedPolynomial() error { // wait for H to be committed and zeta to be derived (or ctx.Done()) @@ -766,13 +715,6 @@ func (s *instance) batchOpening() error { case <-s.chLRO: } - // wait for foldedH to be computed (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chFoldedH: - } - // wait for linearizedPolynomial to be computed (or ctx.Done()) select { case <-s.ctx.Done(): @@ -781,27 +723,25 @@ func (s *instance) batchOpening() error { } polysQcp := coefficients(s.trace.Qcp) - polysToOpen := make([][]fr.Element, 7+len(polysQcp)) - copy(polysToOpen[7:], polysQcp) - - polysToOpen[0] = s.foldedH - polysToOpen[1] = s.linearizedPolynomial - polysToOpen[2] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) - polysToOpen[3] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) - polysToOpen[4] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) - polysToOpen[5] = s.trace.S1.Coefficients() - polysToOpen[6] = s.trace.S2.Coefficients() - - digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+7) - copy(digestsToOpen[7:], s.pk.Vk.Qcp) - - digestsToOpen[0] = s.foldedHDigest - digestsToOpen[1] = s.linearizedPolynomialDigest - digestsToOpen[2] = s.proof.LRO[0] - digestsToOpen[3] = s.proof.LRO[1] - digestsToOpen[4] = s.proof.LRO[2] - digestsToOpen[5] = s.pk.Vk.S[0] - digestsToOpen[6] = s.pk.Vk.S[1] + polysToOpen := make([][]fr.Element, 6+len(polysQcp)) + copy(polysToOpen[6:], polysQcp) + + polysToOpen[0] = s.linearizedPolynomial + polysToOpen[1] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) + polysToOpen[2] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) + polysToOpen[3] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) + polysToOpen[4] = s.trace.S1.Coefficients() + polysToOpen[5] = s.trace.S2.Coefficients() + + digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+6) + copy(digestsToOpen[6:], s.pk.Vk.Qcp) + + digestsToOpen[0] = s.linearizedPolynomialDigest + digestsToOpen[1] = s.proof.LRO[0] + digestsToOpen[2] = s.proof.LRO[1] + digestsToOpen[3] = 
s.proof.LRO[2] + digestsToOpen[4] = s.pk.Vk.S[0] + digestsToOpen[5] = s.pk.Vk.S[1] var err error s.proof.BatchedProof, err = kzg.BatchOpenSinglePoint( @@ -1289,16 +1229,24 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // The Linearized polynomial is: // // α²*L₁(ζ)*Z(X) -// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) -// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) +// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) +// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) +// - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { + // TODO @gbotrel rename - // first part: individual constraints + + // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) - // second part: - // Z(μζ)(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*s3(X)-Z(X)(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ) + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + // the linearised polynomial is + // α²*L₁(ζ)*Z(X) + + // s1*s3(X)+s2*Z(X) + l(ζ)*Ql(X) + + // l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) - + // Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) var s1, s2 fr.Element chS1 := make(chan struct{}, 1) go func() { @@ -1306,11 +1254,11 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s1.Mul(&s1, &beta).Add(&s1, &lZeta).Add(&s1, &gamma) // (l(ζ)+β*s1(ζ)+γ) close(chS1) }() - // ps2 := iop.NewPolynomial(&pk.S2Canonical, iop.Form{Basis: iop.Canonical, Layout: iop.Regular}) + tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) tmp.Mul(&tmp, &beta).Add(&tmp, &rZeta).Add(&tmp, &gamma) // (r(ζ)+β*s2(ζ)+γ) <-chS1 - s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ)*α var uzeta, uuzeta fr.Element uzeta.Mul(&zeta, &pk.Vk.CosetShift) @@ -1321,27 +1269,35 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &uuzeta).Add(&tmp, &oZeta).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - s2.Neg(&s2) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + s2.Neg(&s2).Mul(&s2, &alpha) - // third part L₁(ζ)*α²*Z - var lagrangeZeta, one, den, frNbElmt fr.Element + // Z_h(ζ), ζⁿ⁺², L₁(ζ)*α²*Z + var zhZeta, zetaNPlusTwo, alphaSquareLagrangeOne, one, den, frNbElmt fr.Element one.SetOne() nbElmt := int64(s.domain0.Cardinality) - lagrangeZeta.Set(&zeta). - Exp(lagrangeZeta, big.NewInt(nbElmt)). - Sub(&lagrangeZeta, &one) + alphaSquareLagrangeOne.Set(&zeta).Exp(alphaSquareLagrangeOne, big.NewInt(nbElmt)) // ζⁿ + zetaNPlusTwo.Mul(&alphaSquareLagrangeOne, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² + alphaSquareLagrangeOne.Sub(&alphaSquareLagrangeOne, &one) // ζⁿ - 1 + zhZeta.Set(&alphaSquareLagrangeOne) // Z_h(ζ) = ζⁿ - 1 frNbElmt.SetUint64(uint64(nbElmt)) - den.Sub(&zeta, &one). - Inverse(&den) - lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ⁻¹)/(ζ-1) - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &alpha). 
- Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // (1/n)*α²*L₁(ζ) + den.Sub(&zeta, &one).Inverse(&den) // 1/(ζ-1) + alphaSquareLagrangeOne.Mul(&alphaSquareLagrangeOne, &den). // L₁ = (ζⁿ - 1)/(ζ-1) + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &s.domain0.CardinalityInv) // α²*L₁(ζ) s3canonical := s.trace.S3.Coefficients() s.trace.Qk.ToCanonical(s.domain0).ToRegular() + // the hi are all of the same length + h1 := s.h1() + h2 := s.h2() + h3 := s.h3() + + // at this stage we have + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) utils.Parallelize(len(blindedZCanonical), func(start, end int) { cql := s.trace.Ql.Coefficients() @@ -1353,43 +1309,39 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var t, t0, t1 fr.Element for i := start; i < end; i++ { - - t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - + t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) if i < len(s3canonical) { - - t0.Mul(&s3canonical[i], &s1) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*β*s3(X) - + t0.Mul(&s3canonical[i], &s1) // α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*β*s3(X) t.Add(&t, &t0) } - - t.Mul(&t, &alpha) // α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)) - if i < len(cqm) { - - t1.Mul(&cqm[i], &rl) // linPol = linPol + l(ζ)r(ζ)*Qm(X) - - t0.Mul(&cql[i], &lZeta) - t0.Add(&t0, &t1) - - t.Add(&t, &t0) // linPol = linPol + l(ζ)*Ql(X) - - t0.Mul(&cqr[i], &rZeta) - t.Add(&t, &t0) // linPol = linPol + r(ζ)*Qr(X) - - t0.Mul(&cqo[i], &oZeta) - t0.Add(&t0, &cqk[i]) - - t.Add(&t, &t0) // linPol = linPol + o(ζ)*Qo(X) + Qk(X) - - for j := range qcpZeta { + t1.Mul(&cqm[i], &rl) // l(ζ)r(ζ)*Qm(X) + t.Add(&t, &t1) // linPol += l(ζ)r(ζ)*Qm(X) + t0.Mul(&cql[i], &lZeta) // l(ζ)Q_l(X) + t.Add(&t, &t0) // linPol += l(ζ)*Ql(X) + t0.Mul(&cqr[i], &rZeta) //r(ζ)*Qr(X) + t.Add(&t, &t0) // linPol += r(ζ)*Qr(X) + t0.Mul(&cqo[i], &oZeta) // o(ζ)*Qo(X) + t.Add(&t, &t0) // linPol += o(ζ)*Qo(X) + t.Add(&t, &cqk[i]) // linPol += Qk(X) + for j := range qcpZeta { // linPol += ∑ᵢQcp_(ζ)Pi_(X) t0.Mul(&pi2Canonical[j][i], &qcpZeta[j]) t.Add(&t, &t0) } } - t0.Mul(&blindedZCanonical[i], &lagrangeZeta) - blindedZCanonical[i].Add(&t, &t0) // finish the computation + t0.Mul(&blindedZCanonical[i], &alphaSquareLagrangeOne) // α²L₁(ζ)Z(X) + blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) + + if i < len(h1) { + t.Mul(&h3[i], &zetaNPlusTwo). + Add(&t, &h2[i]). + Mul(&t, &zetaNPlusTwo). 
+ Add(&t, &h1[i]) + t.Mul(&t, &zhZeta) + blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + } + } }) return blindedZCanonical diff --git a/backend/plonk/bw6-633/verify.go b/backend/plonk/bw6-633/verify.go index 5004b6955e..27440459b3 100644 --- a/backend/plonk/bw6-633/verify.go +++ b/backend/plonk/bw6-633/verify.go @@ -39,12 +39,13 @@ import ( ) var ( - errWrongClaimedQuotient = errors.New("claimed quotient is not as expected") - errInvalidWitness = errors.New("witness length is invalid") + errAlgebraicRelation = errors.New("algebraic relation does not hold") + errInvalidWitness = errors.New("witness length is invalid") ) func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bw6-633").Str("backend", "plonk").Logger() + + log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { @@ -96,37 +97,42 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // evaluation of Z=Xⁿ⁻¹ at ζ - var zetaPowerM, zzeta fr.Element + // evaluation of zhZeta=ζⁿ-1 + var zetaPowerM, zhZeta, lagrangeOne fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zzeta.Sub(&zetaPowerM, &one) + zhZeta.Sub(&zetaPowerM, &one) // ζⁿ-1 + lagrangeOne.Sub(&zeta, &one). // ζ-1 + Inverse(&lagrangeOne). // 1/(ζ-1) + Mul(&lagrangeOne, &zhZeta). // (ζ^n-1)/(ζ-1) + Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) // compute PI = ∑_{i if the poly is shifted and in canonical form the index is computed differently - const ( id_L int = iota id_R @@ -101,12 +96,12 @@ type Proof struct { // Commitment to Z, the permutation polynomial Z kzg.Digest - // Commitments to h1, h2, h3 such that h = h1 + Xh2 + X**2h3 is the quotient polynomial + // Commitments to h1, h2, h3 such that h = h1 + Xⁿ⁺²*h2 + X²⁽ⁿ⁺²⁾*h3 is the quotient polynomial H [3]kzg.Digest Bsb22Commitments []kzg.Digest - // Batch opening proof of h1 + zeta*h2 + zeta**2h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime + // Batch opening proof of linearizedPolynomial, l, r, o, s1, s2, qCPrime BatchedProof kzg.BatchOpeningProof // Opening proof of Z at zeta*mu @@ -151,14 +146,11 @@ func Prove(spr *cs.SparseR1CS, pk *ProvingKey, fullWitness witness.Witness, opts g.Go(instance.buildRatioCopyConstraint) // compute h - g.Go(instance.evaluateConstraints) + g.Go(instance.computeQuotient) // open Z (blinded) at ωζ (proof.ZShiftedOpening) g.Go(instance.openZ) - // fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) - g.Go(instance.foldH) - // linearized polynomial g.Go(instance.computeLinearizedPolynomial) @@ -192,9 +184,6 @@ type instance struct { h *iop.Polynomial // h is the quotient polynomial blindedZ []fr.Element // blindedZ is the blinded version of Z - foldedH []fr.Element // foldedH is the folded version of H - foldedHDigest kzg.Digest // foldedHDigest is the kzg commitment of foldedH - linearizedPolynomial []fr.Element linearizedPolynomialDigest kzg.Digest @@ -217,7 +206,6 @@ type instance struct { chRestoreLRO, chZOpening, chLinearizedPolynomial, - chFoldedH, chGammaBeta chan struct{} domain0, domain1 *fft.Domain @@ -248,7 +236,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi chH: make(chan struct{}, 1), chZOpening: make(chan struct{}, 1), chLinearizedPolynomial: make(chan 
struct{}, 1), - chFoldedH: make(chan struct{}, 1), chRestoreLRO: make(chan struct{}, 1), } s.initBSB22Commitments() @@ -268,8 +255,8 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT --> precomputing the twiddles - // and storing them in memory is costly given its size. --> do a FFT on the fly + // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles + // and storing them in memory is costly given its size. → do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -500,8 +487,8 @@ func (s *instance) deriveZeta() (err error) { return } -// evaluateConstraints computes H -func (s *instance) evaluateConstraints() (err error) { +// computeQuotient computes H +func (s *instance) computeQuotient() (err error) { s.x[id_Ql] = s.trace.Ql s.x[id_Qr] = s.trace.Qr s.x[id_Qm] = s.trace.Qm @@ -649,44 +636,6 @@ func (s *instance) h3() []fr.Element { return h3 } -// fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) -func (s *instance) foldH() error { - // wait for H to be committed and zeta to be derived (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chH: - } - var n big.Int - n.SetUint64(s.domain0.Cardinality + 2) - - var zetaPowerNplusTwo fr.Element - zetaPowerNplusTwo.Exp(s.zeta, &n) - zetaPowerNplusTwo.BigInt(&n) - - s.foldedHDigest.ScalarMultiplication(&s.proof.H[2], &n) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[1]) // ζᵐ⁺²*Comm(h3) - s.foldedHDigest.ScalarMultiplication(&s.foldedHDigest, &n) // ζ²⁽ᵐ⁺²⁾*Comm(h3) + ζᵐ⁺²*Comm(h2) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[0]) - - // fold H (H₀ + ζᵐ⁺²*H₁ + ζ²⁽ᵐ⁺²⁾H₂)) - h1 := s.h1() - h2 := s.h2() - s.foldedH = s.h3() - - for i := 0; i < int(s.domain0.Cardinality)+2; i++ { - s.foldedH[i]. - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h2[i]). - Mul(&s.foldedH[i], &zetaPowerNplusTwo). 
- Add(&s.foldedH[i], &h1[i]) - } - - close(s.chFoldedH) - - return nil -} - func (s *instance) computeLinearizedPolynomial() error { // wait for H to be committed and zeta to be derived (or ctx.Done()) @@ -766,13 +715,6 @@ func (s *instance) batchOpening() error { case <-s.chLRO: } - // wait for foldedH to be computed (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chFoldedH: - } - // wait for linearizedPolynomial to be computed (or ctx.Done()) select { case <-s.ctx.Done(): @@ -781,27 +723,25 @@ func (s *instance) batchOpening() error { } polysQcp := coefficients(s.trace.Qcp) - polysToOpen := make([][]fr.Element, 7+len(polysQcp)) - copy(polysToOpen[7:], polysQcp) - - polysToOpen[0] = s.foldedH - polysToOpen[1] = s.linearizedPolynomial - polysToOpen[2] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) - polysToOpen[3] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) - polysToOpen[4] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) - polysToOpen[5] = s.trace.S1.Coefficients() - polysToOpen[6] = s.trace.S2.Coefficients() - - digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+7) - copy(digestsToOpen[7:], s.pk.Vk.Qcp) - - digestsToOpen[0] = s.foldedHDigest - digestsToOpen[1] = s.linearizedPolynomialDigest - digestsToOpen[2] = s.proof.LRO[0] - digestsToOpen[3] = s.proof.LRO[1] - digestsToOpen[4] = s.proof.LRO[2] - digestsToOpen[5] = s.pk.Vk.S[0] - digestsToOpen[6] = s.pk.Vk.S[1] + polysToOpen := make([][]fr.Element, 6+len(polysQcp)) + copy(polysToOpen[6:], polysQcp) + + polysToOpen[0] = s.linearizedPolynomial + polysToOpen[1] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) + polysToOpen[2] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) + polysToOpen[3] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) + polysToOpen[4] = s.trace.S1.Coefficients() + polysToOpen[5] = s.trace.S2.Coefficients() + + digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+6) + copy(digestsToOpen[6:], s.pk.Vk.Qcp) + + digestsToOpen[0] = s.linearizedPolynomialDigest + digestsToOpen[1] = s.proof.LRO[0] + digestsToOpen[2] = s.proof.LRO[1] + digestsToOpen[3] = s.proof.LRO[2] + digestsToOpen[4] = s.pk.Vk.S[0] + digestsToOpen[5] = s.pk.Vk.S[1] var err error s.proof.BatchedProof, err = kzg.BatchOpenSinglePoint( @@ -1289,16 +1229,24 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // The Linearized polynomial is: // // α²*L₁(ζ)*Z(X) -// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) -// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) +// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) +// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) +// - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { + // TODO @gbotrel rename - // first part: individual constraints + + // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) - // second part: - // Z(μζ)(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*s3(X)-Z(X)(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ) + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + // the linearised polynomial is + // α²*L₁(ζ)*Z(X) + + // s1*s3(X)+s2*Z(X) + l(ζ)*Ql(X) + + // l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) 
+ o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) - + // Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) var s1, s2 fr.Element chS1 := make(chan struct{}, 1) go func() { @@ -1306,11 +1254,11 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s1.Mul(&s1, &beta).Add(&s1, &lZeta).Add(&s1, &gamma) // (l(ζ)+β*s1(ζ)+γ) close(chS1) }() - // ps2 := iop.NewPolynomial(&pk.S2Canonical, iop.Form{Basis: iop.Canonical, Layout: iop.Regular}) + tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) tmp.Mul(&tmp, &beta).Add(&tmp, &rZeta).Add(&tmp, &gamma) // (r(ζ)+β*s2(ζ)+γ) <-chS1 - s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ)*α var uzeta, uuzeta fr.Element uzeta.Mul(&zeta, &pk.Vk.CosetShift) @@ -1321,27 +1269,35 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &uuzeta).Add(&tmp, &oZeta).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - s2.Neg(&s2) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + s2.Neg(&s2).Mul(&s2, &alpha) - // third part L₁(ζ)*α²*Z - var lagrangeZeta, one, den, frNbElmt fr.Element + // Z_h(ζ), ζⁿ⁺², L₁(ζ)*α²*Z + var zhZeta, zetaNPlusTwo, alphaSquareLagrangeOne, one, den, frNbElmt fr.Element one.SetOne() nbElmt := int64(s.domain0.Cardinality) - lagrangeZeta.Set(&zeta). - Exp(lagrangeZeta, big.NewInt(nbElmt)). - Sub(&lagrangeZeta, &one) + alphaSquareLagrangeOne.Set(&zeta).Exp(alphaSquareLagrangeOne, big.NewInt(nbElmt)) // ζⁿ + zetaNPlusTwo.Mul(&alphaSquareLagrangeOne, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² + alphaSquareLagrangeOne.Sub(&alphaSquareLagrangeOne, &one) // ζⁿ - 1 + zhZeta.Set(&alphaSquareLagrangeOne) // Z_h(ζ) = ζⁿ - 1 frNbElmt.SetUint64(uint64(nbElmt)) - den.Sub(&zeta, &one). - Inverse(&den) - lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ⁻¹)/(ζ-1) - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // (1/n)*α²*L₁(ζ) + den.Sub(&zeta, &one).Inverse(&den) // 1/(ζ-1) + alphaSquareLagrangeOne.Mul(&alphaSquareLagrangeOne, &den). // L₁ = (ζⁿ - 1)/(ζ-1) + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &alpha). 
+ Mul(&alphaSquareLagrangeOne, &s.domain0.CardinalityInv) // α²*L₁(ζ) s3canonical := s.trace.S3.Coefficients() s.trace.Qk.ToCanonical(s.domain0).ToRegular() + // the hi are all of the same length + h1 := s.h1() + h2 := s.h2() + h3 := s.h3() + + // at this stage we have + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) utils.Parallelize(len(blindedZCanonical), func(start, end int) { cql := s.trace.Ql.Coefficients() @@ -1353,43 +1309,39 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var t, t0, t1 fr.Element for i := start; i < end; i++ { - - t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - + t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) if i < len(s3canonical) { - - t0.Mul(&s3canonical[i], &s1) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*β*s3(X) - + t0.Mul(&s3canonical[i], &s1) // α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*β*s3(X) t.Add(&t, &t0) } - - t.Mul(&t, &alpha) // α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)) - if i < len(cqm) { - - t1.Mul(&cqm[i], &rl) // linPol = linPol + l(ζ)r(ζ)*Qm(X) - - t0.Mul(&cql[i], &lZeta) - t0.Add(&t0, &t1) - - t.Add(&t, &t0) // linPol = linPol + l(ζ)*Ql(X) - - t0.Mul(&cqr[i], &rZeta) - t.Add(&t, &t0) // linPol = linPol + r(ζ)*Qr(X) - - t0.Mul(&cqo[i], &oZeta) - t0.Add(&t0, &cqk[i]) - - t.Add(&t, &t0) // linPol = linPol + o(ζ)*Qo(X) + Qk(X) - - for j := range qcpZeta { + t1.Mul(&cqm[i], &rl) // l(ζ)r(ζ)*Qm(X) + t.Add(&t, &t1) // linPol += l(ζ)r(ζ)*Qm(X) + t0.Mul(&cql[i], &lZeta) // l(ζ)Q_l(X) + t.Add(&t, &t0) // linPol += l(ζ)*Ql(X) + t0.Mul(&cqr[i], &rZeta) //r(ζ)*Qr(X) + t.Add(&t, &t0) // linPol += r(ζ)*Qr(X) + t0.Mul(&cqo[i], &oZeta) // o(ζ)*Qo(X) + t.Add(&t, &t0) // linPol += o(ζ)*Qo(X) + t.Add(&t, &cqk[i]) // linPol += Qk(X) + for j := range qcpZeta { // linPol += ∑ᵢQcp_(ζ)Pi_(X) t0.Mul(&pi2Canonical[j][i], &qcpZeta[j]) t.Add(&t, &t0) } } - t0.Mul(&blindedZCanonical[i], &lagrangeZeta) - blindedZCanonical[i].Add(&t, &t0) // finish the computation + t0.Mul(&blindedZCanonical[i], &alphaSquareLagrangeOne) // α²L₁(ζ)Z(X) + blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) + + if i < len(h1) { + t.Mul(&h3[i], &zetaNPlusTwo). + Add(&t, &h2[i]). + Mul(&t, &zetaNPlusTwo). + Add(&t, &h1[i]) + t.Mul(&t, &zhZeta) + blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + } + } }) return blindedZCanonical diff --git a/backend/plonk/bw6-761/verify.go b/backend/plonk/bw6-761/verify.go index 17edc2fdef..96553b46c4 100644 --- a/backend/plonk/bw6-761/verify.go +++ b/backend/plonk/bw6-761/verify.go @@ -39,12 +39,13 @@ import ( ) var ( - errWrongClaimedQuotient = errors.New("claimed quotient is not as expected") - errInvalidWitness = errors.New("witness length is invalid") + errAlgebraicRelation = errors.New("algebraic relation does not hold") + errInvalidWitness = errors.New("witness length is invalid") ) func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bw6-761").Str("backend", "plonk").Logger() + + log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) 
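In the prove.go hunks above, s1 and s2 now absorb the α factor when they are assembled, so the old per-coefficient t.Mul(&t, &alpha) inside the Parallelize loop disappears. A toy check that the two orders of multiplication agree, using arbitrary constants and gnark-crypto's bn254 fr arithmetic:

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

func main() {
	var alpha, s1, s2, s3i, zi fr.Element
	alpha.SetUint64(5)
	s1.SetUint64(7)
	s2.SetUint64(11)
	s3i.SetUint64(13) // one coefficient of s3(X)
	zi.SetUint64(17)  // one coefficient of Z(X)

	// old: t = α*(s1*s3ᵢ + s2*zᵢ), α applied once per coefficient
	var told, u fr.Element
	told.Mul(&s1, &s3i)
	u.Mul(&s2, &zi)
	told.Add(&told, &u).Mul(&told, &alpha)

	// new: s1, s2 already carry α, no multiplication left in the loop
	var as1, as2, tnew fr.Element
	as1.Mul(&s1, &alpha)
	as2.Mul(&s2, &alpha)
	tnew.Mul(&as1, &s3i)
	u.Mul(&as2, &zi)
	tnew.Add(&tnew, &u)

	fmt.Println("equal:", told.Equal(&tnew)) // expected: true
}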
if err != nil { @@ -96,37 +97,42 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // evaluation of Z=Xⁿ⁻¹ at ζ - var zetaPowerM, zzeta fr.Element + // evaluation of zhZeta=ζⁿ-1 + var zetaPowerM, zhZeta, lagrangeOne fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zzeta.Sub(&zetaPowerM, &one) + zhZeta.Sub(&zetaPowerM, &one) // ζⁿ-1 + lagrangeOne.Sub(&zeta, &one). // ζ-1 + Inverse(&lagrangeOne). // 1/(ζ-1) + Mul(&lagrangeOne, &zhZeta). // (ζ^n-1)/(ζ-1) + Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) // compute PI = ∑_{i if the poly is shifted and in canonical form the index is computed differently - const ( id_L int = iota id_R @@ -78,12 +73,12 @@ type Proof struct { // Commitment to Z, the permutation polynomial Z kzg.Digest - // Commitments to h1, h2, h3 such that h = h1 + Xh2 + X**2h3 is the quotient polynomial + // Commitments to h1, h2, h3 such that h = h1 + Xⁿ⁺²*h2 + X²⁽ⁿ⁺²⁾*h3 is the quotient polynomial H [3]kzg.Digest Bsb22Commitments []kzg.Digest - // Batch opening proof of h1 + zeta*h2 + zeta**2h3, linearizedPolynomial, l, r, o, s1, s2, qCPrime + // Batch opening proof of linearizedPolynomial, l, r, o, s1, s2, qCPrime BatchedProof kzg.BatchOpeningProof // Opening proof of Z at zeta*mu @@ -128,14 +123,11 @@ func Prove(spr *cs.SparseR1CS, pk *ProvingKey, fullWitness witness.Witness, opts g.Go(instance.buildRatioCopyConstraint) // compute h - g.Go(instance.evaluateConstraints) + g.Go(instance.computeQuotient) // open Z (blinded) at ωζ (proof.ZShiftedOpening) g.Go(instance.openZ) - // fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) - g.Go(instance.foldH) - // linearized polynomial g.Go(instance.computeLinearizedPolynomial) @@ -169,9 +161,6 @@ type instance struct { h *iop.Polynomial // h is the quotient polynomial blindedZ []fr.Element // blindedZ is the blinded version of Z - foldedH []fr.Element // foldedH is the folded version of H - foldedHDigest kzg.Digest // foldedHDigest is the kzg commitment of foldedH - linearizedPolynomial []fr.Element linearizedPolynomialDigest kzg.Digest @@ -194,7 +183,6 @@ type instance struct { chRestoreLRO, chZOpening, chLinearizedPolynomial, - chFoldedH, chGammaBeta chan struct{} domain0, domain1 *fft.Domain @@ -225,7 +213,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi chH: make(chan struct{}, 1), chZOpening: make(chan struct{}, 1), chLinearizedPolynomial: make(chan struct{}, 1), - chFoldedH: make(chan struct{}, 1), chRestoreLRO: make(chan struct{}, 1), } s.initBSB22Commitments() @@ -235,18 +222,18 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi // init fft domains nbConstraints := spr.GetNbConstraints() sizeSystem := uint64(nbConstraints + len(spr.Public)) // len(spr.Public) is for the placeholder constraints - s.domain0 = fft.NewDomain(sizeSystem) + s.domain0 = fft.NewDomain(sizeSystem) // h, the quotient polynomial is of degree 3(n+1)+2, so it's in a 3(n+2) dim vector space, // the domain is the next power of 2 superior to 3(n+2). 4*domainNum is enough in all cases // except when n<6. 
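The domain-sizing comment in newInstance can be checked numerically for the n ≥ 6 case it makes a claim about: 4n is already at least 3(n + 2), and rounding the requested size up to the next power of two (which is what the FFT domain construction amounts to) only enlarges it. A small self-contained check, plain Go with no gnark dependency:

package main

import "fmt"

func main() {
	// The quotient h has degree 3(n+1)+2, i.e. 3(n+2) coefficients.
	// The comment claims a domain of size 4n is enough once n >= 6.
	for n := uint64(6); n <= 1<<20; n++ {
		if 4*n < 3*(n+2) {
			fmt.Println("claim fails for n =", n)
			return
		}
	}
	fmt.Println("4n >= 3(n+2) holds for every n >= 6")
}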
if sizeSystem < 6 { - s.domain1 = fft.NewDomain(8 * sizeSystem, fft.WithoutPrecompute()) + s.domain1 = fft.NewDomain(8*sizeSystem, fft.WithoutPrecompute()) } else { - s.domain1 = fft.NewDomain(4 * sizeSystem, fft.WithoutPrecompute()) + s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT --> precomputing the twiddles - // and storing them in memory is costly given its size. --> do a FFT on the fly + // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles + // and storing them in memory is costly given its size. → do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -271,7 +258,7 @@ func (s *instance) initBSB22Commitments() { // override the hint for the commitment constraints bsb22ID := solver.GetHintID(fcs.Bsb22CommitmentComputePlaceholder) - s.opt.SolverOpts = append(s.opt.SolverOpts, solver.OverrideHint(bsb22ID, s.bsb22Hint)) + s.opt.SolverOpts = append(s.opt.SolverOpts, solver.OverrideHint(bsb22ID, s.bsb22Hint)) } // Computing and verifying Bsb22 multi-commits explained in https://hackmd.io/x8KsadW3RRyX7YTCFJIkHg @@ -279,7 +266,7 @@ func (s *instance) bsb22Hint(_ *big.Int, ins, outs []*big.Int) error { var err error commDepth := int(ins[0].Int64()) ins = ins[1:] - + res := &s.commitmentVal[commDepth] commitmentInfo := s.spr.CommitmentInfo.(constraint.PlonkCommitments)[commDepth] @@ -298,13 +285,13 @@ func (s *instance) bsb22Hint(_ *big.Int, ins, outs []*big.Int) error { if s.proof.Bsb22Commitments[commDepth], err = kzg.Commit(s.cCommitments[commDepth].Coefficients(), s.pk.KzgLagrange); err != nil { return err } - + s.htfFunc.Write(s.proof.Bsb22Commitments[commDepth].Marshal()) hashBts := s.htfFunc.Sum(nil) s.htfFunc.Reset() nbBuf := fr.Bytes if s.htfFunc.Size() < fr.Bytes { - nbBuf = s.htfFunc.Size() + nbBuf = s.htfFunc.Size() } res.SetBytes(hashBts[:nbBuf]) // TODO @Tabaie use CommitmentIndex for this; create a new variable CommitmentConstraintIndex for other uses res.BigInt(outs[0]) @@ -477,8 +464,8 @@ func (s *instance) deriveZeta() (err error) { return } -// evaluateConstraints computes H -func (s *instance) evaluateConstraints() (err error) { +// computeQuotient computes H +func (s *instance) computeQuotient() (err error) { s.x[id_Ql] = s.trace.Ql s.x[id_Qr] = s.trace.Qr s.x[id_Qm] = s.trace.Qm @@ -626,44 +613,6 @@ func (s *instance) h3() []fr.Element { return h3 } -// fold the commitment to H ([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾[H₂]) -func (s *instance) foldH() error { - // wait for H to be committed and zeta to be derived (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chH: - } - var n big.Int - n.SetUint64(s.domain0.Cardinality + 2) - - var zetaPowerNplusTwo fr.Element - zetaPowerNplusTwo.Exp(s.zeta, &n) - zetaPowerNplusTwo.BigInt(&n) - - s.foldedHDigest.ScalarMultiplication(&s.proof.H[2], &n) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[1]) // ζᵐ⁺²*Comm(h3) - s.foldedHDigest.ScalarMultiplication(&s.foldedHDigest, &n) // ζ²⁽ᵐ⁺²⁾*Comm(h3) + ζᵐ⁺²*Comm(h2) - s.foldedHDigest.Add(&s.foldedHDigest, &s.proof.H[0]) - - // fold H (H₀ + ζᵐ⁺²*H₁ + ζ²⁽ᵐ⁺²⁾H₂)) - h1 := s.h1() - h2 := s.h2() - s.foldedH = s.h3() - - for i := 0; i < int(s.domain0.Cardinality)+2; i++ { - s.foldedH[i]. - Mul(&s.foldedH[i], &zetaPowerNplusTwo). - Add(&s.foldedH[i], &h2[i]). - Mul(&s.foldedH[i], &zetaPowerNplusTwo). 
- Add(&s.foldedH[i], &h1[i]) - } - - close(s.chFoldedH) - - return nil -} - func (s *instance) computeLinearizedPolynomial() error { // wait for H to be committed and zeta to be derived (or ctx.Done()) @@ -743,13 +692,6 @@ func (s *instance) batchOpening() error { case <-s.chLRO: } - // wait for foldedH to be computed (or ctx.Done()) - select { - case <-s.ctx.Done(): - return errContextDone - case <-s.chFoldedH: - } - // wait for linearizedPolynomial to be computed (or ctx.Done()) select { case <-s.ctx.Done(): @@ -758,27 +700,25 @@ func (s *instance) batchOpening() error { } polysQcp := coefficients(s.trace.Qcp) - polysToOpen := make([][]fr.Element, 7+len(polysQcp)) - copy(polysToOpen[7:], polysQcp) - - polysToOpen[0] = s.foldedH - polysToOpen[1] = s.linearizedPolynomial - polysToOpen[2] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) - polysToOpen[3] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) - polysToOpen[4] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) - polysToOpen[5] = s.trace.S1.Coefficients() - polysToOpen[6] = s.trace.S2.Coefficients() - - digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+7) - copy(digestsToOpen[7:], s.pk.Vk.Qcp) - - digestsToOpen[0] = s.foldedHDigest - digestsToOpen[1] = s.linearizedPolynomialDigest - digestsToOpen[2] = s.proof.LRO[0] - digestsToOpen[3] = s.proof.LRO[1] - digestsToOpen[4] = s.proof.LRO[2] - digestsToOpen[5] = s.pk.Vk.S[0] - digestsToOpen[6] = s.pk.Vk.S[1] + polysToOpen := make([][]fr.Element, 6+len(polysQcp)) + copy(polysToOpen[6:], polysQcp) + + polysToOpen[0] = s.linearizedPolynomial + polysToOpen[1] = getBlindedCoefficients(s.x[id_L], s.bp[id_Bl]) + polysToOpen[2] = getBlindedCoefficients(s.x[id_R], s.bp[id_Br]) + polysToOpen[3] = getBlindedCoefficients(s.x[id_O], s.bp[id_Bo]) + polysToOpen[4] = s.trace.S1.Coefficients() + polysToOpen[5] = s.trace.S2.Coefficients() + + digestsToOpen := make([]curve.G1Affine, len(s.pk.Vk.Qcp)+6) + copy(digestsToOpen[6:], s.pk.Vk.Qcp) + + digestsToOpen[0] = s.linearizedPolynomialDigest + digestsToOpen[1] = s.proof.LRO[0] + digestsToOpen[2] = s.proof.LRO[1] + digestsToOpen[3] = s.proof.LRO[2] + digestsToOpen[4] = s.pk.Vk.S[0] + digestsToOpen[5] = s.pk.Vk.S[1] var err error s.proof.BatchedProof, err = kzg.BatchOpenSinglePoint( @@ -796,7 +736,7 @@ func (s *instance) batchOpening() error { // evaluate the full set of constraints, all polynomials in x are back in // canonical regular form at the end func (s *instance) computeNumerator() (*iop.Polynomial, error) { - // init vectors that are used multiple times throughout the computation + // init vectors that are used multiple times throughout the computation n := s.domain0.Cardinality twiddles0 := make([]fr.Element, n) if n == 1 { @@ -821,8 +761,6 @@ func (s *instance) computeNumerator() (*iop.Polynomial, error) { case <-s.chQk: } - - nbBsbGates := (len(s.x) - id_Qci + 1) >> 1 gateConstraint := func(u ...fr.Element) fr.Element { @@ -844,7 +782,6 @@ func (s *instance) computeNumerator() (*iop.Polynomial, error) { return ic } - var cs, css fr.Element cs.Set(&s.domain1.FrMultiplicativeGen) css.Square(&cs) @@ -1041,7 +978,7 @@ func (s *instance) computeNumerator() (*iop.Polynomial, error) { batchApply(s.x, func(p *iop.Polynomial) { if p == nil { - return + return } p.ToCanonical(s.domain0, 8).ToRegular() scalePowers(p, cs) @@ -1269,28 +1206,36 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // The Linearized polynomial is: // // α²*L₁(ζ)*Z(X) -// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - 
Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) -// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) +// + α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(β*s3(X))*Z(μζ) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) +// + l(ζ)*Ql(X) + l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) +// - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { + // TODO @gbotrel rename - // first part: individual constraints + + // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) - // second part: - // Z(μζ)(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*s3(X)-Z(X)(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ) + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + // the linearised polynomial is + // α²*L₁(ζ)*Z(X) + + // s1*s3(X)+s2*Z(X) + l(ζ)*Ql(X) + + // l(ζ)r(ζ)*Qm(X) + r(ζ)*Qr(X) + o(ζ)*Qo(X) + Qk(X) + ∑ᵢQcp_(ζ)Pi_(X) - + // Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) var s1, s2 fr.Element chS1 := make(chan struct{}, 1) go func() { - s1 = s.trace.S1.Evaluate(zeta) // s1(ζ) + s1 = s.trace.S1.Evaluate(zeta) // s1(ζ) s1.Mul(&s1, &beta).Add(&s1, &lZeta).Add(&s1, &gamma) // (l(ζ)+β*s1(ζ)+γ) close(chS1) }() - // ps2 := iop.NewPolynomial(&pk.S2Canonical, iop.Form{Basis: iop.Canonical, Layout: iop.Regular}) - tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) + + tmp := s.trace.S2.Evaluate(zeta) // s2(ζ) tmp.Mul(&tmp, &beta).Add(&tmp, &rZeta).Add(&tmp, &gamma) // (r(ζ)+β*s2(ζ)+γ) <-chS1 - s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + s1.Mul(&s1, &tmp).Mul(&s1, &zu).Mul(&s1, &beta).Mul(&s1, &alpha) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ)*α var uzeta, uuzeta fr.Element uzeta.Mul(&zeta, &pk.Vk.CosetShift) @@ -1301,27 +1246,35 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) tmp.Mul(&beta, &uuzeta).Add(&tmp, &oZeta).Add(&tmp, &gamma) // (o(ζ)+β*u²*ζ+γ) s2.Mul(&s2, &tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - s2.Neg(&s2) // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + s2.Neg(&s2).Mul(&s2, &alpha) - // third part L₁(ζ)*α²*Z - var lagrangeZeta, one, den, frNbElmt fr.Element + // Z_h(ζ), ζⁿ⁺², L₁(ζ)*α²*Z + var zhZeta, zetaNPlusTwo, alphaSquareLagrangeOne, one, den, frNbElmt fr.Element one.SetOne() nbElmt := int64(s.domain0.Cardinality) - lagrangeZeta.Set(&zeta). - Exp(lagrangeZeta, big.NewInt(nbElmt)). - Sub(&lagrangeZeta, &one) + alphaSquareLagrangeOne.Set(&zeta).Exp(alphaSquareLagrangeOne, big.NewInt(nbElmt)) // ζⁿ + zetaNPlusTwo.Mul(&alphaSquareLagrangeOne, &zeta).Mul(&zetaNPlusTwo, &zeta) // ζⁿ⁺² + alphaSquareLagrangeOne.Sub(&alphaSquareLagrangeOne, &one) // ζⁿ - 1 + zhZeta.Set(&alphaSquareLagrangeOne) // Z_h(ζ) = ζⁿ - 1 frNbElmt.SetUint64(uint64(nbElmt)) - den.Sub(&zeta, &one). - Inverse(&den) - lagrangeZeta.Mul(&lagrangeZeta, &den). // L₁ = (ζⁿ⁻¹)/(ζ-1) - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &alpha). - Mul(&lagrangeZeta, &s.domain0.CardinalityInv) // (1/n)*α²*L₁(ζ) + den.Sub(&zeta, &one).Inverse(&den) // 1/(ζ-1) + alphaSquareLagrangeOne.Mul(&alphaSquareLagrangeOne, &den). // L₁ = (ζⁿ - 1)/(ζ-1) + Mul(&alphaSquareLagrangeOne, &alpha). + Mul(&alphaSquareLagrangeOne, &alpha). 
+ Mul(&alphaSquareLagrangeOne, &s.domain0.CardinalityInv) // α²*L₁(ζ) s3canonical := s.trace.S3.Coefficients() s.trace.Qk.ToCanonical(s.domain0).ToRegular() + // the hi are all of the same length + h1 := s.h1() + h2 := s.h2() + h3 := s.h3() + + // at this stage we have + // s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) + // s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) utils.Parallelize(len(blindedZCanonical), func(start, end int) { cql := s.trace.Ql.Coefficients() @@ -1333,46 +1286,42 @@ func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, var t, t0, t1 fr.Element for i := start; i < end; i++ { - - t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - + t.Mul(&blindedZCanonical[i], &s2) // -Z(X)*α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) if i < len(s3canonical) { - - t0.Mul(&s3canonical[i], &s1) // (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*β*s3(X) - + t0.Mul(&s3canonical[i], &s1) // α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ)*β*s3(X) t.Add(&t, &t0) } - - t.Mul(&t, &alpha) // α*( (l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*s3(X) - Z(X)*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)) - if i < len(cqm) { - - t1.Mul(&cqm[i], &rl) // linPol = linPol + l(ζ)r(ζ)*Qm(X) - - t0.Mul(&cql[i], &lZeta) - t0.Add(&t0, &t1) - - t.Add(&t, &t0) // linPol = linPol + l(ζ)*Ql(X) - - t0.Mul(&cqr[i], &rZeta) - t.Add(&t, &t0) // linPol = linPol + r(ζ)*Qr(X) - - t0.Mul(&cqo[i], &oZeta) - t0.Add(&t0, &cqk[i]) - - t.Add(&t, &t0) // linPol = linPol + o(ζ)*Qo(X) + Qk(X) - - for j := range qcpZeta { + t1.Mul(&cqm[i], &rl) // l(ζ)r(ζ)*Qm(X) + t.Add(&t, &t1) // linPol += l(ζ)r(ζ)*Qm(X) + t0.Mul(&cql[i], &lZeta) // l(ζ)Q_l(X) + t.Add(&t, &t0) // linPol += l(ζ)*Ql(X) + t0.Mul(&cqr[i], &rZeta) //r(ζ)*Qr(X) + t.Add(&t, &t0) // linPol += r(ζ)*Qr(X) + t0.Mul(&cqo[i], &oZeta) // o(ζ)*Qo(X) + t.Add(&t, &t0) // linPol += o(ζ)*Qo(X) + t.Add(&t, &cqk[i]) // linPol += Qk(X) + for j := range qcpZeta { // linPol += ∑ᵢQcp_(ζ)Pi_(X) t0.Mul(&pi2Canonical[j][i], &qcpZeta[j]) t.Add(&t, &t0) } } - t0.Mul(&blindedZCanonical[i], &lagrangeZeta) - blindedZCanonical[i].Add(&t, &t0) // finish the computation + t0.Mul(&blindedZCanonical[i], &alphaSquareLagrangeOne) // α²L₁(ζ)Z(X) + blindedZCanonical[i].Add(&t, &t0) // linPol += α²L₁(ζ)Z(X) + + if i < len(h1) { + t.Mul(&h3[i], &zetaNPlusTwo). + Add(&t, &h2[i]). + Mul(&t, &zetaNPlusTwo). 
+ Add(&t, &h1[i]) + t.Mul(&t, &zhZeta) + blindedZCanonical[i].Sub(&blindedZCanonical[i], &t) // linPol -= Z_h(ζ)*(H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) + } + } }) return blindedZCanonical } -var errContextDone = errors.New("context done") \ No newline at end of file +var errContextDone = errors.New("context done") diff --git a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl index 6d2c6bccb0..8543458c69 100644 --- a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl +++ b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl @@ -22,12 +22,13 @@ import ( ) var ( - errWrongClaimedQuotient = errors.New("claimed quotient is not as expected") - errInvalidWitness = errors.New("witness length is invalid") + errAlgebraicRelation = errors.New("algebraic relation does not hold") + errInvalidWitness = errors.New("witness length is invalid") ) func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "{{ toLower .Curve }}").Str("backend", "plonk").Logger() + + log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { @@ -42,7 +43,6 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return errInvalidWitness } - // transcript to derive the challenge fs := fiatshamir.NewTranscript(cfg.ChallengeHash, "gamma", "beta", "alpha", "zeta") @@ -80,37 +80,42 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // evaluation of Z=Xⁿ⁻¹ at ζ - var zetaPowerM, zzeta fr.Element + // evaluation of zhZeta=ζⁿ-1 + var zetaPowerM, zhZeta, lagrangeOne fr.Element var bExpo big.Int one := fr.One() bExpo.SetUint64(vk.Size) zetaPowerM.Exp(zeta, &bExpo) - zzeta.Sub(&zetaPowerM, &one) + zhZeta.Sub(&zetaPowerM, &one) // ζⁿ-1 + lagrangeOne.Sub(&zeta, &one). // ζ-1 + Inverse(&lagrangeOne). // 1/(ζ-1) + Mul(&lagrangeOne, &zhZeta). 
// (ζ^n-1)/(ζ-1) + Mul(&lagrangeOne, &vk.SizeInv) // 1/n * (ζ^n-1)/(ζ-1) // compute PI = ∑_{i Date: Thu, 25 Jan 2024 17:04:21 +0100 Subject: [PATCH 09/55] feat: fix unmarshalling solidity --- backend/plonk/bn254/unmarshal.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/backend/plonk/bn254/unmarshal.go b/backend/plonk/bn254/unmarshal.go index bdc578cac2..cc3e7b8add 100644 --- a/backend/plonk/bn254/unmarshal.go +++ b/backend/plonk/bn254/unmarshal.go @@ -41,7 +41,7 @@ func UnmarshalSolidity(s []byte, nbCommits int) Proof { // uint256 o_at_zeta; // uint256 s1_at_zeta; // uint256 s2_at_zeta; - for i := 2; i < 7; i++ { + for i := 1; i < 6; i++ { proof.BatchedProof.ClaimedValues[i].SetBytes(s[offset : offset+fr_size]) offset += fr_size } @@ -59,8 +59,6 @@ func UnmarshalSolidity(s []byte, nbCommits int) Proof { // uint256 linearization_polynomial_at_zeta; proof.BatchedProof.ClaimedValues[0].SetBytes(s[offset : offset+fr_size]) offset += fr_size - proof.BatchedProof.ClaimedValues[1].SetBytes(s[offset : offset+fr_size]) - offset += fr_size // uint256 opening_at_zeta_proof_x; // uint256 opening_at_zeta_proof_y; From f6f0d877e280ee5fb9091b32f83cab1d94d39418 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 11:57:49 +0100 Subject: [PATCH 10/55] fix: fixed MarshalSolidity --- backend/plonk/bn254/solidity.go | 239 +++++++++++++++----------------- 1 file changed, 114 insertions(+), 125 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 02bd82412e..5bd1b0ef9a 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -88,27 +88,26 @@ contract PlonkVerifier { uint256 private constant PROOF_R_AT_ZETA = 0x1a0; uint256 private constant PROOF_O_AT_ZETA = 0x1c0; - //uint256[STATE_WIDTH-1] permutation_polynomials_at_zeta; // Sσ1(zeta),Sσ2(zeta) + // S1(zeta),S2(zeta) uint256 private constant PROOF_S1_AT_ZETA = 0x1e0; // Sσ1(zeta) uint256 private constant PROOF_S2_AT_ZETA = 0x200; // Sσ2(zeta) - //Bn254.G1Point grand_product_commitment; // [z(x)] + // [Z] uint256 private constant PROOF_GRAND_PRODUCT_COMMITMENT_X = 0x220; uint256 private constant PROOF_GRAND_PRODUCT_COMMITMENT_Y = 0x240; uint256 private constant PROOF_GRAND_PRODUCT_AT_ZETA_OMEGA = 0x260; // z(w*zeta) - uint256 private constant PROOF_QUOTIENT_POLYNOMIAL_AT_ZETA = 0x280; // t(zeta) - uint256 private constant PROOF_LINEARISED_POLYNOMIAL_AT_ZETA = 0x2a0; // r(zeta) + uint256 private constant PROOF_LINEARISED_POLYNOMIAL_AT_ZETA = 0x280; // r(zeta) - // Folded proof for the opening of H, linearised poly, l, r, o, s_1, s_2, qcp - uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_X = 0x2c0; // [Wzeta] - uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_Y = 0x2e0; + // Folded proof for the opening of linearised poly, l, r, o, s_1, s_2, qcp + uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_X = 0x2a0; + uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_Y = 0x2c0; - uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_X = 0x300; - uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_Y = 0x320; + uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_X = 0x3e0; + uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_Y = 0x300; - uint256 private constant PROOF_OPENING_QCP_AT_ZETA = 0x340; - uint256 private constant PROOF_COMMITMENTS_WIRES_CUSTOM_GATES = {{ hex (add 832 (mul (len .CommitmentConstraintIndexes) 32 ) )}}; + uint256 private constant PROOF_OPENING_QCP_AT_ZETA = 0x320; + uint256 private constant PROOF_BSB_COMMITMENTS = {{ hex 
(add 800 (mul (len .CommitmentConstraintIndexes) 32 ) )}}; // -> next part of proof is // [ openings_selector_commits || commitments_wires_commit_api] @@ -150,6 +149,13 @@ contract PlonkVerifier { uint256 private constant STATE_LAST_MEM = 0x220; + // -------- utils (for Fiat Shamir) + uint256 private constant FS_ALPHA = 0x616C706861; // "alpha" + uint256 private constant FS_BETA = 0x62657461; // "beta" + uint256 private constant FS_GAMMA = 0x67616d6d61; // "gamma" + uint256 private constant FS_ZETA = 0x7a657461; // "zeta" + uint256 private constant FS_GAMMA_KZG = 0x67616d6d61; // "gamma" + // -------- errors uint256 private constant ERROR_STRING_ID = 0x08c379a000000000000000000000000000000000000000000000000000000000; // selector for function Error(string) @@ -196,15 +202,15 @@ contract PlonkVerifier { mstore(add(mem, STATE_ZETA_POWER_N_MINUS_ONE), zeta_power_n_minus_one) // public inputs contribution - let l_pi := sum_pi_wo_api_commit(public_inputs.offset, public_inputs.length, freeMem) + let l_wocommit := sum_pi_wo_api_commit(public_inputs.offset, public_inputs.length, freeMem) {{ if (gt (len .CommitmentConstraintIndexes) 0 ) -}} - let l_wocommit := sum_pi_commit(proof.offset, public_inputs.length, freeMem) + let l_pi := sum_pi_commit(proof.offset, public_inputs.length, freeMem) l_pi := addmod(l_wocommit, l_pi, R_MOD) {{ end -}} mstore(add(mem, STATE_PI), l_pi) compute_alpha_square_lagrange_0() - verify_quotient_poly_eval_at_zeta(proof.offset) + verify_opening_linearised_polynomial(proof.offset) fold_h(proof.offset) compute_commitment_linearised_polynomial(proof.offset) compute_gamma_kzg(proof.offset) @@ -329,12 +335,6 @@ contract PlonkVerifier { if gt(calldataload(p), R_MOD_MINUS_ONE) { error_proof_openings_size() } - - // quotient polynomial at zeta - p := add(aproof, PROOF_QUOTIENT_POLYNOMIAL_AT_ZETA) - if gt(calldataload(p), R_MOD_MINUS_ONE) { - error_proof_openings_size() - } // PROOF_L_AT_ZETA p := add(aproof, PROOF_L_AT_ZETA) @@ -412,7 +412,7 @@ contract PlonkVerifier { // gamma // gamma in ascii is [0x67,0x61,0x6d, 0x6d, 0x61] // (same for alpha, beta, zeta) - mstore(mPtr, 0x67616d6d61) // "gamma" + mstore(mPtr, FS_GAMMA) // "gamma" mstore(add(mPtr, 0x20), VK_S1_COM_X) mstore(add(mPtr, 0x40), VK_S1_COM_Y) @@ -472,7 +472,7 @@ contract PlonkVerifier { let mPtr := add(mload(0x40), STATE_LAST_MEM) // beta - mstore(mPtr, 0x62657461) // "beta" + mstore(mPtr, FS_BETA) // "beta" mstore(add(mPtr, 0x20), gamma_not_reduced) let l_success := staticcall(gas(), 0x2, add(mPtr, 0x1c), 0x24, mPtr, 0x20) //0x1b -> 000.."gamma" if iszero(l_success) { @@ -496,13 +496,13 @@ contract PlonkVerifier { let full_size := 0x65 // size("alpha") + 0x20 (previous challenge) // alpha - mstore(mPtr, 0x616C706861) // "alpha" + mstore(mPtr, FS_ALPHA) // "alpha" let _mPtr := add(mPtr, 0x20) mstore(_mPtr, beta_not_reduced) _mPtr := add(_mPtr, 0x20) {{ if (gt (len .CommitmentConstraintIndexes) 0 )}} // Bsb22Commitments - let proof_bsb_commitments := add(aproof, PROOF_COMMITMENTS_WIRES_CUSTOM_GATES) + let proof_bsb_commitments := add(aproof, PROOF_BSB_COMMITMENTS) let size_bsb_commitments := mul(0x40, VK_NB_CUSTOM_GATES) calldatacopy(_mPtr, proof_bsb_commitments, size_bsb_commitments) _mPtr := add(_mPtr, size_bsb_commitments) @@ -530,7 +530,7 @@ contract PlonkVerifier { let mPtr := add(mload(0x40), STATE_LAST_MEM) // zeta - mstore(mPtr, 0x7a657461) // "zeta" + mstore(mPtr, FS_ZETA) // "zeta" mstore(add(mPtr, 0x20), alpha_not_reduced) calldatacopy(add(mPtr, 0x40), add(aproof, PROOF_H_0_X), 0xc0) let l_success := 
staticcall(gas(), 0x2, add(mPtr, 0x1c), 0xe4, mPtr, 0x20) @@ -639,7 +639,7 @@ contract PlonkVerifier { let z := mload(add(state, STATE_ZETA)) let zpnmo := mload(add(state, STATE_ZETA_POWER_N_MINUS_ONE)) - let p := add(aproof, PROOF_COMMITMENTS_WIRES_CUSTOM_GATES) + let p := add(aproof, PROOF_BSB_COMMITMENTS) let h_fr, ith_lagrange @@ -914,14 +914,10 @@ contract PlonkVerifier { let acc_gamma := l_gamma_kzg let state_folded_digests := add(state, STATE_FOLDED_DIGESTS_X) - mstore(add(state, STATE_FOLDED_DIGESTS_X), mload(add(state, STATE_FOLDED_H_X))) - mstore(add(state, STATE_FOLDED_DIGESTS_Y), mload(add(state, STATE_FOLDED_H_Y))) - mstore(add(state, STATE_FOLDED_CLAIMED_VALUES), calldataload(add(aproof, PROOF_QUOTIENT_POLYNOMIAL_AT_ZETA))) - - point_acc_mul(state_folded_digests, add(state, STATE_LINEARISED_POLYNOMIAL_X), acc_gamma, mPtr) - fr_acc_mul_calldata(add(state, STATE_FOLDED_CLAIMED_VALUES), add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA), acc_gamma) + mstore(add(state, STATE_FOLDED_DIGESTS_X), mload(add(state, STATE_LINEARISED_POLYNOMIAL_X))) + mstore(add(state, STATE_FOLDED_DIGESTS_Y), mload(add(state, STATE_LINEARISED_POLYNOMIAL_Y))) + mstore(add(state, STATE_FOLDED_CLAIMED_VALUES), calldataload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA))) - acc_gamma := mulmod(acc_gamma, l_gamma_kzg, R_MOD) point_acc_mul_calldata(add(state, STATE_FOLDED_DIGESTS_X), add(aproof, PROOF_L_COM_X), acc_gamma, mPtr) fr_acc_mul_calldata(add(state, STATE_FOLDED_CLAIMED_VALUES), add(aproof, PROOF_L_AT_ZETA), acc_gamma) @@ -946,18 +942,16 @@ contract PlonkVerifier { fr_acc_mul_calldata(add(state, STATE_FOLDED_CLAIMED_VALUES), add(aproof, PROOF_S2_AT_ZETA), acc_gamma) {{- if (gt (len .CommitmentConstraintIndexes) 0 ) }} - let poscaz := add(aproof, PROOF_OPENING_QCP_AT_ZETA) - {{ end -}} - + let poqaz := add(aproof, PROOF_OPENING_QCP_AT_ZETA) {{ range $index, $element := .CommitmentConstraintIndexes }} acc_gamma := mulmod(acc_gamma, l_gamma_kzg, R_MOD) mstore(mPtr, VK_QCP_{{ $index }}_X) mstore(mPtr20, VK_QCP_{{ $index }}_Y) point_acc_mul(state_folded_digests, mPtr, acc_gamma, mPtr40) - fr_acc_mul_calldata(add(state, STATE_FOLDED_CLAIMED_VALUES), poscaz, acc_gamma) - poscaz := add(poscaz, 0x20) + fr_acc_mul_calldata(add(state, STATE_FOLDED_CLAIMED_VALUES), poqaz, acc_gamma) + poqaz := add(poqaz, 0x20) {{ end }} - + {{ end -}} } /// @notice generate the challenge (using Fiat Shamir) to fold the opening proofs @@ -965,13 +959,11 @@ contract PlonkVerifier { /// The process for deriving γ is the same as in derive_gamma but this time the inputs are /// in this order (the [] means it's a commitment): /// * ζ - /// * [H] ( = H₁ + ζᵐ⁺²*H₂ + ζ²⁽ᵐ⁺²⁾*H₃ ) /// * [Linearised polynomial] /// * [L], [R], [O] /// * [S₁] [S₂] /// * [Pi_{i}] (wires associated to custom gates) /// Then there are the purported evaluations of the previous committed polynomials: - /// * H(ζ) /// * Linearised_polynomial(ζ) /// * L(ζ), R(ζ), O(ζ), S₁(ζ), S₂(ζ) /// * Pi_{i}(ζ) @@ -981,48 +973,41 @@ contract PlonkVerifier { let state := mload(0x40) let mPtr := add(mload(0x40), STATE_LAST_MEM) - mstore(mPtr, 0x67616d6d61) // "gamma" + mstore(mPtr, FS_GAMMA_KZG) // "gamma" mstore(add(mPtr, 0x20), mload(add(state, STATE_ZETA))) - mstore(add(mPtr,0x40), mload(add(state, STATE_FOLDED_H_X))) - mstore(add(mPtr,0x60), mload(add(state, STATE_FOLDED_H_Y))) - mstore(add(mPtr,0x80), mload(add(state, STATE_LINEARISED_POLYNOMIAL_X))) - mstore(add(mPtr,0xa0), mload(add(state, STATE_LINEARISED_POLYNOMIAL_Y))) - calldatacopy(add(mPtr, 0xc0), add(aproof, 
PROOF_L_COM_X), 0xc0) - mstore(add(mPtr,0x180), VK_S1_COM_X) - mstore(add(mPtr,0x1a0), VK_S1_COM_Y) - mstore(add(mPtr,0x1c0), VK_S2_COM_X) - mstore(add(mPtr,0x1e0), VK_S2_COM_Y) + mstore(add(mPtr,0x40), mload(add(state, STATE_LINEARISED_POLYNOMIAL_X))) + mstore(add(mPtr,0x60), mload(add(state, STATE_LINEARISED_POLYNOMIAL_Y))) + calldatacopy(add(mPtr, 0x80), add(aproof, PROOF_L_COM_X), 0xc0) + mstore(add(mPtr,0x140), VK_S1_COM_X) + mstore(add(mPtr,0x160), VK_S1_COM_Y) + mstore(add(mPtr,0x180), VK_S2_COM_X) + mstore(add(mPtr,0x1a0), VK_S2_COM_Y) - let offset := 0x200 - {{ range $index, $element := .CommitmentConstraintIndexes }} - mstore(add(mPtr,offset), VK_QCP_{{ $index }}_X) - mstore(add(mPtr,add(offset, 0x20)), VK_QCP_{{ $index }}_Y) + let offset := 0x1c0 + + mstore(add(mPtr,offset), VK_QCP_0_X) + mstore(add(mPtr,add(offset, 0x20)), VK_QCP_0_Y) offset := add(offset, 0x40) - {{ end }} + + mstore(add(mPtr, offset), calldataload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA))) + mstore(add(mPtr, add(offset, 0x20)), calldataload(add(aproof, PROOF_L_AT_ZETA))) + mstore(add(mPtr, add(offset, 0x40)), calldataload(add(aproof, PROOF_R_AT_ZETA))) + mstore(add(mPtr, add(offset, 0x60)), calldataload(add(aproof, PROOF_O_AT_ZETA))) + mstore(add(mPtr, add(offset, 0x80)), calldataload(add(aproof, PROOF_S1_AT_ZETA))) + mstore(add(mPtr, add(offset, 0xa0)), calldataload(add(aproof, PROOF_S2_AT_ZETA))) - mstore(add(mPtr, offset), calldataload(add(aproof, PROOF_QUOTIENT_POLYNOMIAL_AT_ZETA))) - mstore(add(mPtr, add(offset, 0x20)), calldataload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA))) - mstore(add(mPtr, add(offset, 0x40)), calldataload(add(aproof, PROOF_L_AT_ZETA))) - mstore(add(mPtr, add(offset, 0x60)), calldataload(add(aproof, PROOF_R_AT_ZETA))) - mstore(add(mPtr, add(offset, 0x80)), calldataload(add(aproof, PROOF_O_AT_ZETA))) - mstore(add(mPtr, add(offset, 0xa0)), calldataload(add(aproof, PROOF_S1_AT_ZETA))) - mstore(add(mPtr, add(offset, 0xc0)), calldataload(add(aproof, PROOF_S2_AT_ZETA))) + let _mPtr := add(mPtr, add(offset, 0xc0)) - let _mPtr := add(mPtr, add(offset, 0xe0)) {{ if (gt (len .CommitmentConstraintIndexes) 0 )}} - let _poscaz := add(aproof, PROOF_OPENING_QCP_AT_ZETA) - for {let i:=0} lt(i, VK_NB_CUSTOM_GATES) {i:=add(i,1)} - { - mstore(_mPtr, calldataload(_poscaz)) - _poscaz := add(_poscaz, 0x20) - _mPtr := add(_mPtr, 0x20) - } + let _poqaz := add(aproof, PROOF_OPENING_QCP_AT_ZETA) + calldatacopy(_mPtr, _poqaz, mul(VK_NB_CUSTOM_GATES, 0x20)) + _mPtr := add(_mPtr, mul(VK_NB_CUSTOM_GATES, 0x20)) {{ end }} mstore(_mPtr, calldataload(add(aproof, PROOF_GRAND_PRODUCT_AT_ZETA_OMEGA))) let start_input := 0x1b // 00.."gamma" - let size_input := add(0x17, mul(VK_NB_CUSTOM_GATES,3)) // number of 32bytes elmts = 0x17 (zeta+2*7+7 for the digests+openings) + 2*VK_NB_CUSTOM_GATES (for the commitments of the selectors) + VK_NB_CUSTOM_GATES (for the openings of the selectors) + let size_input := add(0x14, mul(VK_NB_CUSTOM_GATES,3)) // number of 32bytes elmts = 0x17 (zeta+3*6 for the digests+openings) + 3*VK_NB_CUSTOM_GATES (for the commitments of the selectors) + 1 (opening of Z at ζω) size_input := add(0x5, mul(size_input, 0x20)) // size in bytes: 15*32 bytes + 5 bytes for gamma let check_staticcall := staticcall(gas(), 0x2, add(mPtr,start_input), size_input, add(state, STATE_GAMMA_KZG), 0x20) if iszero(check_staticcall) { @@ -1032,6 +1017,7 @@ contract PlonkVerifier { } function compute_commitment_linearised_polynomial_ec(aproof, s1, s2) { + let state := mload(0x40) let mPtr := add(mload(0x40), 
STATE_LAST_MEM) @@ -1076,24 +1062,26 @@ contract PlonkVerifier { add(mPtr, 0x40) ) - let commits_api_at_zeta := add(aproof, PROOF_OPENING_QCP_AT_ZETA) - let commits_api := add(aproof, PROOF_COMMITMENTS_WIRES_CUSTOM_GATES) + {{ if (gt (len .CommitmentConstraintIndexes) 0 )}} + let qcp_opening_at_zeta := add(aproof, PROOF_OPENING_QCP_AT_ZETA) + let bsb_commitments := add(aproof, PROOF_BSB_COMMITMENTS) for { let i := 0 } lt(i, VK_NB_CUSTOM_GATES) { i := add(i, 1) } { - mstore(mPtr, calldataload(commits_api)) - mstore(add(mPtr, 0x20), calldataload(add(commits_api, 0x20))) + mstore(mPtr, calldataload(bsb_commitments)) + mstore(add(mPtr, 0x20), calldataload(add(bsb_commitments, 0x20))) point_acc_mul( add(state, STATE_LINEARISED_POLYNOMIAL_X), mPtr, - calldataload(commits_api_at_zeta), + calldataload(qcp_opening_at_zeta), add(mPtr, 0x40) ) - commits_api_at_zeta := add(commits_api_at_zeta, 0x20) - commits_api := add(commits_api, 0x40) + qcp_opening_at_zeta := add(qcp_opening_at_zeta, 0x20) + bsb_commitments := add(bsb_commitments, 0x40) } + {{ end }} mstore(mPtr, VK_S3_COM_X) mstore(add(mPtr, 0x20), VK_S3_COM_Y) @@ -1102,15 +1090,22 @@ contract PlonkVerifier { mstore(mPtr, calldataload(add(aproof, PROOF_GRAND_PRODUCT_COMMITMENT_X))) mstore(add(mPtr, 0x20), calldataload(add(aproof, PROOF_GRAND_PRODUCT_COMMITMENT_Y))) point_acc_mul(add(state, STATE_LINEARISED_POLYNOMIAL_X), mPtr, s2, add(mPtr, 0x40)) + + point_add( + add(state, STATE_LINEARISED_POLYNOMIAL_X), + add(state, STATE_LINEARISED_POLYNOMIAL_X), + add(state, STATE_FOLDED_H_X), + mPtr) } /// @notice Compute the commitment to the linearized polynomial equal to /// L(ζ)[Qₗ]+r(ζ)[Qᵣ]+R(ζ)L(ζ)[Qₘ]+O(ζ)[Qₒ]+[Qₖ]+Σᵢqc'ᵢ(ζ)[BsbCommitmentᵢ] + - /// α*( Z(μζ)(L(ζ)+β*S₁(ζ)+γ)*(R(ζ)+β*S₂(ζ)+γ)[S₃]-[Z](L(ζ)+β*id_{1}(ζ)+γ)*(R(ζ)+β*id_{2(ζ)+γ)*(O(ζ)+β*id_{3}(ζ)+γ) ) + - /// α²*L₁(ζ)[Z] + /// α*( Z(μζ)(L(ζ)+β*S₁(ζ)+γ)*(R(ζ)+β*S₂(ζ)+γ)[S₃]-[Z](L(ζ)+β*id_{1}(ζ)+γ)*(R(ζ)+β*id_{2}(ζ)+γ)*(O(ζ)+β*id_{3}(ζ)+γ) ) + + /// α²*L₁(ζ)[Z] - Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) /// where /// * id_1 = id, id_2 = vk_coset_shift*id, id_3 = vk_coset_shift^{2}*id /// * the [] means that it's a commitment (i.e. 
a point on Bn254(F_p)) + /// * Z_{H}(ζ) = ζ^n-1 /// @param aproof pointer to the proof function compute_commitment_linearised_polynomial(aproof) { let state := mload(0x40) @@ -1158,7 +1153,7 @@ contract PlonkVerifier { compute_commitment_linearised_polynomial_ec(aproof, s1, s2) } - /// @notice compute H₁ + ζᵐ⁺²*H₂ + ζ²⁽ᵐ⁺²⁾*H₃ and store the result at + /// @notice compute -z_h(ζ)*([H₁] + ζᵐ⁺²[H₂] + ζ²⁽ᵐ⁺²⁾[H₃]) and store the result at /// state + state_folded_h /// @param aproof pointer to the proof function fold_h(aproof) { @@ -1170,49 +1165,51 @@ contract PlonkVerifier { point_add_calldata(add(state, STATE_FOLDED_H_X), add(state, STATE_FOLDED_H_X), add(aproof, PROOF_H_1_X), mPtr) point_mul(add(state, STATE_FOLDED_H_X), add(state, STATE_FOLDED_H_X), zeta_power_n_plus_two, mPtr) point_add_calldata(add(state, STATE_FOLDED_H_X), add(state, STATE_FOLDED_H_X), add(aproof, PROOF_H_0_X), mPtr) + point_mul(add(state, STATE_FOLDED_H_X), add(state, STATE_FOLDED_H_X), mload(add(state, STATE_ZETA_POWER_N_MINUS_ONE)), mPtr) + let folded_h_y := mload(add(state, STATE_FOLDED_H_Y)) + folded_h_y := sub(P_MOD, folded_h_y) + mstore(add(state, STATE_FOLDED_H_Y), folded_h_y) } - /// @notice check that - /// L(ζ)Qₗ(ζ)+r(ζ)Qᵣ(ζ)+R(ζ)L(ζ)Qₘ(ζ)+O(ζ)Qₒ(ζ)+Qₖ(ζ)+Σᵢqc'ᵢ(ζ)BsbCommitmentᵢ(ζ) + - /// α*( Z(μζ)(l(ζ)+β*s₁(ζ)+γ)*(r(ζ)+β*s₂(ζ)+γ)*β*s₃(X)-Z(X)(l(ζ)+β*id_1(ζ)+γ)*(r(ζ)+β*id_2(ζ)+γ)*(o(ζ)+β*id_3(ζ)+γ) ) ) - /// + α²*L₁(ζ) = - /// (ζⁿ-1)H(ζ) + /// @notice check that the opening of the linearised polynomial at zeta is equal to + /// - [ PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) ] /// @param aproof pointer to the proof - function verify_quotient_poly_eval_at_zeta(aproof) { + function verify_opening_linearised_polynomial(aproof) { + let state := mload(0x40) // (l(ζ)+β*s1(ζ)+γ) - let s1 := add(mload(0x40), STATE_LAST_MEM) - mstore(s1, mulmod(calldataload(add(aproof, PROOF_S1_AT_ZETA)), mload(add(state, STATE_BETA)), R_MOD)) - mstore(s1, addmod(mload(s1), mload(add(state, STATE_GAMMA)), R_MOD)) - mstore(s1, addmod(mload(s1), calldataload(add(aproof, PROOF_L_AT_ZETA)), R_MOD)) + let s1 + s1 := mulmod(calldataload(add(aproof, PROOF_S1_AT_ZETA)), mload(add(state, STATE_BETA)), R_MOD) + s1 := addmod(s1, mload(add(state, STATE_GAMMA)), R_MOD) + s1 := addmod(s1, calldataload(add(aproof, PROOF_L_AT_ZETA)), R_MOD) // (r(ζ)+β*s2(ζ)+γ) - let s2 := add(s1, 0x20) - mstore(s2, mulmod(calldataload(add(aproof, PROOF_S2_AT_ZETA)), mload(add(state, STATE_BETA)), R_MOD)) - mstore(s2, addmod(mload(s2), mload(add(state, STATE_GAMMA)), R_MOD)) - mstore(s2, addmod(mload(s2), calldataload(add(aproof, PROOF_R_AT_ZETA)), R_MOD)) - // _s2 := mload(s2) + let s2 + s2 := mulmod(calldataload(add(aproof, PROOF_S2_AT_ZETA)), mload(add(state, STATE_BETA)), R_MOD) + s2 := addmod(s2, mload(add(state, STATE_GAMMA)), R_MOD) + s2 := addmod(s2, calldataload(add(aproof, PROOF_R_AT_ZETA)), R_MOD) // (o(ζ)+γ) - let o := add(s1, 0x40) - mstore(o, addmod(calldataload(add(aproof, PROOF_O_AT_ZETA)), mload(add(state, STATE_GAMMA)), R_MOD)) - - // α*(Z(μζ))*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(o(ζ)+γ) - mstore(s1, mulmod(mload(s1), mload(s2), R_MOD)) - mstore(s1, mulmod(mload(s1), mload(o), R_MOD)) - mstore(s1, mulmod(mload(s1), mload(add(state, STATE_ALPHA)), R_MOD)) - mstore(s1, mulmod(mload(s1), calldataload(add(aproof, PROOF_GRAND_PRODUCT_AT_ZETA_OMEGA)), R_MOD)) - - let computed_quotient := add(s1, 0x60) - - // linearizedpolynomial + pi(zeta) - mstore(computed_quotient,addmod(calldataload(add(aproof, 
PROOF_LINEARISED_POLYNOMIAL_AT_ZETA)), mload(add(state, STATE_PI)), R_MOD)) - mstore(computed_quotient, addmod(mload(computed_quotient), mload(s1), R_MOD)) - mstore(computed_quotient,addmod(mload(computed_quotient), sub(R_MOD, mload(add(state, STATE_ALPHA_SQUARE_LAGRANGE_0))), R_MOD)) - mstore(s2,mulmod(calldataload(add(aproof, PROOF_QUOTIENT_POLYNOMIAL_AT_ZETA)),mload(add(state, STATE_ZETA_POWER_N_MINUS_ONE)),R_MOD)) + let o + o := addmod(calldataload(add(aproof, PROOF_O_AT_ZETA)), mload(add(state, STATE_GAMMA)), R_MOD) + + // α*Z(μζ)*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(o(ζ)+γ) + s1 := mulmod(s1, s2, R_MOD) + s1 := mulmod(s1, o, R_MOD) + s1 := mulmod(s1, mload(add(state, STATE_ALPHA)), R_MOD) + s1 := mulmod(s1, calldataload(add(aproof, PROOF_GRAND_PRODUCT_AT_ZETA_OMEGA)), R_MOD) + + // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + s1 := addmod(s1, mload(add(state, STATE_PI)), R_MOD) + s2 := mload(add(state, STATE_ALPHA_SQUARE_LAGRANGE_0)) + s2 := sub(R_MOD, s2) + s1 := addmod(s1, s2, R_MOD) - mstore(add(state, STATE_SUCCESS), eq(mload(computed_quotient), mload(s2))) + let opening_linearised_polynomial := mload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA)) + if eq(eq(opening_linearised_polynomial, s1), 0) { + error_verify() + } } // BEGINNING utils math functions ------------------------------------------------- @@ -1222,7 +1219,6 @@ contract PlonkVerifier { /// @param q pointer to the second point /// @param mPtr pointer to free memory function point_add(dst, p, q, mPtr) { - let state := mload(0x40) mstore(mPtr, mload(p)) mstore(add(mPtr, 0x20), mload(add(p, 0x20))) mstore(add(mPtr, 0x40), mload(q)) @@ -1238,7 +1234,6 @@ contract PlonkVerifier { /// @param q pointer to the second point (calladata) /// @param mPtr pointer to free memory function point_add_calldata(dst, p, q, mPtr) { - let state := mload(0x40) mstore(mPtr, mload(p)) mstore(add(mPtr, 0x20), mload(add(p, 0x20))) mstore(add(mPtr, 0x40), calldataload(q)) @@ -1254,7 +1249,6 @@ contract PlonkVerifier { /// @param s scalar /// @param mPtr free memory function point_mul(dst,src,s, mPtr) { - let state := mload(0x40) mstore(mPtr,mload(src)) mstore(add(mPtr,0x20),mload(add(src,0x20))) mstore(add(mPtr,0x40),s) @@ -1269,7 +1263,6 @@ contract PlonkVerifier { /// @param s scalar /// @param mPtr free memory function point_mul_calldata(dst, src, s, mPtr) { - let state := mload(0x40) mstore(mPtr, calldataload(src)) mstore(add(mPtr, 0x20), calldataload(add(src, 0x20))) mstore(add(mPtr, 0x40), s) @@ -1285,7 +1278,6 @@ contract PlonkVerifier { /// @param s scalar /// @param mPtr free memory function point_acc_mul(dst,src,s, mPtr) { - let state := mload(0x40) mstore(mPtr,mload(src)) mstore(add(mPtr,0x20),mload(add(src,0x20))) mstore(add(mPtr,0x40),s) @@ -1339,7 +1331,7 @@ contract PlonkVerifier { mstore(add(mPtr, 0xa0), R_MOD) let check_staticcall := staticcall(gas(),0x05,mPtr,0xc0,mPtr,0x20) if eq(check_staticcall, 0) { - error_verify() + } res := mload(mPtr) } @@ -1383,7 +1375,7 @@ func (proof *Proof) MarshalSolidity() []byte { // uint256 o_at_zeta; // uint256 s1_at_zeta; // uint256 s2_at_zeta; - for i := 2; i < 7; i++ { + for i := 1; i < 6; i++ { tmp32 = proof.BatchedProof.ClaimedValues[i].Bytes() res = append(res, tmp32[:]...) } @@ -1397,12 +1389,9 @@ func (proof *Proof) MarshalSolidity() []byte { tmp32 = proof.ZShiftedOpening.ClaimedValue.Bytes() res = append(res, tmp32[:]...) 
- // uint256 quotient_polynomial_at_zeta; // uint256 linearization_polynomial_at_zeta; tmp32 = proof.BatchedProof.ClaimedValues[0].Bytes() res = append(res, tmp32[:]...) - tmp32 = proof.BatchedProof.ClaimedValues[1].Bytes() - res = append(res, tmp32[:]...) // uint256 opening_at_zeta_proof_x; // uint256 opening_at_zeta_proof_y; @@ -1418,7 +1407,7 @@ func (proof *Proof) MarshalSolidity() []byte { // uint256[] wire_committed_commitments; if len(proof.Bsb22Commitments) > 0 { for i := 0; i < len(proof.Bsb22Commitments); i++ { - tmp32 = proof.BatchedProof.ClaimedValues[7+i].Bytes() + tmp32 = proof.BatchedProof.ClaimedValues[6+i].Bytes() res = append(res, tmp32[:]...) } From ba50ca2f341ad298b22c4f23f2c6bd3760075550 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 11:58:50 +0100 Subject: [PATCH 11/55] feat: clean MarshalSolidity --- backend/plonk/bn254/solidity.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 5bd1b0ef9a..25001c7ac2 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -1340,7 +1340,7 @@ contract PlonkVerifier { } ` -// MarshalSolidity converts a proof to a byte array that can be used in a +// MarshalSolidity convert s a proof to a byte array that can be used in a // Solidity contract. func (proof *Proof) MarshalSolidity() []byte { From d544aca57f94681637dd319b5dbe9294d244f9ff Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 12:00:31 +0100 Subject: [PATCH 12/55] fix: fixed comment derive alpha --- backend/plonk/bn254/verify.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/plonk/bn254/verify.go b/backend/plonk/bn254/verify.go index 4ec7c7d16e..4e78685cb9 100644 --- a/backend/plonk/bn254/verify.go +++ b/backend/plonk/bn254/verify.go @@ -80,7 +80,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // derive alpha from Comm(l), Comm(r), Comm(o), Com(Z), Bsb22Commitments + // derive alpha from Com(Z), Bsb22Commitments alphaDeps := make([]*curve.G1Affine, len(proof.Bsb22Commitments)+1) for i := range proof.Bsb22Commitments { alphaDeps[i] = &proof.Bsb22Commitments[i] From 42f5e7196507ec6a2f718dcacae5759d93f923f6 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 12:01:28 +0100 Subject: [PATCH 13/55] fix: fixed generator --- .../backend/template/zkpschemes/plonk/plonk.verify.go.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl index 8543458c69..8cc3316896 100644 --- a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl +++ b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl @@ -63,7 +63,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // derive alpha from Comm(l), Comm(r), Comm(o), Com(Z), Bsb22Commitments + // derive alpha from Com(Z), Bsb22Commitments alphaDeps := make([]*curve.G1Affine, len(proof.Bsb22Commitments)+1) for i := range proof.Bsb22Commitments { alphaDeps[i] = &proof.Bsb22Commitments[i] From 80f3a1ec901d6c2ef1ba9c0a2ef17a3085bd57d0 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 12:02:28 +0100 Subject: [PATCH 14/55] feat: code gen --- backend/plonk/bls12-377/verify.go | 2 +- backend/plonk/bls12-381/verify.go | 2 +- 
backend/plonk/bls24-315/verify.go | 2 +- backend/plonk/bls24-317/verify.go | 2 +- backend/plonk/bw6-633/verify.go | 2 +- backend/plonk/bw6-761/verify.go | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/backend/plonk/bls12-377/verify.go b/backend/plonk/bls12-377/verify.go index dba02ac50a..ad5cfd8155 100644 --- a/backend/plonk/bls12-377/verify.go +++ b/backend/plonk/bls12-377/verify.go @@ -80,7 +80,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // derive alpha from Comm(l), Comm(r), Comm(o), Com(Z), Bsb22Commitments + // derive alpha from Com(Z), Bsb22Commitments alphaDeps := make([]*curve.G1Affine, len(proof.Bsb22Commitments)+1) for i := range proof.Bsb22Commitments { alphaDeps[i] = &proof.Bsb22Commitments[i] diff --git a/backend/plonk/bls12-381/verify.go b/backend/plonk/bls12-381/verify.go index 559f4cb6e3..6933e2fc94 100644 --- a/backend/plonk/bls12-381/verify.go +++ b/backend/plonk/bls12-381/verify.go @@ -80,7 +80,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // derive alpha from Comm(l), Comm(r), Comm(o), Com(Z), Bsb22Commitments + // derive alpha from Com(Z), Bsb22Commitments alphaDeps := make([]*curve.G1Affine, len(proof.Bsb22Commitments)+1) for i := range proof.Bsb22Commitments { alphaDeps[i] = &proof.Bsb22Commitments[i] diff --git a/backend/plonk/bls24-315/verify.go b/backend/plonk/bls24-315/verify.go index 64c8f0a0ea..4f51ac2a05 100644 --- a/backend/plonk/bls24-315/verify.go +++ b/backend/plonk/bls24-315/verify.go @@ -80,7 +80,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // derive alpha from Comm(l), Comm(r), Comm(o), Com(Z), Bsb22Commitments + // derive alpha from Com(Z), Bsb22Commitments alphaDeps := make([]*curve.G1Affine, len(proof.Bsb22Commitments)+1) for i := range proof.Bsb22Commitments { alphaDeps[i] = &proof.Bsb22Commitments[i] diff --git a/backend/plonk/bls24-317/verify.go b/backend/plonk/bls24-317/verify.go index 5640be8848..4451733110 100644 --- a/backend/plonk/bls24-317/verify.go +++ b/backend/plonk/bls24-317/verify.go @@ -80,7 +80,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // derive alpha from Comm(l), Comm(r), Comm(o), Com(Z), Bsb22Commitments + // derive alpha from Com(Z), Bsb22Commitments alphaDeps := make([]*curve.G1Affine, len(proof.Bsb22Commitments)+1) for i := range proof.Bsb22Commitments { alphaDeps[i] = &proof.Bsb22Commitments[i] diff --git a/backend/plonk/bw6-633/verify.go b/backend/plonk/bw6-633/verify.go index 27440459b3..54355a85dd 100644 --- a/backend/plonk/bw6-633/verify.go +++ b/backend/plonk/bw6-633/verify.go @@ -80,7 +80,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // derive alpha from Comm(l), Comm(r), Comm(o), Com(Z), Bsb22Commitments + // derive alpha from Com(Z), Bsb22Commitments alphaDeps := make([]*curve.G1Affine, len(proof.Bsb22Commitments)+1) for i := range proof.Bsb22Commitments { alphaDeps[i] = &proof.Bsb22Commitments[i] diff --git a/backend/plonk/bw6-761/verify.go b/backend/plonk/bw6-761/verify.go index 96553b46c4..1e53ba4056 100644 --- a/backend/plonk/bw6-761/verify.go +++ b/backend/plonk/bw6-761/verify.go @@ -80,7 +80,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac return err } - // derive alpha from Comm(l), Comm(r), Comm(o), Com(Z), Bsb22Commitments + // derive alpha from Com(Z), Bsb22Commitments 
alphaDeps := make([]*curve.G1Affine, len(proof.Bsb22Commitments)+1) for i := range proof.Bsb22Commitments { alphaDeps[i] = &proof.Bsb22Commitments[i] From c0e403ba77e871d5d05cdce9026ddc04084c6c94 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 12:08:59 +0100 Subject: [PATCH 15/55] fix: fixed proof size --- backend/plonk/bn254/solidity.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 25001c7ac2..66bfc6a5e2 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -318,7 +318,7 @@ contract PlonkVerifier { /// Checks if the proof is of the correct size /// @param actual_proof_size size of the proof (not the expected size) function check_proof_size(actual_proof_size) { - let expected_proof_size := add(0x340, mul(VK_NB_CUSTOM_GATES,0x60)) + let expected_proof_size := add(0x320, mul(VK_NB_CUSTOM_GATES,0x60)) if iszero(eq(actual_proof_size, expected_proof_size)) { error_proof_size() } From f39e31e5a06e6644e06c1e5d14c68bc46d91ac2c Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 17:03:39 +0100 Subject: [PATCH 16/55] fix: fixed verify_opening_linearised_polynomial --- backend/plonk/bn254/solidity.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 66bfc6a5e2..9d1c569597 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -1205,9 +1205,10 @@ contract PlonkVerifier { s2 := mload(add(state, STATE_ALPHA_SQUARE_LAGRANGE_0)) s2 := sub(R_MOD, s2) s1 := addmod(s1, s2, R_MOD) + s1 := sub(R_MOD, s1) - let opening_linearised_polynomial := mload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA)) - if eq(eq(opening_linearised_polynomial, s1), 0) { + let opening_linearised_polynomial := calldataload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA)) + if iszero(eq(opening_linearised_polynomial, s1)) { error_verify() } } From e84d0839eb2efa1e75ad142a79634a7463d005b4 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 17:59:18 +0100 Subject: [PATCH 17/55] feat: pairing precompile error handled --- backend/plonk/bn254/solidity.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 9d1c569597..b92d3484cd 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -274,6 +274,15 @@ contract PlonkVerifier { revert(ptError, 0x64) } + function error_pairing() { + let ptError := mload(0x40) + mstore(ptError, ERROR_STRING_ID) // selector for function Error(string) + mstore(add(ptError, 0x4), 0x20) + mstore(add(ptError, 0x24), 0xd) + mstore(add(ptError, 0x44), "error pairing") + revert(ptError, 0x64) + } + function error_verify() { let ptError := mload(0x40) mstore(ptError, ERROR_STRING_ID) // selector for function Error(string) @@ -892,9 +901,10 @@ contract PlonkVerifier { // TODO test the staticcall using the method from audit_4-5 let l_success := staticcall(gas(), 8, mPtr, 0x180, 0x00, 0x20) + if iszero(l_succes) { + error_pairing() + } let res_pairing := mload(0x00) - let s_success := mload(add(state, STATE_SUCCESS)) - res_pairing := and(and(res_pairing, l_success), s_success) mstore(add(state, STATE_SUCCESS), res_pairing) } From 99398b20cf96beeaf8383e383b817f9fff5baf46 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 18:01:25 +0100 Subject: [PATCH 18/55] fix: fixed typo 
--- backend/plonk/bn254/solidity.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index b92d3484cd..5a1ff154e1 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -901,7 +901,7 @@ contract PlonkVerifier { // TODO test the staticcall using the method from audit_4-5 let l_success := staticcall(gas(), 8, mPtr, 0x180, 0x00, 0x20) - if iszero(l_succes) { + if iszero(l_success) { error_pairing() } let res_pairing := mload(0x00) From d87ca3c9b44b4f7f80f10f6aef70e0b593f87686 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 18:51:25 +0100 Subject: [PATCH 19/55] fix: fixed offset opening at zeta-omega --- backend/plonk/bn254/solidity.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 5a1ff154e1..27ed2b04b6 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -103,7 +103,7 @@ contract PlonkVerifier { uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_X = 0x2a0; uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_Y = 0x2c0; - uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_X = 0x3e0; + uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_X = 0x2e0; uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_Y = 0x300; uint256 private constant PROOF_OPENING_QCP_AT_ZETA = 0x320; From 5f060136511e4eafa8dec59ba31c6ce1e4f481ef Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 26 Jan 2024 19:05:20 +0100 Subject: [PATCH 20/55] fix: fixed compute_gamma_kzg --- backend/plonk/bn254/solidity.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 27ed2b04b6..ecac00e646 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -202,10 +202,10 @@ contract PlonkVerifier { mstore(add(mem, STATE_ZETA_POWER_N_MINUS_ONE), zeta_power_n_minus_one) // public inputs contribution - let l_wocommit := sum_pi_wo_api_commit(public_inputs.offset, public_inputs.length, freeMem) + let l_pi := sum_pi_wo_api_commit(public_inputs.offset, public_inputs.length, freeMem) {{ if (gt (len .CommitmentConstraintIndexes) 0 ) -}} - let l_pi := sum_pi_commit(proof.offset, public_inputs.length, freeMem) - l_pi := addmod(l_wocommit, l_pi, R_MOD) + let l_pi_commit := sum_pi_commit(proof.offset, public_inputs.length, freeMem) + l_pi := addmod(l_pi_commit, l_pi, R_MOD) {{ end -}} mstore(add(mem, STATE_PI), l_pi) @@ -995,9 +995,11 @@ contract PlonkVerifier { let offset := 0x1c0 - mstore(add(mPtr,offset), VK_QCP_0_X) - mstore(add(mPtr,add(offset, 0x20)), VK_QCP_0_Y) + {{ range $index, $element := .CommitmentConstraintIndexes }} + mstore(add(mPtr,offset), VK_QCP_{{ $index }}_X) + mstore(add(mPtr,add(offset, 0x20)), VK_QCP_{{ $index }}_Y) offset := add(offset, 0x40) + {{ end }} mstore(add(mPtr, offset), calldataload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA))) mstore(add(mPtr, add(offset, 0x20)), calldataload(add(aproof, PROOF_L_AT_ZETA))) From 019da3de466a72ba80996cdc627e3cb04814fa43 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 2 Feb 2024 17:20:56 +0100 Subject: [PATCH 21/55] feat: modified algebraic relation --- std/recursion/plonk/verifier.go | 55 ++++++++++++++++----------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/std/recursion/plonk/verifier.go b/std/recursion/plonk/verifier.go index 67a597de09..11b70926e6 
100644 --- a/std/recursion/plonk/verifier.go +++ b/std/recursion/plonk/verifier.go @@ -262,7 +262,7 @@ func PlaceholderProof[FR emulated.FieldParams, G1El algebra.G1ElementT, G2El alg nbCommitments := len(ccs.GetCommitments().CommitmentIndexes()) ret := Proof[FR, G1El, G2El]{ BatchedProof: kzg.BatchOpeningProof[FR, G1El]{ - ClaimedValues: make([]emulated.Element[FR], 7+nbCommitments), + ClaimedValues: make([]emulated.Element[FR], 6+nbCommitments), }, Bsb22Commitments: make([]kzg.Commitment[G1El], nbCommitments), } @@ -828,20 +828,19 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, return nil, nil, nil, err } - // evaluation of Z=Xⁿ-1 at ζ + // evaluation of zhZetaZ=ζⁿ-1 one := v.scalarApi.One() - zetaPowerM := v.fixedExpN(vk.Size, zeta) // ζⁿ - zetaPowerMMinusOne := v.scalarApi.Sub(zetaPowerM, one) // ζⁿ-1 + zetaPowerM := v.fixedExpN(vk.Size, zeta) // ζⁿ + zhZeta := v.scalarApi.Sub(zetaPowerM, one) // ζⁿ-1 // L1 = (1/n)(ζⁿ-1)/(ζ-1) denom := v.scalarApi.Sub(zeta, one) - lagrangeOne := v.scalarApi.Div(zetaPowerMMinusOne, denom) + lagrangeOne := v.scalarApi.Div(zhZeta, denom) lagrangeOne = v.scalarApi.Mul(lagrangeOne, &vk.SizeInv) lagrange := lagrangeOne + // compute PI = ∑_{i Date: Fri, 2 Feb 2024 17:23:17 +0100 Subject: [PATCH 22/55] fix: fixed formula in comments --- backend/plonk/bls12-377/verify.go | 2 +- backend/plonk/bls12-381/verify.go | 2 +- backend/plonk/bls24-315/verify.go | 2 +- backend/plonk/bls24-317/verify.go | 2 +- backend/plonk/bn254/verify.go | 2 +- backend/plonk/bw6-633/verify.go | 2 +- backend/plonk/bw6-761/verify.go | 2 +- .../zkpschemes/plonk/plonk.verify.go.tmpl | 2 +- std/recursion/plonk/verifier_test.go | 26 ++++++++++++------- 9 files changed, 24 insertions(+), 18 deletions(-) diff --git a/backend/plonk/bls12-377/verify.go b/backend/plonk/bls12-377/verify.go index ad5cfd8155..3a06c2767f 100644 --- a/backend/plonk/bls12-377/verify.go +++ b/backend/plonk/bls12-377/verify.go @@ -204,7 +204,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) // where - // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + // _s1 = α*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) diff --git a/backend/plonk/bls12-381/verify.go b/backend/plonk/bls12-381/verify.go index 6933e2fc94..3a6eaffbff 100644 --- a/backend/plonk/bls12-381/verify.go +++ b/backend/plonk/bls12-381/verify.go @@ -204,7 +204,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) // where - // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + // _s1 = α*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) diff --git a/backend/plonk/bls24-315/verify.go b/backend/plonk/bls24-315/verify.go index 4f51ac2a05..ef86f4a0c4 100644 --- a/backend/plonk/bls24-315/verify.go +++ b/backend/plonk/bls24-315/verify.go @@ -204,7 +204,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) // where - // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + // _s1 = 
α*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) diff --git a/backend/plonk/bls24-317/verify.go b/backend/plonk/bls24-317/verify.go index 4451733110..75ab3d60be 100644 --- a/backend/plonk/bls24-317/verify.go +++ b/backend/plonk/bls24-317/verify.go @@ -204,7 +204,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) // where - // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + // _s1 = α*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) diff --git a/backend/plonk/bn254/verify.go b/backend/plonk/bn254/verify.go index 4e78685cb9..5ab6573b31 100644 --- a/backend/plonk/bn254/verify.go +++ b/backend/plonk/bn254/verify.go @@ -204,7 +204,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) // where - // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + // _s1 = α*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) diff --git a/backend/plonk/bw6-633/verify.go b/backend/plonk/bw6-633/verify.go index 54355a85dd..1f58399475 100644 --- a/backend/plonk/bw6-633/verify.go +++ b/backend/plonk/bw6-633/verify.go @@ -204,7 +204,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) // where - // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + // _s1 = α*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) diff --git a/backend/plonk/bw6-761/verify.go b/backend/plonk/bw6-761/verify.go index 1e53ba4056..432711cb02 100644 --- a/backend/plonk/bw6-761/verify.go +++ b/backend/plonk/bw6-761/verify.go @@ -204,7 +204,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) // where - // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + // _s1 = α*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) diff --git a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl index 8cc3316896..d6e81a07cb 100644 --- a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl +++ b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl @@ -187,7 +187,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) // where - // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + // _s1 = α*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(β)+γ)*β*Z(μζ) diff --git a/std/recursion/plonk/verifier_test.go 
b/std/recursion/plonk/verifier_test.go index b21c4a75d5..f07e4e5a42 100644 --- a/std/recursion/plonk/verifier_test.go +++ b/std/recursion/plonk/verifier_test.go @@ -84,27 +84,33 @@ func getInnerWoCommit(assert *test.Assert, field, outer *big.Int) (constraint.Co func TestBLS12InBW6WoCommit(t *testing.T) { assert := test.NewAssert(t) - innerCcs, innerVK, innerWitness, innerProof := getInnerWoCommit(assert, ecc.BLS12_377.ScalarField(), ecc.BW6_761.ScalarField()) + // innerCcs, innerVK, innerWitness, innerProof := getInnerWoCommit(assert, ecc.BLS12_377.ScalarField(), ecc.BW6_761.ScalarField()) + innerCcs, innerVK, _, _ := getInnerWoCommit(assert, ecc.BLS12_377.ScalarField(), ecc.BW6_761.ScalarField()) // outer proof circuitVk, err := ValueOfVerifyingKey[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine](innerVK) assert.NoError(err) - circuitWitness, err := ValueOfWitness[sw_bls12377.ScalarField](innerWitness) - assert.NoError(err) - circuitProof, err := ValueOfProof[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine](innerProof) - assert.NoError(err) + // circuitWitness, err := ValueOfWitness[sw_bls12377.ScalarField](innerWitness) + // assert.NoError(err) + // circuitProof, err := ValueOfProof[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine](innerProof) + // assert.NoError(err) outerCircuit := &OuterCircuit[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine, sw_bls12377.GT]{ InnerWitness: PlaceholderWitness[sw_bls12377.ScalarField](innerCcs), Proof: PlaceholderProof[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine](innerCcs), VerifyingKey: circuitVk, } - outerAssignment := &OuterCircuit[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine, sw_bls12377.GT]{ - InnerWitness: circuitWitness, - Proof: circuitProof, - } - err = test.IsSolved(outerCircuit, outerAssignment, ecc.BW6_761.ScalarField()) + ccs, err := frontend.Compile(ecc.BW6_761.ScalarField(), scs.NewBuilder, outerCircuit) assert.NoError(err) + nbConstraints := ccs.GetNbConstraints() + fmt.Printf("nb constraints: %d\n", nbConstraints) + // outerAssignment := &OuterCircuit[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine, sw_bls12377.GT]{ + // InnerWitness: circuitWitness, + // Proof: circuitProof, + // } + // err = test.IsSolved(outerCircuit, outerAssignment, ecc.BW6_761.ScalarField()) + // assert.NoError(err) + } func TestBW6InBN254WoCommit(t *testing.T) { From 5ee479df8f4777b2c8341bd81f57978a3b319dac Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Fri, 2 Feb 2024 23:00:18 +0100 Subject: [PATCH 23/55] fix: fixed size slice --- std/recursion/plonk/verifier.go | 153 +++++++++++++------------------- 1 file changed, 64 insertions(+), 89 deletions(-) diff --git a/std/recursion/plonk/verifier.go b/std/recursion/plonk/verifier.go index 11b70926e6..49f4638d7b 100644 --- a/std/recursion/plonk/verifier.go +++ b/std/recursion/plonk/verifier.go @@ -830,8 +830,8 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, // evaluation of zhZetaZ=ζⁿ-1 one := v.scalarApi.One() - zetaPowerM := v.fixedExpN(vk.Size, zeta) // ζⁿ - zhZeta := v.scalarApi.Sub(zetaPowerM, one) // ζⁿ-1 + zetaPowerN := v.fixedExpN(vk.Size, zeta) // ζⁿ + zhZeta := v.scalarApi.Sub(zetaPowerN, one) // ζⁿ-1 // L1 = (1/n)(ζⁿ-1)/(ζ-1) denom := v.scalarApi.Sub(zeta, one) @@ -868,7 +868,7 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, return nil, nil, nil, err } for i := range 
vk.CommitmentConstraintIndexes { - li := v.computeIthLagrangeAtZeta(v.api.Add(vk.CommitmentConstraintIndexes[i], vk.NbPublicVariables), zeta, zetaPowerM, vk) + li := v.computeIthLagrangeAtZeta(v.api.Add(vk.CommitmentConstraintIndexes[i], vk.NbPublicVariables), zeta, zetaPowerN, vk) marshalledCommitment := v.curve.MarshalG1(proof.Bsb22Commitments[i].G1El) hashToField.Write(marshalledCommitment...) hashedCmt := hashToField.Sum() @@ -898,21 +898,21 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) // _s1 = (l(ζ)+β*s1(ζ)+γ) - _s1 := v.scalarApi.Mul(&s1, beta) - _s1 = v.scalarApi.Add(_s1, &l) - _s1 = v.scalarApi.Add(_s1, gamma) + lPlusBetaS1PlusGamma := v.scalarApi.Mul(&s1, beta) + lPlusBetaS1PlusGamma = v.scalarApi.Add(lPlusBetaS1PlusGamma, &l) + lPlusBetaS1PlusGamma = v.scalarApi.Add(lPlusBetaS1PlusGamma, gamma) // _s2 = (r(ζ)+β*s2(ζ)+γ) - _s2 := v.scalarApi.Mul(&s2, beta) - _s2 = v.scalarApi.Add(_s2, &r) - _s2 = v.scalarApi.Add(_s2, gamma) + rPlusBetaS2PlusGamma := v.scalarApi.Mul(&s2, beta) + rPlusBetaS2PlusGamma = v.scalarApi.Add(rPlusBetaS2PlusGamma, &r) + rPlusBetaS2PlusGamma = v.scalarApi.Add(rPlusBetaS2PlusGamma, gamma) // _o = (o(ζ)+γ) _o := v.scalarApi.Add(&o, gamma) // _s1 = α*(Z(μζ))*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*(o(ζ)+γ) - _s1 = v.scalarApi.Mul(_s1, _s2) - _s1 = v.scalarApi.Mul(_s1, _o) + lPlusBetaS1PlusGammaTimesRPlusBetaS2PlusGamma := v.scalarApi.Mul(lPlusBetaS1PlusGamma, rPlusBetaS2PlusGamma) + _s1 := v.scalarApi.Mul(lPlusBetaS1PlusGammaTimesRPlusBetaS2PlusGamma, _o) _s1 = v.scalarApi.Mul(_s1, alpha) _s1 = v.scalarApi.Mul(_s1, &zu) @@ -924,68 +924,45 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, openingLinPol := proof.BatchedProof.ClaimedValues[0] v.scalarApi.AssertIsEqual(&openingLinPol, constLin) - // compute the folded commitment to H: Comm(h₁) + ζᵐ⁺²*Comm(h₂) + ζ²⁽ᵐ⁺²⁾*Comm(h₃) - zetaMPlusTwo := v.scalarApi.Mul(zetaPowerM, zeta) - zetaMPlusTwo = v.scalarApi.Mul(zetaMPlusTwo, zeta) - zetaMPlusTwoSquare := v.scalarApi.Mul(zetaMPlusTwo, zetaMPlusTwo) - - foldedH, err := v.curve.MultiScalarMul([]*G1El{&proof.H[2].G1El, &proof.H[1].G1El}, []*emulated.Element[FR]{zetaMPlusTwoSquare, zetaMPlusTwo}) - if err != nil { - return nil, nil, nil, fmt.Errorf("folded proof MSM: %w", err) - } - foldedH = v.curve.Add(foldedH, &proof.H[0].G1El) - - // Compute the commitment to the linearized polynomial - // linearizedPolynomialDigest = - // l(ζ)*ql+r(ζ)*qr+r(ζ)l(ζ)*qm+o(ζ)*qo+qk+Σᵢqc'ᵢ(ζ)*BsbCommitmentᵢ + - // α*( Z(μζ)(l(ζ)+β*s₁(ζ)+γ)*(r(ζ)+β*s₂(ζ)+γ)*s₃(X)-Z(X)(l(ζ)+β*id_1(ζ)+γ)*(r(ζ)+β*id_2(ζ)+γ)*(o(ζ)+β*id_3(ζ)+γ) ) + - // α²*L₁(ζ)*Z - - // second part: α*( Z(μζ)(l(ζ)+β*s₁(ζ)+γ)*(r(ζ)+β*s₂(ζ)+γ)*β*s₃(X)-Z(X)(l(ζ)+β*id_1(ζ)+γ)*(r(ζ)+β*id_2(ζ)+γ)*(o(ζ)+β*id_3(ζ)+γ) ) ) - - uu := v.scalarApi.Mul(&zu, beta) - - vv := v.scalarApi.Mul(beta, &s1) - vv = v.scalarApi.Add(vv, &l) - vv = v.scalarApi.Add(vv, gamma) - - ww := v.scalarApi.Mul(beta, &s2) - ww = v.scalarApi.Add(ww, &r) - ww = v.scalarApi.Add(ww, gamma) - - // α*Z(μζ)(l(ζ)+β*s₁(ζ)+γ)*(r(ζ)+β*s₂(ζ)+γ)*β - _s1 = v.scalarApi.Mul(uu, vv) - _s1 = v.scalarApi.Mul(_s1, ww) - _s1 = v.scalarApi.Mul(_s1, alpha) - - cosetsquare := v.scalarApi.Mul(&vk.CosetShift, &vk.CosetShift) - - // (l(ζ)+β*ζ+γ) - uu = v.scalarApi.Mul(beta, zeta) - vv = uu - ww = uu - uu = v.scalarApi.Add(uu, &l) - uu = v.scalarApi.Add(uu, gamma) - - // (r(ζ)+β*μ*ζ+γ) - vv = v.scalarApi.Mul(vv, &vk.CosetShift) - vv = v.scalarApi.Add(vv, 
&r) - vv = v.scalarApi.Add(vv, gamma) - - // (o(ζ)+β*μ²*ζ+γ) - ww = v.scalarApi.Mul(ww, cosetsquare) - ww = v.scalarApi.Add(ww, &o) - ww = v.scalarApi.Add(ww, gamma) - - // -(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - _s2 = v.scalarApi.Mul(uu, vv) - _s2 = v.scalarApi.Mul(_s2, ww) - _s2 = v.scalarApi.Neg(_s2) - - // note since third part = α²*L₁(ζ)*Z - // -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + α²*L₁(ζ) - _s2 = v.scalarApi.Mul(_s2, alpha) - _s2 = v.scalarApi.Add(_s2, alphaSquareLagrange) + // computing the linearised polynomial digest + // α²*L₁(ζ)*[Z] + + // _s1*[s3]+_s2*[Z] + l(ζ)*[Ql] + + // l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - + // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) + // where + // _s1 = α*(l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + // _s2 = -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + + _s1 = v.scalarApi.Mul(lPlusBetaS1PlusGammaTimesRPlusBetaS2PlusGamma, beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β + _s1 = v.scalarApi.Mul(_s1, &zu) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + + betaZeta := v.scalarApi.Mul(beta, zeta) // β*ζ + _s2 := v.scalarApi.Add(&l, betaZeta) // l(ζ)+β*ζ + _s2 = v.scalarApi.Add(_s2, gamma) // (l(ζ)+β*ζ+γ) + betaZetaCosetShift := v.scalarApi.Mul(betaZeta, &vk.CosetShift) // u*β*ζ + tmp := v.scalarApi.Add(&r, betaZetaCosetShift) // r(ζ)+β*u*ζ + tmp = v.scalarApi.Add(tmp, gamma) // r(ζ)+β*u*ζ+γ + _s2 = v.scalarApi.Mul(_s2, tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ) + betaZetaCosetShift = v.scalarApi.Mul(betaZetaCosetShift, &vk.CosetShift) // β*u²*ζ + tmp = v.scalarApi.Add(betaZetaCosetShift, gamma) // β*u²*ζ+γ + tmp = v.scalarApi.Add(tmp, &o) // β*u²*ζ+γ+o + _s2 = v.scalarApi.Mul(_s2, tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)(o+β*u²*ζ+γ) + + _s2 = v.scalarApi.Sub(_s1, _s2) + _s2 = v.scalarApi.Mul(_s1, alpha) // _s1 = α*[(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) - (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)] + + // α²*L₁(ζ) - α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) + coeffZ := v.scalarApi.Add(alphaSquareLagrangeOne, _s2) + + // l(ζ)*r(ζ) + rl := v.scalarApi.Mul(&l, &r) + + // -ζⁿ⁺²*(ζⁿ-1), -ζ²⁽ⁿ⁺²⁾*(ζⁿ-1), -(ζⁿ-1) + zhZeta = v.scalarApi.Neg(zhZeta) // -(ζⁿ-1) + zetaPowerNPlusTwo := v.scalarApi.Mul(zeta, zetaPowerN) + zetaPowerNPlusTwo = v.scalarApi.Mul(zeta, zetaPowerNPlusTwo) // ζⁿ⁺² + zetaNPlusTwoZh := v.scalarApi.Mul(zetaPowerNPlusTwo, zhZeta) // -ζⁿ⁺²*(ζⁿ-1) + zetaNPlusTwoSquareZh := v.scalarApi.Mul(zetaPowerNPlusTwo, zetaNPlusTwoZh) // -ζ²⁽ⁿ⁺²⁾*(ζⁿ-1) points := make([]*G1El, len(proof.Bsb22Commitments)) for i := range proof.Bsb22Commitments { @@ -993,17 +970,16 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, } points = append(points, &vk.Ql.G1El, &vk.Qr.G1El, &vk.Qm.G1El, &vk.Qo.G1El, // first part - &vk.S[2].G1El, &proof.Z.G1El, // second & third part + &vk.S[2].G1El, &proof.Z.G1El, &proof.H[0].G1El, &proof.H[1].G1El, &proof.H[2].G1El, // second & third part ) qC := make([]*emulated.Element[FR], len(proof.Bsb22Commitments)) - for i := range proof.BatchedProof.ClaimedValues[7:] { - qC[i] = &proof.BatchedProof.ClaimedValues[7+i] + for i := range proof.BatchedProof.ClaimedValues[6:] { + qC[i] = &proof.BatchedProof.ClaimedValues[6+i] } - rl := v.scalarApi.Mul(&r, &l) scalars := append(qC, &l, &r, rl, &o, // first part - _s1, _s2, // second & third part + _s1, coeffZ, zhZeta, zetaNPlusTwoSquareZh, zetaNPlusTwoSquareZh, // second & third part ) var msmOpts []algopts.AlgebraOption @@ -1022,15 +998,14 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, } // 
Fold the first proof - digestsToFold := make([]kzg.Commitment[G1El], len(vk.Qcp)+7) - copy(digestsToFold[7:], vk.Qcp) - digestsToFold[0] = kzg.Commitment[G1El]{G1El: *foldedH} - digestsToFold[1] = kzg.Commitment[G1El]{G1El: *linearizedPolynomialDigest} - digestsToFold[2] = proof.LRO[0] - digestsToFold[3] = proof.LRO[1] - digestsToFold[4] = proof.LRO[2] - digestsToFold[5] = vk.S[0] - digestsToFold[6] = vk.S[1] + digestsToFold := make([]kzg.Commitment[G1El], len(vk.Qcp)+6) + copy(digestsToFold[6:], vk.Qcp) + digestsToFold[0] = kzg.Commitment[G1El]{G1El: *linearizedPolynomialDigest} + digestsToFold[1] = proof.LRO[0] + digestsToFold[2] = proof.LRO[1] + digestsToFold[3] = proof.LRO[2] + digestsToFold[4] = vk.S[0] + digestsToFold[5] = vk.S[1] foldedProof, foldedDigest, err := v.kzg.FoldProof( digestsToFold, proof.BatchedProof, @@ -1205,12 +1180,12 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) fixedExpN(n frontend.Variable, s *emula } // computeIthLagrangeAtZeta computes L_{i}(\omega) = \omega^{i}/n (\zeta^{n}-1)/(\zeta-\omega^{i}) -func (v *Verifier[FR, G1El, G2El, GtEl]) computeIthLagrangeAtZeta(exp frontend.Variable, zeta, zetaPowerM *emulated.Element[FR], vk VerifyingKey[FR, G1El, G2El]) *emulated.Element[FR] { +func (v *Verifier[FR, G1El, G2El, GtEl]) computeIthLagrangeAtZeta(exp frontend.Variable, zeta, zetaPowerN *emulated.Element[FR], vk VerifyingKey[FR, G1El, G2El]) *emulated.Element[FR] { // assume circuit of maximum size 2**30. const maxExpBits = 30 one := v.scalarApi.One() - num := v.scalarApi.Sub(zetaPowerM, one) + num := v.scalarApi.Sub(zetaPowerN, one) // \omega^{i} iBits := bits.ToBinary(v.api, exp, bits.WithNbDigits(maxExpBits)) From 3abd77d855061463da382b61e0e6299934cb82e4 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Mon, 5 Feb 2024 21:47:40 +0100 Subject: [PATCH 24/55] fix: verifier works --- std/recursion/plonk/verifier.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/std/recursion/plonk/verifier.go b/std/recursion/plonk/verifier.go index 49f4638d7b..0e1a38bb5e 100644 --- a/std/recursion/plonk/verifier.go +++ b/std/recursion/plonk/verifier.go @@ -935,6 +935,7 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, _s1 = v.scalarApi.Mul(lPlusBetaS1PlusGammaTimesRPlusBetaS2PlusGamma, beta) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β _s1 = v.scalarApi.Mul(_s1, &zu) // (l(ζ)+β*s1(β)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) + _s1 = v.scalarApi.Mul(_s1, alpha) // α*(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) betaZeta := v.scalarApi.Mul(beta, zeta) // β*ζ _s2 := v.scalarApi.Add(&l, betaZeta) // l(ζ)+β*ζ @@ -947,9 +948,8 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, tmp = v.scalarApi.Add(betaZetaCosetShift, gamma) // β*u²*ζ+γ tmp = v.scalarApi.Add(tmp, &o) // β*u²*ζ+γ+o _s2 = v.scalarApi.Mul(_s2, tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)(o+β*u²*ζ+γ) - - _s2 = v.scalarApi.Sub(_s1, _s2) - _s2 = v.scalarApi.Mul(_s1, alpha) // _s1 = α*[(l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*β*Z(μζ) - (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ)] + _s2 = v.scalarApi.Mul(_s2, alpha) // α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)(o+β*u²*ζ+γ) + _s2 = v.scalarApi.Neg(_s2) // -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)(o+β*u²*ζ+γ) // α²*L₁(ζ) - α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) coeffZ := v.scalarApi.Add(alphaSquareLagrangeOne, _s2) @@ -979,7 +979,7 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, } scalars := append(qC, &l, &r, rl, &o, // first part - _s1, coeffZ, zhZeta, zetaNPlusTwoSquareZh, zetaNPlusTwoSquareZh, // second 
& third part + _s1, coeffZ, zhZeta, zetaNPlusTwoZh, zetaNPlusTwoSquareZh, // second & third part ) var msmOpts []algopts.AlgebraOption From 3b3c1e89a35fc32aec1394fbe6dad4bae940e6f2 Mon Sep 17 00:00:00 2001 From: Arya Tabaie Date: Mon, 1 Apr 2024 18:20:41 -0400 Subject: [PATCH 25/55] Refac/compressor nodictref2 (#1091) * bench compressor constraints * feat numreader increase nbWords * test setNumNbBits * refac remove dictref * fix updating dynamic br * fix no compression level * fix test1One works * fix TestNoCompression passes * fix inIDelta * build update compress version * bench smaller decompressed size * perf only allow two backref types * test count constraints * perf optimize reading of backref length * perf one constraint per br detection * bench test for 700 and 800 KB * fix support empty payload * style remove deadcode --- go.mod | 6 +- go.sum | 10 +- std/compress/io.go | 20 ++++ std/compress/io_test.go | 85 ++++++++++++++ std/compress/lzss/large-tests/main.go | 54 +++++++++ std/compress/lzss/snark.go | 111 +++++++++--------- std/compress/lzss/snark_test.go | 156 +++++++++++++++++++++----- std/compress/lzss/snark_testing.go | 8 +- 8 files changed, 360 insertions(+), 90 deletions(-) create mode 100644 std/compress/lzss/large-tests/main.go diff --git a/go.mod b/go.mod index 2a653372d0..b1c3719075 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,14 @@ module github.com/consensys/gnark -go 1.20 +go 1.21 + +toolchain go1.21.4 require ( github.com/bits-and-blooms/bitset v1.8.0 github.com/blang/semver/v4 v4.0.0 github.com/consensys/bavard v0.1.13 - github.com/consensys/compress v0.2.3 + github.com/consensys/compress v0.2.5-0.20240302001543-8bd32cc03c5a github.com/consensys/gnark-crypto v0.12.2-0.20240215234832-d72fcb379d3e github.com/fxamacker/cbor/v2 v2.5.0 github.com/google/go-cmp v0.5.9 diff --git a/go.sum b/go.sum index b1e585676e..25931720e9 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,10 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/compress v0.2.3 h1:B34qdHCg2t9Ikd2jS2UnrNZPG9I3PTBet6f5fHmG7to= -github.com/consensys/compress v0.2.3/go.mod h1:Ne8+cGKjqgjF1dlHapZx38pHzWpaBYhsKxQa+JPl0zM= +github.com/consensys/compress v0.2.5-0.20240302001148-4e4b235ce59a h1:hU8ImZi7a+rpSQfEXICzXhIBXksjTEqm/rshKarNOwQ= +github.com/consensys/compress v0.2.5-0.20240302001148-4e4b235ce59a/go.mod h1:pyM+ZXiNUh7/0+AUjUf9RKUM6vSH7T/fsn5LLS0j1Tk= +github.com/consensys/compress v0.2.5-0.20240302001543-8bd32cc03c5a h1:cCYsbALfz+DjWfoRZZxBSlOq4fnqfzA8noHMzH6jP74= +github.com/consensys/compress v0.2.5-0.20240302001543-8bd32cc03c5a/go.mod h1:pyM+ZXiNUh7/0+AUjUf9RKUM6vSH7T/fsn5LLS0j1Tk= github.com/consensys/gnark-crypto v0.12.2-0.20240215234832-d72fcb379d3e h1:MKdOuCiy2DAX1tMp2YsmtNDaqdigpY6B5cZQDJ9BvEo= github.com/consensys/gnark-crypto v0.12.2-0.20240215234832-d72fcb379d3e/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -28,7 +30,9 @@ github.com/ingonyama-zk/icicle v0.0.0-20230928131117-97f0079e5c71/go.mod h1:kAK8 github.com/ingonyama-zk/iciclegnark v0.1.0 h1:88MkEghzjQBMjrYRJFxZ9oR9CTIpB8NG2zLeCJSvXKQ= github.com/ingonyama-zk/iciclegnark v0.1.0/go.mod 
h1:wz6+IpyHKs6UhMMoQpNqz1VY+ddfKqC/gRwR/64W6WU= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -67,7 +71,9 @@ golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= diff --git a/std/compress/io.go b/std/compress/io.go index fdf0c0459b..bc7a848601 100644 --- a/std/compress/io.go +++ b/std/compress/io.go @@ -176,6 +176,26 @@ func NewNumReader(api frontend.API, toRead []frontend.Variable, numNbBits, wordN } } +func (nr *NumReader) SetNumNbBits(numNbBits int) { + wordNbBits := nr.radix.BitLen() - 1 // TODO check + wordsPerNum := numNbBits / wordNbBits + if wordsPerNum*wordNbBits != numNbBits { + panic("numNbBits must be divisible by wordNbBits") + } + if wordsPerNum < nr.wordsPerNum { + panic("decreasing wordsPerNum not supported") + } + + if nr.last != nil { // nothing to compensate for if no values have yet been read + nbToRead := min(len(nr.toRead), wordsPerNum-nr.wordsPerNum) + delta := ReadNum(nr.api, nr.toRead[:nbToRead], nr.radix) + nr.toRead = nr.toRead[:nbToRead] + nr.last = nr.api.Add(nr.api.Mul(nr.last, twoPow(wordsPerNum-nr.wordsPerNum)), delta) + } + + nr.wordsPerNum, nr.numBound = wordsPerNum, twoPow(numNbBits) +} + func twoPow(n int) *big.Int { res := big.NewInt(1) return res.Lsh(res, uint(n)) diff --git a/std/compress/io_test.go b/std/compress/io_test.go index d69813b063..cbe17e7a73 100644 --- a/std/compress/io_test.go +++ b/std/compress/io_test.go @@ -2,6 +2,7 @@ package compress import ( "crypto/rand" + "errors" "fmt" "github.com/consensys/gnark-crypto/ecc" "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" @@ -117,3 +118,87 @@ func (c *checksumTestCircuit) Define(api frontend.API) error { Packed := append(Pack(api, c.Bytes, 8), len(c.Bytes)) return AssertChecksumEquals(api, Packed, c.Sum) } + +func TestSetNumNbBits(t *testing.T) { + + runTest := func(words, increases []byte, nums []uint64) { + test.NewAssert(t).CheckCircuit( + &testSetNumNbBitsCircuit{ + increases: increases, + Words: make([]frontend.Variable, len(words)), + Nums: make([]frontend.Variable, len(nums)), + }, + test.WithCurves(ecc.BLS12_377), test.WithBackends(backend.PLONK), + test.WithValidAssignment(&testSetNumNbBitsCircuit{ + increases: increases, + Words: test_vector_utils.ToVariableSlice(words), + 
Nums: test_vector_utils.ToVariableSlice(nums), + })) + } + + for n := 0; n < 1000; n++ { + const maxNbWords = 1000 + buf := make([]byte, (maxNbWords+7)/8) + + // action plan + _, err := rand.Read(buf) + assert.NoError(t, err) + nbWordsUsed := 0 // starting with one word per num + nbWordsPerNum := 1 + increases := make([]byte, 0) + for nbWordsUsed < maxNbWords && nbWordsPerNum < 64 { + inc := buf[len(increases)] % 3 % 2 // double mod to make 0 more likely TODO try other increase values too + nbWordsPerNum += int(inc) + if nbWordsPerNum >= 64 { + inc = byte(nbWordsPerNum) - 63 + nbWordsPerNum = 63 + } + + increases = append(increases, inc) + nbWordsUsed += nbWordsPerNum + } + + words := make([]byte, nbWordsUsed) // random bits + _, err = rand.Read(buf[:min((nbWordsUsed+7)/8, len(buf))]) + assert.NoError(t, err) + for i := range words { + if i < len(buf)*8 { + words[i] = (buf[i/8] >> (i % 8)) & 1 + } + } + + words = []byte{1, 0} + increases = []byte{1} + + nbWordsPerNum = 1 + nums := make([]uint64, len(increases)) + for i := range nums { + nbWordsPerNum += int(increases[i]) + for j := 0; j < nbWordsPerNum && (i+j) < len(words); j++ { + nums[i] |= uint64(words[i+j]) << (nbWordsPerNum - j - 1) + } + } + + runTest(words, increases, nums) + } +} + +type testSetNumNbBitsCircuit struct { + increases []byte + Words []frontend.Variable + Nums []frontend.Variable +} + +func (c *testSetNumNbBitsCircuit) Define(api frontend.API) error { + if len(c.increases) != len(c.Nums) { + return errors.New("must have as many steps as read values") + } + l := 1 + nr := NewNumReader(api, c.Words, l, 1) + for i := range c.increases { + l += int(c.increases[i]) + nr.SetNumNbBits(l) + api.AssertIsEqual(c.Nums[i], nr.Next()) + } + return nil +} diff --git a/std/compress/lzss/large-tests/main.go b/std/compress/lzss/large-tests/main.go new file mode 100644 index 0000000000..84f19f2fc4 --- /dev/null +++ b/std/compress/lzss/large-tests/main.go @@ -0,0 +1,54 @@ +package main + +import ( + "fmt" + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark/frontend" + "github.com/consensys/gnark/frontend/cs/scs" + "github.com/consensys/gnark/profile" + "github.com/consensys/gnark/std/compress/lzss" +) + +func main() { + compileDecompressionCircuit(800 * 1024) + compileDecompressionCircuit(700 * 1024) +} + +func compileDecompressionCircuit(decompressedSize int) { + var nameWithUnit string + { + nameWithUnit = "K" + size := decompressedSize / 1024 + if size >= 1024 { + nameWithUnit = "M" + size /= 1024 + } + nameWithUnit = fmt.Sprintf("%d%s", size, nameWithUnit) + } + + p := profile.Start(profile.WithPath(nameWithUnit + ".pprof")) + const compressedSize = 125 * 1024 + cs, err := frontend.Compile(ecc.BLS12_377.ScalarField(), scs.NewBuilder, &decompressionCircuit{ + Dict: make([]frontend.Variable, 128*1024), + Compressed: make([]frontend.Variable, compressedSize), + MaxCompressionRatio: float32(decompressedSize) / compressedSize, + }, frontend.WithCapacity(100000000)) + if err != nil { + panic(err) + } + p.Stop() + fmt.Println(nameWithUnit, ":", cs.GetNbConstraints(), "constraints") +} + +type decompressionCircuit struct { + Dict, Compressed []frontend.Variable + CompressedLen frontend.Variable + MaxCompressionRatio float32 +} + +func (c *decompressionCircuit) Define(api frontend.API) error { + d := make([]frontend.Variable, int(float32(len(c.Compressed))*c.MaxCompressionRatio)) + fmt.Println("decompressed length", len(d), "bytes") + _, err := lzss.Decompress(api, c.Compressed, c.CompressedLen, d, c.Dict) + return err +} 
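For reference on the snark.go hunks that follow: the in-circuit decompressor drops the compression-level argument and now always reads the stream bit by bit, so callers pass only the compressed bytes, their length, the output buffer and the dictionary. Below is a minimal caller sketch against the updated signature; the circuit type and its field names are illustrative only (not part of this patch), and the output buffer must be pre-sized to the largest decompressed length expected, since Decompress returns -1 when the result does not fit.

package example

import (
	"github.com/consensys/gnark/frontend"
	"github.com/consensys/gnark/std/compress/lzss"
)

// decompressCircuit is a hypothetical wrapper around the updated gadget.
type decompressCircuit struct {
	Compressed      []frontend.Variable // compressed stream, one byte per variable
	CompressedLen   frontend.Variable   // number of meaningful bytes in Compressed
	Decompressed    []frontend.Variable // output buffer, sized to the maximum expected length
	DecompressedLen frontend.Variable   // expected decompressed length
	Dict            []frontend.Variable // augmented dictionary, as bytes
}

func (c *decompressCircuit) Define(api frontend.API) error {
	// level argument removed: Decompress(api, c, cLength, d, dict)
	dLen, err := lzss.Decompress(api, c.Compressed, c.CompressedLen, c.Decompressed, c.Dict)
	if err != nil {
		return err
	}
	// dLen is -1 if the decompressed stream did not fit in c.Decompressed
	api.AssertIsEqual(dLen, c.DecompressedLen)
	return nil
}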
diff --git a/std/compress/lzss/snark.go b/std/compress/lzss/snark.go index 3943078f6c..47f38d6f6e 100644 --- a/std/compress/lzss/snark.go +++ b/std/compress/lzss/snark.go @@ -10,43 +10,44 @@ import ( "github.com/consensys/gnark/std/lookup/logderivlookup" ) +// TODO Provide option for c to be in sizes other than bytes + // Decompress decompresses c into d using dict as the dictionary // which must come pre "augmented" // it is on the caller to ensure that the dictionary is correct; in particular it must consist of bytes. Decompress does not check this. // it is recommended to pack the dictionary using compress.Pack and take a MiMC checksum of it. // d will consist of bytes // It returns the length of d as a frontend.Variable; if the decompressed stream doesn't fit in d, dLength will be "-1" -func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variable, d, dict []frontend.Variable, level lzss.Level) (dLength frontend.Variable, err error) { +func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variable, d, dict []frontend.Variable) (dLength frontend.Variable, err error) { api.AssertIsLessOrEqual(cLength, len(c)) // sanity check // size-related "constants" - wordNbBits := int(level) - shortBackRefType, longBackRefType, dictBackRefType := lzss.InitBackRefTypes(len(dict), level) // init the dictionary and backref types; only needed for the constants below - shortBrNbWords := int(shortBackRefType.NbBitsBackRef) / wordNbBits - longBrNbWords := int(longBackRefType.NbBitsBackRef) / wordNbBits - dictBrNbWords := int(dictBackRefType.NbBitsBackRef) / wordNbBits - byteNbWords := uint(8 / wordNbBits) + shortBackRefType := lzss.NewShortBackrefType() + dynamicBackRefType := lzss.NewDynamicBackrefType(len(dict), 0) // check header: version and compression level const ( sizeHeader = 3 - version = 0 + version = 1 ) + api.AssertIsLessOrEqual(sizeHeader, len(c)) api.AssertIsEqual(c[0], version/256) api.AssertIsEqual(c[1], version%256) - fileCompressionMode := c[2] - api.AssertIsEqual(api.Mul(fileCompressionMode, fileCompressionMode), api.Mul(fileCompressionMode, wordNbBits)) // if fcm!=0, then fcm=wordNbBits - decompressionNotBypassed := api.Sub(1, api.IsZero(fileCompressionMode)) + decompressionBypassed := c[2] + api.AssertIsBoolean(decompressionBypassed) + if len(c) == 3 { + return 0, nil + } // check that the input is in range and convert into small words rangeChecker := internal.NewRangeChecker(api) bytes := make([]frontend.Variable, len(c)-sizeHeader+1) copy(bytes, c[sizeHeader:]) - bytes[len(bytes)-1] = 0 // pad with a zero to avoid out of range errors - c, bytes = rangeChecker.BreakUpBytesIntoWords(wordNbBits, bytes...) // from this point on c is in words - cLength = api.Mul(api.Sub(cLength, sizeHeader), 8/wordNbBits) // one constraint; insignificant impact anyway + bytes[len(bytes)-1] = 0 // pad with a zero to avoid out of range errors + c, bytes = rangeChecker.BreakUpBytesIntoWords(1, bytes...) 
// from this point on c is in bits + cLength = api.Mul(api.Sub(cLength, sizeHeader), 8) // one constraint; insignificant impact anyway // create a random-access table to be referenced outTable := logderivlookup.New(api) @@ -57,40 +58,44 @@ func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variab // formatted input bytesTable := sliceToTable(api, bytes) - addrTable := initAddrTable(api, bytes, c, wordNbBits, []lzss.BackrefType{shortBackRefType, longBackRefType, dictBackRefType}) + addrTable := initAddrTable(api, bytes, c, shortBackRefType, dynamicBackRefType) // state variables inI := frontend.Variable(0) copyLen := frontend.Variable(0) // remaining length of the current copy copyLen01 := frontend.Variable(1) - eof := frontend.Variable(0) - dLength = -1 // if the following loop ends before hitting eof, we will get the "error" value -1 for dLength + eof := api.IsZero(cLength) + dLength = api.Add(-1, eof) // if the following loop ends before hitting eof, we will get the "error" value -1 for dLength for outI := range d { curr := bytesTable.Lookup(inI)[0] - currMinusLong := api.Sub(api.Mul(curr, decompressionNotBypassed), lzss.SymbolLong) // if bypassing decompression, currIndicatesXX = 0 - currIndicatesLongBr := api.IsZero(currMinusLong) - currIndicatesShortBr := api.IsZero(api.Sub(currMinusLong, lzss.SymbolShort-lzss.SymbolLong)) - currIndicatesDr := api.IsZero(api.Sub(currMinusLong, lzss.SymbolDict-lzss.SymbolLong)) - currIndicatesBr := api.Add(currIndicatesLongBr, currIndicatesShortBr) - currIndicatesCp := api.Add(currIndicatesBr, currIndicatesDr) + dynamicBackRefType = lzss.NewDynamicBackrefType(len(dict), outI) + // ASSUMPTION: 0 is not a backref indicator + + // if bypassing decompression, currIndicatesXX = 0 + // ( - curr + bypassed * curr + symbolXX == 0 ) == currIndicatesXX + currMinusShort := plonk.EvaluateExpression(api, curr, decompressionBypassed, -1, 0, 1, int(lzss.SymbolShort)) + currIndicatesShortBr := api.IsZero(currMinusShort) + + currMinusDyn := plonk.EvaluateExpression(api, curr, decompressionBypassed, -1, 0, 1, int(lzss.SymbolDynamic)) + currIndicatesDynBr := api.IsZero(currMinusDyn) + + currIndicatesBr := api.Add(currIndicatesShortBr, currIndicatesDynBr) - //currIndicatedCpLen := api.Add(1, lenTable.Lookup(inI)[0]) // TODO Get rid of the +1 - currIndicatedCpLen := api.Add(1, bytesTable.Lookup(api.Add(inI, byteNbWords))[0]) // TODO Get rid of the +1 - currIndicatedCpAddr := addrTable.Lookup(inI)[0] + currIndicatedBrLen := bytesTable.Lookup(api.Add(inI, 8))[0] // this is too small by 1 + currIndicatedBrLen = plonk.EvaluateExpression(api, currIndicatesBr, currIndicatedBrLen, 1, 0, 1, 0) // if not at a br, len is guaranteed to be 0 + currIndicatedBrAddr := addrTable.Lookup(inI)[0] // unlike len, addr can be non-zero even if we're not at a br - copyLen = api.Select(copyLen01, api.Mul(currIndicatesCp, currIndicatedCpLen), api.Sub(copyLen, 1)) - copyLen01 = api.IsZero(api.MulAcc(api.Neg(copyLen), copyLen, copyLen)) + copyLen = api.Select(copyLen01, currIndicatedBrLen, api.Sub(copyLen, 1)) + copyLen01 = api.IsZero(api.MulAcc(api.Neg(copyLen), copyLen, copyLen)) // - copyLen + copyLen² == 0? // copying = copyLen01 ? copyLen==1 : 1 either from previous iterations or starting a new copy // copying = copyLen01 ? 
copyLen : 1 copying := plonk.EvaluateExpression(api, copyLen01, copyLen, -1, 0, 1, 1) - copyAddr := api.Mul(api.Sub(outI+len(dict)-1, currIndicatedCpAddr), currIndicatesBr) - dictCopyAddr := api.Add(currIndicatedCpAddr, api.Sub(currIndicatedCpLen, copyLen)) - copyAddr = api.MulAcc(copyAddr, currIndicatesDr, dictCopyAddr) + copyAddr := api.Mul(api.Sub(outI+len(dict)-1, currIndicatedBrAddr), currIndicatesBr) // if no backref, don't read to avoid out of range TODO for expected compression ratio > 8, just zero out the input past cLen toCopy := outTable.Lookup(copyAddr)[0] // write to output @@ -100,10 +105,11 @@ func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variab // WARNING: curr modified by MulAcc outTable.Insert(d[outI]) - // EOF Logic - inIDelta := api.Add(api.Mul(currIndicatesLongBr, longBrNbWords), api.Mul(currIndicatesShortBr, shortBrNbWords)) - inIDelta = api.MulAcc(inIDelta, currIndicatesDr, dictBrNbWords) - inIDelta = api.Select(copying, api.Mul(inIDelta, copyLen01), byteNbWords) + // advancing inI and EOF + // advance by byte or backref length + inIDelta := api.Add(8, api.Mul(currIndicatesDynBr, dynamicBackRefType.NbBitsBackRef-8), api.Mul(currIndicatesShortBr, shortBackRefType.NbBitsBackRef-8)) + // ... unless we're IN THE MIDDLE OF a copy + inIDelta = api.Mul(inIDelta, copyLen01) // TODO Try removing this check and requiring the user to pad the input with nonzeros // TODO Change inner to mulacc once https://github.com/Consensys/gnark/pull/859 is merged @@ -114,7 +120,7 @@ func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variab inI = api.Add(inI, plonk.EvaluateExpression(api, inIDelta, eof, 1, 0, -1, 0)) // if eof, stay put } - eofNow := rangeChecker.IsLessThan(byteNbWords, api.Sub(cLength, inI)) // less than a byte left; meaning we are at the end of the input + eofNow := rangeChecker.IsLessThan(8, api.Sub(cLength, inI)) // less than a byte left; meaning we are at the end of the input // if eof, don't advance dLength // if eof was JUST hit, dLength += outI + 2; so dLength = -1 + outI + 2 = outI + 1 which is the current output length @@ -133,31 +139,34 @@ func sliceToTable(api frontend.API, slice []frontend.Variable) *logderivlookup.T return table } -func initAddrTable(api frontend.API, bytes, c []frontend.Variable, wordNbBits int, backrefs []lzss.BackrefType) *logderivlookup.Table { - for i := range backrefs { - if backrefs[i].NbBitsLength != backrefs[0].NbBitsLength { +// the "address" is zero when we don't have a backref delimiter +func initAddrTable(api frontend.API, bytes, _bits []frontend.Variable, backRefs ...lzss.BackrefType) *logderivlookup.Table { + if len(backRefs) != 2 { + panic("two backref types are expected, due to opts at the end of the function") + } + + for i := range backRefs { + if backRefs[i].NbBitsLength != backRefs[0].NbBitsLength { panic("all backref types must have the same length size") } } - readers := make([]*compress.NumReader, len(backrefs)) - delimAndLenNbWords := int(8+backrefs[0].NbBitsLength) / wordNbBits - for i := range backrefs { + + readers := make([]*compress.NumReader, len(backRefs)) + delimAndLenNbWords := int(8 + backRefs[0].NbBitsLength) + for i := range backRefs { var readerC []frontend.Variable - if len(c) >= delimAndLenNbWords { - readerC = c[delimAndLenNbWords:] + if len(_bits) >= delimAndLenNbWords { + readerC = _bits[delimAndLenNbWords:] } - readers[i] = compress.NewNumReader(api, readerC, int(backrefs[i].NbBitsAddress), wordNbBits) + readers[i] = compress.NewNumReader(api, readerC, 
int(backRefs[i].NbBitsAddress), 1) } res := logderivlookup.New(api) - for i := range c { - entry := frontend.Variable(0) - for j := range backrefs { - isSymb := api.IsZero(api.Sub(bytes[i], backrefs[j].Delimiter)) - entry = api.MulAcc(entry, isSymb, readers[j].Next()) - } + for i := range _bits { + is0 := api.IsZero(api.Sub(bytes[i], backRefs[0].Delimiter)) + entry := api.Select(is0, readers[0].Next(), readers[1].Next()) res.Insert(entry) } diff --git a/std/compress/lzss/snark_test.go b/std/compress/lzss/snark_test.go index f8db937191..c45db3778d 100644 --- a/std/compress/lzss/snark_test.go +++ b/std/compress/lzss/snark_test.go @@ -3,6 +3,8 @@ package lzss import ( "crypto/sha256" "encoding/hex" + "fmt" + "github.com/consensys/gnark/frontend/cs/scs" "os" "testing" @@ -16,12 +18,40 @@ import ( "github.com/stretchr/testify/require" ) +func TestNothingRoundTrip(t *testing.T) { + testCompressionRoundTrip(t, nil, nil) +} + +func TestPaddedNothingRoundTrip(t *testing.T) { + + d := []frontend.Variable{0, 0, 0} + c := []frontend.Variable{0, 1, 0, 255} + + circuit := &DecompressionTestCircuit{ + C: make([]frontend.Variable, len(c)), + D: make([]frontend.Variable, len(d)), + Dict: nil, + CheckCorrectness: true, + } + assignment := &DecompressionTestCircuit{ + C: c, + D: d, + CBegin: 0, + CLength: 3, + DLength: 0, + } + + RegisterHints() + test.NewAssert(t).CheckCircuit(circuit, test.WithValidAssignment(assignment), test.WithBackends(backend.PLONK), test.WithCurves(ecc.BLS12_377)) + +} + func Test1One(t *testing.T) { testCompressionRoundTrip(t, []byte{1}, nil) } -func TestGoodCompression(t *testing.T) { - testCompressionRoundTrip(t, []byte{1, 2}, nil, withLevel(lzss.GoodCompression)) +func TestOneTwo(t *testing.T) { + testCompressionRoundTrip(t, []byte{1, 2}, nil) } func Test0To10Explicit(t *testing.T) { @@ -30,31 +60,82 @@ func Test0To10Explicit(t *testing.T) { const inputExtraBytes = 5 -func TestNoCompression(t *testing.T) { +func craftExpandingInput(dict []byte, size int) []byte { + const nbBytesExpandingBlock = 4 // TODO @gbotrel check that - d, err := os.ReadFile("./testdata/3c2943/data.bin") - assert.NoError(t, err) + // the following two methods convert between a byte slice and a number; just for convenient use as map keys and counters + bytesToNum := func(b []byte) uint64 { + var res uint64 + for i := range b { + res += uint64(b[i]) << uint64(i*8) + } + return res + } + + fillNum := func(dst []byte, n uint64) { + for i := range dst { + dst[i] = byte(n) + n >>= 8 + } + } + + covered := make(map[uint64]struct{}) // combinations present in the dictionary, to avoid + for i := range dict { + if dict[i] == 255 { + covered[bytesToNum(dict[i+1:i+nbBytesExpandingBlock])] = struct{}{} + } + } + isCovered := func(n uint64) bool { + _, ok := covered[n] + return ok + } + + res := make([]byte, size) + var blockCtr uint64 + for i := 0; i < len(res); i += nbBytesExpandingBlock { + for isCovered(blockCtr) { + blockCtr++ + if blockCtr == 0 { + panic("overflow") + } + } + res[i] = 255 + fillNum(res[i+1:i+nbBytesExpandingBlock], blockCtr) + blockCtr++ + if blockCtr == 0 { + panic("overflow") + } + } + return res +} + +func TestNoCompression(t *testing.T) { dict := getDictionary() - compressor, err := lzss.NewCompressor(dict, lzss.NoCompression) + d := craftExpandingInput(dict, 1000) + + compressor, err := lzss.NewCompressor(dict) require.NoError(t, err) - c, err := compressor.Compress(d) + _, err = compressor.Write(d) require.NoError(t, err) - decompressorLevel := lzss.BestCompression + require.True(t, 
compressor.ConsiderBypassing(), "not expanding; refer back to the compress repo for an updated craftExpandingInput implementation.") + + c := compressor.Bytes() circuit := &DecompressionTestCircuit{ C: make([]frontend.Variable, len(c)+inputExtraBytes), - D: d, + D: make([]frontend.Variable, len(d)), Dict: dict, CheckCorrectness: true, - Level: decompressorLevel, } assignment := &DecompressionTestCircuit{ C: test_vector_utils.ToVariableSlice(append(c, make([]byte, inputExtraBytes)...)), + D: test_vector_utils.ToVariableSlice(d), CBegin: 0, CLength: len(c), + DLength: len(d), } RegisterHints() @@ -80,25 +161,24 @@ func Test3c2943withHeader(t *testing.T) { dict := getDictionary() - compressor, err := lzss.NewCompressor(dict, lzss.BestCompression) + compressor, err := lzss.NewCompressor(dict) require.NoError(t, err) c, err := compressor.Compress(d) require.NoError(t, err) c = append([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, c...) - decompressorLevel := lzss.BestCompression - circuit := &DecompressionTestCircuit{ C: make([]frontend.Variable, len(c)+inputExtraBytes), - D: d, + D: make([]frontend.Variable, len(d)), Dict: dict, CheckCorrectness: true, - Level: decompressorLevel, } assignment := &DecompressionTestCircuit{ C: test_vector_utils.ToVariableSlice(append(c, make([]byte, inputExtraBytes)...)), + D: test_vector_utils.ToVariableSlice(d), CBegin: 10, CLength: len(c) - 10, + DLength: len(d), } RegisterHints() @@ -108,7 +188,7 @@ func Test3c2943withHeader(t *testing.T) { func TestOutBufTooShort(t *testing.T) { const truncationAmount = 3 d := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} - compressor, err := lzss.NewCompressor(nil, lzss.BestCompression) + compressor, err := lzss.NewCompressor(nil) require.NoError(t, err) c, err := compressor.Compress(d) require.NoError(t, err) @@ -147,7 +227,6 @@ func Fuzz(f *testing.F) { // TODO This is always skipped } type testCompressionRoundTripSettings struct { - level lzss.Level cBegin int compressedPaddingLen int compressedPaddedLen int @@ -156,12 +235,6 @@ type testCompressionRoundTripSettings struct { type testCompressionRoundTripOption func(settings *testCompressionRoundTripSettings) -func withLevel(level lzss.Level) testCompressionRoundTripOption { - return func(s *testCompressionRoundTripSettings) { - s.level = level - } -} - func withCBegin(cBegin int) testCompressionRoundTripOption { return func(s *testCompressionRoundTripSettings) { s.cBegin = cBegin @@ -199,7 +272,6 @@ func testCompressionRoundTrip(t *testing.T, d, dict []byte, options ...testCompr t.Log("using dict", checksum(dict)) s := testCompressionRoundTripSettings{ - level: lzss.BestCompression, compressedPaddedLen: -1, } @@ -208,7 +280,7 @@ func testCompressionRoundTrip(t *testing.T, d, dict []byte, options ...testCompr } if s.compressed == nil { - compressor, err := lzss.NewCompressor(dict, s.level) + compressor, err := lzss.NewCompressor(dict) require.NoError(t, err) s.compressed, err = compressor.Compress(d) require.NoError(t, err) @@ -219,11 +291,14 @@ func testCompressionRoundTrip(t *testing.T, d, dict []byte, options ...testCompr // duplicating tests from the compress repo, for sanity checking dBack, err := lzss.Decompress(s.compressed, dict) require.NoError(t, err) + if d == nil { + d = []byte{} + } assert.Equal(t, d, dBack) - //assert.NoError(t, os.WriteFile("compress.csv", lzss.CompressedStreamInfo(c, dict).ToCsv(), 0644)) - - // from the blob maker it seems like the compressed stream is 129091 bytes long + /*info, err := lzss.CompressedStreamInfo(s.compressed, dict) + require.NoError(t, err) 
+ assert.NoError(t, os.WriteFile("compress.csv", info.ToCSV(), 0600))*/ if s.compressedPaddedLen != -1 { s.compressedPaddingLen = s.compressedPaddedLen - len(s.compressed) @@ -234,15 +309,16 @@ func testCompressionRoundTrip(t *testing.T, d, dict []byte, options ...testCompr circuit := &DecompressionTestCircuit{ C: make([]frontend.Variable, len(s.compressed)+s.compressedPaddingLen), - D: d, + D: make([]frontend.Variable, len(d)), Dict: dict, CheckCorrectness: true, - Level: s.level, } assignment := &DecompressionTestCircuit{ C: test_vector_utils.ToVariableSlice(append(s.compressed, make([]byte, s.compressedPaddingLen)...)), + D: test_vector_utils.ToVariableSlice(d), CBegin: s.cBegin, CLength: len(s.compressed), + DLength: len(d), } RegisterHints() @@ -265,10 +341,28 @@ type decompressionLengthTestCircuit struct { func (c *decompressionLengthTestCircuit) Define(api frontend.API) error { dict := test_vector_utils.ToVariableSlice(lzss.AugmentDict(nil)) - if dLength, err := Decompress(api, c.C, c.CLength, c.D, dict, lzss.BestCompression); err != nil { + if dLength, err := Decompress(api, c.C, c.CLength, c.D, dict); err != nil { return err } else { api.AssertIsEqual(dLength, c.ExpectedDLength) return nil } } + +func TestBuildDecompress1KBto7KB(t *testing.T) { + cs, err := frontend.Compile(ecc.BLS12_377.ScalarField(), scs.NewBuilder, &decompressionLengthTestCircuit{ + C: make([]frontend.Variable, 1024), + D: make([]frontend.Variable, 7*1024), + }) + assert.NoError(t, err) + fmt.Println(cs.GetNbConstraints()) +} + +func TestBuildDecompress1KBto9KB(t *testing.T) { + cs, err := frontend.Compile(ecc.BLS12_377.ScalarField(), scs.NewBuilder, &decompressionLengthTestCircuit{ + C: make([]frontend.Variable, 1024), + D: make([]frontend.Variable, 9*1024), + }) + assert.NoError(t, err) + fmt.Println(cs.GetNbConstraints()) +} diff --git a/std/compress/lzss/snark_testing.go b/std/compress/lzss/snark_testing.go index 1d1d20b7c6..a224898c86 100644 --- a/std/compress/lzss/snark_testing.go +++ b/std/compress/lzss/snark_testing.go @@ -9,12 +9,12 @@ import ( type DecompressionTestCircuit struct { C []frontend.Variable - D []byte + D []frontend.Variable Dict []byte CBegin frontend.Variable CLength frontend.Variable + DLength frontend.Variable CheckCorrectness bool - Level lzss.Level } func (c *DecompressionTestCircuit) Define(api frontend.API) error { @@ -23,12 +23,12 @@ func (c *DecompressionTestCircuit) Define(api frontend.API) error { if cb, ok := c.CBegin.(int); !ok || cb != 0 { c.C = compress.ShiftLeft(api, c.C, c.CBegin) } - dLen, err := Decompress(api, c.C, c.CLength, dBack, dict, c.Level) + dLen, err := Decompress(api, c.C, c.CLength, dBack, dict) if err != nil { return err } if c.CheckCorrectness { - api.AssertIsEqual(len(c.D), dLen) + api.AssertIsEqual(c.DLength, dLen) for i := range c.D { api.AssertIsEqual(c.D[i], dBack[i]) } From 863af68bc7bf61b0090130351c1714f5079598b7 Mon Sep 17 00:00:00 2001 From: Arya Tabaie Date: Wed, 3 Apr 2024 15:09:33 -0400 Subject: [PATCH 26/55] build bump compress dep version (#1093) --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index b1c3719075..b14fb910fc 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/bits-and-blooms/bitset v1.8.0 github.com/blang/semver/v4 v4.0.0 github.com/consensys/bavard v0.1.13 - github.com/consensys/compress v0.2.5-0.20240302001543-8bd32cc03c5a + github.com/consensys/compress v0.2.5 github.com/consensys/gnark-crypto v0.12.2-0.20240215234832-d72fcb379d3e 
github.com/fxamacker/cbor/v2 v2.5.0 github.com/google/go-cmp v0.5.9 diff --git a/go.sum b/go.sum index 25931720e9..ccd5701e49 100644 --- a/go.sum +++ b/go.sum @@ -4,10 +4,8 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/compress v0.2.5-0.20240302001148-4e4b235ce59a h1:hU8ImZi7a+rpSQfEXICzXhIBXksjTEqm/rshKarNOwQ= -github.com/consensys/compress v0.2.5-0.20240302001148-4e4b235ce59a/go.mod h1:pyM+ZXiNUh7/0+AUjUf9RKUM6vSH7T/fsn5LLS0j1Tk= -github.com/consensys/compress v0.2.5-0.20240302001543-8bd32cc03c5a h1:cCYsbALfz+DjWfoRZZxBSlOq4fnqfzA8noHMzH6jP74= -github.com/consensys/compress v0.2.5-0.20240302001543-8bd32cc03c5a/go.mod h1:pyM+ZXiNUh7/0+AUjUf9RKUM6vSH7T/fsn5LLS0j1Tk= +github.com/consensys/compress v0.2.5 h1:gJr1hKzbOD36JFsF1AN8lfXz1yevnJi1YolffY19Ntk= +github.com/consensys/compress v0.2.5/go.mod h1:pyM+ZXiNUh7/0+AUjUf9RKUM6vSH7T/fsn5LLS0j1Tk= github.com/consensys/gnark-crypto v0.12.2-0.20240215234832-d72fcb379d3e h1:MKdOuCiy2DAX1tMp2YsmtNDaqdigpY6B5cZQDJ9BvEo= github.com/consensys/gnark-crypto v0.12.2-0.20240215234832-d72fcb379d3e/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= From e5b921751afd38d6729da58921ffe2dacc1a276d Mon Sep 17 00:00:00 2001 From: Ivo Kubjas Date: Thu, 4 Apr 2024 16:15:01 +0200 Subject: [PATCH 27/55] feat: add secp256k1 curve default initializer (#1086) * feat: add secp to GetCurves * feat: handle secp256k1 point marshalling separately --- std/algebra/defaults.go | 6 ++++++ std/algebra/emulated/sw_emulated/point.go | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/std/algebra/defaults.go b/std/algebra/defaults.go index 5f319c4b5d..e90063bb10 100644 --- a/std/algebra/defaults.go +++ b/std/algebra/defaults.go @@ -51,6 +51,12 @@ func GetCurve[FR emulated.FieldParams, G1El G1ElementT](api frontend.API) (Curve return ret, fmt.Errorf("new curve: %w", err) } *s = c + case *Curve[emparams.Secp256k1Fr, sw_emulated.AffinePoint[emparams.Secp256k1Fp]]: + c, err := sw_emulated.New[emparams.Secp256k1Fp, emparams.Secp256k1Fr](api, sw_emulated.GetSecp256k1Params()) + if err != nil { + return ret, fmt.Errorf("new curve: %w", err) + } + *s = c default: return ret, fmt.Errorf("unknown type parametrisation") } diff --git a/std/algebra/emulated/sw_emulated/point.go b/std/algebra/emulated/sw_emulated/point.go index 6c73bb046c..6e2fe62afd 100644 --- a/std/algebra/emulated/sw_emulated/point.go +++ b/std/algebra/emulated/sw_emulated/point.go @@ -7,6 +7,7 @@ import ( "github.com/consensys/gnark/frontend" "github.com/consensys/gnark/std/algebra/algopts" "github.com/consensys/gnark/std/math/emulated" + "github.com/consensys/gnark/std/math/emulated/emparams" "golang.org/x/exp/slices" ) @@ -127,6 +128,11 @@ func (c *Curve[B, S]) MarshalG1(p AffinePoint[B]) []frontend.Variable { res := make([]frontend.Variable, 2*nbBits) copy(res, bx) copy(res[len(bx):], by) + switch any(fp).(type) { + case emparams.Secp256k1Fp: + // in gnark-crypto we do not store the infinity bit for secp256k1 points + return res + } xZ := c.baseApi.IsZero(x) yZ := c.baseApi.IsZero(y) isZero := c.api.Mul(xZ, yZ) From 9761428ed075068728013c14f22e2afa03bffc74 Mon Sep 17 00:00:00 2001 From: Ahmet Yasin Alp 
<16453361+ahmetyalp@users.noreply.github.com> Date: Fri, 5 Apr 2024 12:57:00 +0300 Subject: [PATCH 28/55] feat: Groth16 Solidity contract with commitments (#1063) * feat: Groth16 Solidity contract with commitments * optimization verifyCompressedProof to remove --via-ir necessity * remove duplicate pairings definition * Address reviews * feat: add WithBackend for integration testing option * feat: add no-commit Solidity test case * chore: init hash functions freshly just in case * fix: remove unused import * fix: hash to field for G16 solidity target * refactor: single test for commitment integration * refactor: use subtests for commitment test * test: disable solidity checks in case of many public inputs * Revert "feat: add WithBackend for integration testing option" This reverts commit 22e9d41be9639aa14faa882d3c70f409a5d1205c. * fix: add missing import * revert prover/verifier options for integration circuits --------- Co-authored-by: Ivo Kubjas --- backend/groth16/bn254/solidity.go | 351 +++++++++++++++--- backend/groth16/bn254/verify.go | 10 + integration_test.go | 6 - internal/backend/circuits/circuits.go | 4 +- internal/backend/circuits/commit.go | 39 +- .../zkpschemes/groth16/groth16.verify.go.tmpl | 18 +- std/math/polynomial/polynomial_test.go | 8 +- test/assert_checkcircuit.go | 15 +- test/assert_solidity.go | 51 ++- test/commitments_test.go | 7 +- 10 files changed, 395 insertions(+), 114 deletions(-) diff --git a/backend/groth16/bn254/solidity.go b/backend/groth16/bn254/solidity.go index a7fef49d10..fb3c2fe7b8 100644 --- a/backend/groth16/bn254/solidity.go +++ b/backend/groth16/bn254/solidity.go @@ -1,9 +1,18 @@ package groth16 +import ( + "bytes" + + "github.com/consensys/gnark-crypto/ecc/bn254/fr" +) + // solidityTemplate // this is an experimental feature and gnark solidity generator as not been thoroughly tested const solidityTemplate = ` {{- $numPublic := sub (len .G1.K) 1 }} +{{- $numCommitments := len .PublicAndCommitmentCommitted }} +{{- $numWitness := sub $numPublic $numCommitments }} +{{- $PublicAndCommitmentCommitted := .PublicAndCommitmentCommitted }} // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; @@ -15,7 +24,7 @@ pragma solidity ^0.8.0; /// to compress proofs. /// @notice See for further explanation. contract Verifier { - + /// Some of the provided public input values are larger than the field modulus. /// @dev Public input elements are not automatically reduced, as this is can be /// a dangerous source of bugs. @@ -27,6 +36,14 @@ contract Verifier { /// provided public input. error ProofInvalid(); + {{- if gt $numCommitments 0 }} + /// The commitment is invalid + /// @dev This can mean that provided commitment points and/or proof of knowledge are not on their + /// curves, that pairing equation fails, or that the commitment and/or proof of knowledge is not for the + /// commitment key. 
+ error CommitmentInvalid(); + {{- end }} + // Addresses of precompiles uint256 constant PRECOMPILE_MODEXP = 0x05; uint256 constant PRECOMPILE_ADD = 0x06; @@ -79,6 +96,20 @@ contract Verifier { uint256 constant DELTA_NEG_Y_0 = {{.G2.Delta.Y.A0.String}}; uint256 constant DELTA_NEG_Y_1 = {{.G2.Delta.Y.A1.String}}; + {{- if gt $numCommitments 0 }} + // Pedersen G point in G2 in powers of i + uint256 constant PEDERSEN_G_X_0 = {{.CommitmentKey.G.X.A0.String}}; + uint256 constant PEDERSEN_G_X_1 = {{.CommitmentKey.G.X.A1.String}}; + uint256 constant PEDERSEN_G_Y_0 = {{.CommitmentKey.G.Y.A0.String}}; + uint256 constant PEDERSEN_G_Y_1 = {{.CommitmentKey.G.Y.A1.String}}; + + // Pedersen GRootSigmaNeg point in G2 in powers of i + uint256 constant PEDERSEN_GROOTSIGMANEG_X_0 = {{.CommitmentKey.GRootSigmaNeg.X.A0.String}}; + uint256 constant PEDERSEN_GROOTSIGMANEG_X_1 = {{.CommitmentKey.GRootSigmaNeg.X.A1.String}}; + uint256 constant PEDERSEN_GROOTSIGMANEG_Y_0 = {{.CommitmentKey.GRootSigmaNeg.Y.A0.String}}; + uint256 constant PEDERSEN_GROOTSIGMANEG_Y_1 = {{.CommitmentKey.GRootSigmaNeg.Y.A1.String}}; + {{- end }} + // Constant and public input points {{- $k0 := index .G1.K 0}} uint256 constant CONSTANT_X = {{$k0.X.String}}; @@ -124,7 +155,7 @@ contract Verifier { // Exponentiation failed. // Should not happen. revert ProofInvalid(); - } + } } /// Invertsion in Fp. @@ -214,7 +245,7 @@ contract Verifier { // Point at infinity return 0; } - + // Note: sqrt_Fp reverts if there is no solution, i.e. the x coordinate is invalid. uint256 y_pos = sqrt_Fp(addmod(mulmod(mulmod(x, x, P), x, P), 3, P)); if (y == y_pos) { @@ -260,7 +291,7 @@ contract Verifier { /// @notice Reverts with InvalidProof if the coefficients are not reduced /// or if the point is not on the curve. /// @notice The G2 curve is defined over the complex extension Fp[i]/(i^2 + 1) - /// with coordinates (x0 + x1 ⋅ i, y0 + y1 ⋅ i). + /// with coordinates (x0 + x1 ⋅ i, y0 + y1 ⋅ i). /// @notice The point at infinity is encoded as (0,0,0,0) and compressed to (0,0). /// @param x0 The real part of the X coordinate. /// @param x1 The imaginary poart of the X coordinate. @@ -316,7 +347,7 @@ contract Verifier { /// Decompress a G2 point. /// @notice Reverts with InvalidProof if the input does not represent a valid point. /// @notice The G2 curve is defined over the complex extension Fp[i]/(i^2 + 1) - /// with coordinates (x0 + x1 ⋅ i, y0 + y1 ⋅ i). + /// with coordinates (x0 + x1 ⋅ i, y0 + y1 ⋅ i). /// @notice The point at infinity is encoded as (0,0,0,0) and compressed to (0,0). /// @param c0 The first half of the compresed point (x0 with two signal bits). /// @param c1 The second half of the compressed point (x1 unmodified). @@ -363,15 +394,28 @@ contract Verifier { /// @notice Computes the multi-scalar-multiplication of the public input /// elements and the verification key including the constant term. /// @param input The public inputs. These are elements of the scalar field Fr. + {{- if gt $numCommitments 0 }} + /// @param publicCommitments public inputs generated from pedersen commitments. + /// @param commitments The Pedersen commitments from the proof. + {{- end }} /// @return x The X coordinate of the resulting G1 point. /// @return y The Y coordinate of the resulting G1 point. 
- function publicInputMSM(uint256[{{$numPublic}}] calldata input) + {{- if eq $numCommitments 0 }} + function publicInputMSM(uint256[{{$numWitness}}] calldata input) + {{- else }} + function publicInputMSM( + uint256[{{$numWitness}}] calldata input, + uint256[{{$numCommitments}}] memory publicCommitments, + uint256[{{mul 2 $numCommitments}}] memory commitments + ) + {{- end }} internal view returns (uint256 x, uint256 y) { // Note: The ECMUL precompile does not reject unreduced values, so we check this. // Note: Unrolling this loop does not cost much extra in code-size, the bulk of the // code-size is in the PUB_ constants. // ECMUL has input (x, y, scalar) and output (x', y'). // ECADD has input (x1, y1, x2, y2) and output (x', y'). + // We reduce commitments(if any) with constants as the first point argument to ECADD. // We call them such that ecmul output is already in the second point // argument to ECADD so we can have a tight loop. bool success = true; @@ -381,19 +425,33 @@ contract Verifier { let s mstore(f, CONSTANT_X) mstore(add(f, 0x20), CONSTANT_Y) + {{- if gt $numCommitments 0 }} + {{- if eq $numWitness 1 }} + mstore(g, mload(commitments)) + mstore(add(g, 0x20), mload(add(commitments, 0x20))) + {{- else }} + success := and(success, staticcall(gas(), PRECOMPILE_ADD, commitments, {{mul 0x40 $numCommitments}}, g, 0x40)) + {{- end }} + success := and(success, staticcall(gas(), PRECOMPILE_ADD, f, 0x80, f, 0x40)) + {{- end }} {{- range $i := intRange $numPublic }} mstore(g, PUB_{{$i}}_X) mstore(add(g, 0x20), PUB_{{$i}}_Y) {{- if eq $i 0 }} s := calldataload(input) - {{- else }} + {{- else if lt $i $numWitness }} s := calldataload(add(input, {{mul $i 0x20}})) + {{- else if eq $i $numWitness }} + s := mload(publicCommitments) + {{- else}} + s := mload(add(publicCommitments, {{mul 0x20 (sub $i $numWitness)}})) {{- end }} mstore(add(g, 0x40), s) success := and(success, lt(s, R)) success := and(success, staticcall(gas(), PRECOMPILE_MUL, g, 0x60, g, 0x40)) success := and(success, staticcall(gas(), PRECOMPILE_ADD, f, 0x80, f, 0x40)) {{- end }} + x := mload(f) y := mload(add(f, 0x20)) } @@ -409,13 +467,40 @@ contract Verifier { /// but does not verify the proof itself. /// @param proof The uncompressed Groth16 proof. Elements are in the same order as for /// verifyProof. I.e. Groth16 points (A, B, C) encoded as in EIP-197. + {{- if gt $numCommitments 0 }} + /// @param commitments Pedersen commitments from the proof. + /// @param commitmentPok proof of knowledge for the Pedersen commitments. + {{- end }} /// @return compressed The compressed proof. Elements are in the same order as for /// verifyCompressedProof. I.e. points (A, B, C) in compressed format. + {{- if gt $numCommitments 0 }} + /// @return compressedCommitments compressed Pedersen commitments from the proof. + /// @return compressedCommitmentPok compressed proof of knowledge for the Pedersen commitments. 
+ {{- end }} + {{- if eq $numCommitments 0 }} function compressProof(uint256[8] calldata proof) public view returns (uint256[4] memory compressed) { + {{- else }} + function compressProof( + uint256[8] calldata proof, + uint256[{{mul 2 $numCommitments}}] calldata commitments, + uint256[2] calldata commitmentPok + ) + public view returns ( + uint256[4] memory compressed, + uint256[{{$numCommitments}}] memory compressedCommitments, + uint256 compressedCommitmentPok + ) { + {{- end }} compressed[0] = compress_g1(proof[0], proof[1]); (compressed[2], compressed[1]) = compress_g2(proof[3], proof[2], proof[5], proof[4]); compressed[3] = compress_g1(proof[6], proof[7]); + {{- if gt $numCommitments 0 }} + {{- range $i := intRange $numCommitments }} + compressedCommitments[{{$i}}] = compress_g1(commitments[{{mul 2 $i}}], commitments[{{sum (mul 2 $i) 1}}]); + {{- end }} + compressedCommitmentPok = compress_g1(commitmentPok[0], commitmentPok[1]); + {{- end }} } /// Verify a Groth16 proof with compressed points. @@ -425,61 +510,137 @@ contract Verifier { /// proof was successfully verified. /// @param compressedProof the points (A, B, C) in compressed format /// matching the output of compressProof. + {{- if gt $numCommitments 0 }} + /// @param compressedCommitments compressed Pedersen commitments from the proof. + /// @param compressedCommitmentPok compressed proof of knowledge for the Pedersen commitments. + {{- end }} /// @param input the public input field elements in the scalar field Fr. /// Elements must be reduced. function verifyCompressedProof( uint256[4] calldata compressedProof, - uint256[{{$numPublic}}] calldata input + {{- if gt $numCommitments 0}} + uint256[{{$numCommitments}}] calldata compressedCommitments, + uint256 compressedCommitmentPok, + {{- end }} + uint256[{{$numWitness}}] calldata input ) public view { - (uint256 Ax, uint256 Ay) = decompress_g1(compressedProof[0]); - (uint256 Bx0, uint256 Bx1, uint256 By0, uint256 By1) = decompress_g2( - compressedProof[2], compressedProof[1]); - (uint256 Cx, uint256 Cy) = decompress_g1(compressedProof[3]); - (uint256 Lx, uint256 Ly) = publicInputMSM(input); - - // Verify the pairing - // Note: The precompile expects the F2 coefficients in big-endian order. - // Note: The pairing precompile rejects unreduced values, so we won't check that here. + {{- if gt $numCommitments 0 }} + uint256[{{$numCommitments}}] memory publicCommitments; + uint256[{{mul 2 $numCommitments}}] memory commitments; + {{- end }} uint256[24] memory pairings; - // e(A, B) - pairings[ 0] = Ax; - pairings[ 1] = Ay; - pairings[ 2] = Bx1; - pairings[ 3] = Bx0; - pairings[ 4] = By1; - pairings[ 5] = By0; - // e(C, -δ) - pairings[ 6] = Cx; - pairings[ 7] = Cy; - pairings[ 8] = DELTA_NEG_X_1; - pairings[ 9] = DELTA_NEG_X_0; - pairings[10] = DELTA_NEG_Y_1; - pairings[11] = DELTA_NEG_Y_0; - // e(α, -β) - pairings[12] = ALPHA_X; - pairings[13] = ALPHA_Y; - pairings[14] = BETA_NEG_X_1; - pairings[15] = BETA_NEG_X_0; - pairings[16] = BETA_NEG_Y_1; - pairings[17] = BETA_NEG_Y_0; - // e(L_pub, -γ) - pairings[18] = Lx; - pairings[19] = Ly; - pairings[20] = GAMMA_NEG_X_1; - pairings[21] = GAMMA_NEG_X_0; - pairings[22] = GAMMA_NEG_Y_1; - pairings[23] = GAMMA_NEG_Y_0; - - // Check pairing equation. 
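Before the pairing check, each Pedersen commitment is turned into one extra public input by hashing its coordinates together with the public inputs it commits to and reducing modulo the scalar field; that is the sha256(abi.encodePacked(...)) % R expression used in the template below. The same derivation off-chain, sketched in Go (helper name is illustrative):

package sketch

import (
	"crypto/sha256"
	"math/big"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

// commitmentToPublicInput mirrors the contract: every value is packed as a
// 32-byte big-endian word, the concatenation is hashed with sha256 and the
// digest is reduced modulo r.
func commitmentToPublicInput(cmtX, cmtY *big.Int, committedInputs []*big.Int) *big.Int {
	h := sha256.New()
	buf := make([]byte, 32)
	h.Write(cmtX.FillBytes(buf))
	h.Write(cmtY.FillBytes(buf))
	for _, in := range committedInputs {
		h.Write(in.FillBytes(buf))
	}
	res := new(big.Int).SetBytes(h.Sum(nil))
	return res.Mod(res, fr.Modulus())
}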
- bool success; - uint256[1] memory output; - assembly ("memory-safe") { - success := staticcall(gas(), PRECOMPILE_VERIFY, pairings, 0x300, output, 0x20) + + {{- if gt $numCommitments 0 }} + { + {{- if eq $numCommitments 1 }} + (commitments[0], commitments[1]) = decompress_g1(compressedCommitments[0]); + {{- else }} + // TODO: We need to fold commitments into single point + for (uint256 i = 0; i < {{$numCommitments}}; i++) { + (commitments[2*i], commitments[2*i+1]) = decompress_g1(compressedCommitments[i]); + } + {{- end}} + (uint256 Px, uint256 Py) = decompress_g1(compressedCommitmentPok); + {{- range $i := intRange $numCommitments }} + publicCommitments[{{$i}}] = uint256( + sha256( + abi.encodePacked( + commitments[{{mul $i 2}}], + commitments[{{sum (mul $i 2) 1}}] + {{- $pcIndex := index $PublicAndCommitmentCommitted $i }} + {{- range $j := intRange (len $pcIndex) }} + {{- $l := index $pcIndex $j }} + ,input[{{sub $l 1}}] + {{- end }} + ) + ) + ) % R; + {{- end }} + // Commitments + pairings[ 0] = commitments[0]; + pairings[ 1] = commitments[1]; + pairings[ 2] = PEDERSEN_G_X_1; + pairings[ 3] = PEDERSEN_G_X_0; + pairings[ 4] = PEDERSEN_G_Y_1; + pairings[ 5] = PEDERSEN_G_Y_0; + pairings[ 6] = Px; + pairings[ 7] = Py; + pairings[ 8] = PEDERSEN_GROOTSIGMANEG_X_1; + pairings[ 9] = PEDERSEN_GROOTSIGMANEG_X_0; + pairings[10] = PEDERSEN_GROOTSIGMANEG_Y_1; + pairings[11] = PEDERSEN_GROOTSIGMANEG_Y_0; + + // Verify pedersen commitments + bool success; + assembly ("memory-safe") { + let f := mload(0x40) + + success := staticcall(gas(), PRECOMPILE_VERIFY, pairings, 0x180, f, 0x20) + success := and(success, mload(f)) + } + if (!success) { + revert CommitmentInvalid(); + } } - if (!success || output[0] != 1) { - // Either proof or verification key invalid. - // We assume the contract is correctly generated, so the verification key is valid. - revert ProofInvalid(); + {{- end }} + + { + (uint256 Ax, uint256 Ay) = decompress_g1(compressedProof[0]); + (uint256 Bx0, uint256 Bx1, uint256 By0, uint256 By1) = decompress_g2(compressedProof[2], compressedProof[1]); + (uint256 Cx, uint256 Cy) = decompress_g1(compressedProof[3]); + {{- if eq $numCommitments 0 }} + (uint256 Lx, uint256 Ly) = publicInputMSM(input); + {{- else }} + (uint256 Lx, uint256 Ly) = publicInputMSM( + input, + publicCommitments, + commitments + ); + {{- end}} + + // Verify the pairing + // Note: The precompile expects the F2 coefficients in big-endian order. + // Note: The pairing precompile rejects unreduced values, so we won't check that here. + // e(A, B) + pairings[ 0] = Ax; + pairings[ 1] = Ay; + pairings[ 2] = Bx1; + pairings[ 3] = Bx0; + pairings[ 4] = By1; + pairings[ 5] = By0; + // e(C, -δ) + pairings[ 6] = Cx; + pairings[ 7] = Cy; + pairings[ 8] = DELTA_NEG_X_1; + pairings[ 9] = DELTA_NEG_X_0; + pairings[10] = DELTA_NEG_Y_1; + pairings[11] = DELTA_NEG_Y_0; + // e(α, -β) + pairings[12] = ALPHA_X; + pairings[13] = ALPHA_Y; + pairings[14] = BETA_NEG_X_1; + pairings[15] = BETA_NEG_X_0; + pairings[16] = BETA_NEG_Y_1; + pairings[17] = BETA_NEG_Y_0; + // e(L_pub, -γ) + pairings[18] = Lx; + pairings[19] = Ly; + pairings[20] = GAMMA_NEG_X_1; + pairings[21] = GAMMA_NEG_X_0; + pairings[22] = GAMMA_NEG_Y_1; + pairings[23] = GAMMA_NEG_Y_0; + + // Check pairing equation. + bool success; + uint256[1] memory output; + assembly ("memory-safe") { + success := staticcall(gas(), PRECOMPILE_VERIFY, pairings, 0x300, output, 0x20) + } + if (!success || output[0] != 1) { + // Either proof or verification key invalid. 
+ // We assume the contract is correctly generated, so the verification key is valid. + revert ProofInvalid(); + } } } @@ -490,18 +651,77 @@ contract Verifier { /// proof was successfully verified. /// @param proof the points (A, B, C) in EIP-197 format matching the output /// of compressProof. + {{- if gt $numCommitments 0 }} + /// @param commitments the Pedersen commitments from the proof. + /// @param commitmentPok the proof of knowledge for the Pedersen commitments. + {{- end }} /// @param input the public input field elements in the scalar field Fr. /// Elements must be reduced. function verifyProof( uint256[8] calldata proof, - uint256[{{$numPublic}}] calldata input + {{- if gt $numCommitments 0}} + uint256[{{mul 2 $numCommitments}}] calldata commitments, + uint256[2] calldata commitmentPok, + {{- end }} + uint256[{{$numWitness}}] calldata input ) public view { + {{- if eq $numCommitments 0 }} (uint256 x, uint256 y) = publicInputMSM(input); + {{- else }} + // HashToField + uint256[{{$numCommitments}}] memory publicCommitments; + {{- range $i := intRange $numCommitments }} + publicCommitments[{{$i}}] = uint256( + sha256( + abi.encodePacked( + commitments[{{mul $i 2}}], + commitments[{{sum (mul $i 2) 1}}] + {{- $pcIndex := index $PublicAndCommitmentCommitted $i }} + {{- range $j := intRange (len $pcIndex) }} + {{- $l := index $pcIndex $j }} + ,input[{{sub $l 1}}] + {{- end }} + ) + ) + ) % R; + {{- end }} + + // Verify pedersen commitments + bool success; + assembly ("memory-safe") { + let f := mload(0x40) + + calldatacopy(f, commitments, 0x40) // Copy Commitments + mstore(add(f, 0x40), PEDERSEN_G_X_1) + mstore(add(f, 0x60), PEDERSEN_G_X_0) + mstore(add(f, 0x80), PEDERSEN_G_Y_1) + mstore(add(f, 0xa0), PEDERSEN_G_Y_0) + calldatacopy(add(f, 0xc0), commitmentPok, 0x40) + mstore(add(f, 0x100), PEDERSEN_GROOTSIGMANEG_X_1) + mstore(add(f, 0x120), PEDERSEN_GROOTSIGMANEG_X_0) + mstore(add(f, 0x140), PEDERSEN_GROOTSIGMANEG_Y_1) + mstore(add(f, 0x160), PEDERSEN_GROOTSIGMANEG_Y_0) + + success := staticcall(gas(), PRECOMPILE_VERIFY, f, 0x180, f, 0x20) + success := and(success, mload(f)) + } + if (!success) { + revert CommitmentInvalid(); + } + + (uint256 x, uint256 y) = publicInputMSM( + input, + publicCommitments, + commitments + ); + {{- end }} // Note: The precompile expects the F2 coefficients in big-endian order. // Note: The pairing precompile rejects unreduced values, so we won't check that here. - + + {{- if eq $numCommitments 0 }} bool success; + {{- end }} assembly ("memory-safe") { let f := mload(0x40) // Free memory pointer. @@ -543,3 +763,20 @@ contract Verifier { } } ` + +// MarshalSolidity converts a proof to a byte array that can be used in a +// Solidity contract. +func (proof *Proof) MarshalSolidity() []byte { + var buf bytes.Buffer + _, err := proof.WriteRawTo(&buf) + if err != nil { + panic(err) + } + + // If there are no commitments, we can return only Ar | Bs | Krs + if len(proof.Commitments) > 0 { + return buf.Bytes() + } else { + return buf.Bytes()[:8*fr.Bytes] + } +} diff --git a/backend/groth16/bn254/verify.go b/backend/groth16/bn254/verify.go index 14eda65ed6..f8e0927625 100644 --- a/backend/groth16/bn254/verify.go +++ b/backend/groth16/bn254/verify.go @@ -146,6 +146,9 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // See https://github.com/ConsenSys/gnark-tests for example usage. 
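As a concrete illustration of the usage pointed to above, exporting a verifier contract boils down to compiling the circuit, running the Groth16 setup and writing the verifying key to a file. A minimal sketch, with the circuit and file name as placeholders:

package main

import (
	"os"

	"github.com/consensys/gnark-crypto/ecc"
	"github.com/consensys/gnark/backend/groth16"
	"github.com/consensys/gnark/frontend"
	"github.com/consensys/gnark/frontend/cs/r1cs"
)

type exampleCircuit struct {
	X frontend.Variable
	Y frontend.Variable `gnark:",public"`
}

func (c *exampleCircuit) Define(api frontend.API) error {
	api.AssertIsEqual(api.Mul(c.X, c.X), c.Y)
	return nil
}

func main() {
	ccs, err := frontend.Compile(ecc.BN254.ScalarField(), r1cs.NewBuilder, &exampleCircuit{})
	if err != nil {
		panic(err)
	}
	_, vk, err := groth16.Setup(ccs)
	if err != nil {
		panic(err)
	}
	f, err := os.Create("Verifier.sol")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := vk.ExportSolidity(f); err != nil {
		panic(err)
	}
}

The generated file is what gnark-solidity-checker compiles and exercises in the test changes further down.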
func (vk *VerifyingKey) ExportSolidity(w io.Writer) error { helpers := template.FuncMap{ + "sum": func(a, b int) int { + return a + b + }, "sub": func(a, b int) int { return a - b }, @@ -161,6 +164,13 @@ func (vk *VerifyingKey) ExportSolidity(w io.Writer) error { }, } + log := logger.Logger() + if len(vk.PublicAndCommitmentCommitted) > 1 { + log.Warn().Msg("exporting solidity verifier with more than one commitment is not supported") + } else if len(vk.PublicAndCommitmentCommitted) == 1 { + log.Warn().Msg("exporting solidity verifier only supports `sha256` as `HashToField`. The generated contract may not work for proofs generated with other hash functions.") + } + tmpl, err := template.New("").Funcs(helpers).Parse(solidityTemplate) if err != nil { return err diff --git a/integration_test.go b/integration_test.go index 3220b2ca6a..3909dd4161 100644 --- a/integration_test.go +++ b/integration_test.go @@ -20,7 +20,6 @@ import ( "sort" "testing" - "github.com/consensys/gnark/backend" "github.com/consensys/gnark/constraint/solver" "github.com/consensys/gnark/internal/backend/circuits" "github.com/consensys/gnark/test" @@ -63,11 +62,6 @@ func TestIntegrationAPI(t *testing.T) { opts = append(opts, test.NoFuzzing()) } - if name == "commit" && test.SolcCheck { - // TODO @gbotrel FIXME groth16 solidity verifier needs updating. - opts = append(opts, test.WithBackends(backend.PLONK)) - } - assert.CheckCircuit(tData.Circuit, opts...) }, name) } diff --git a/internal/backend/circuits/circuits.go b/internal/backend/circuits/circuits.go index b6bfd6e4f9..3424a61750 100644 --- a/internal/backend/circuits/circuits.go +++ b/internal/backend/circuits/circuits.go @@ -27,7 +27,7 @@ func addEntry(name string, circuit, proverGood, proverBad frontend.Circuit, curv panic("name " + name + "already taken by another test circuit ") } - Circuits[name] = TestCircuit{circuit, []frontend.Circuit{proverGood}, []frontend.Circuit{proverBad}, nil, curves} + Circuits[name] = TestCircuit{Circuit: circuit, ValidAssignments: []frontend.Circuit{proverGood}, InvalidAssignments: []frontend.Circuit{proverBad}, HintFunctions: nil, Curves: curves} } func addNewEntry(name string, circuit frontend.Circuit, proverGood, proverBad []frontend.Circuit, curves []ecc.ID, hintFunctions ...solver.Hint) { @@ -39,5 +39,5 @@ func addNewEntry(name string, circuit frontend.Circuit, proverGood, proverBad [] } solver.RegisterHint(hintFunctions...) 
- Circuits[name] = TestCircuit{circuit, proverGood, proverBad, hintFunctions, curves} + Circuits[name] = TestCircuit{Circuit: circuit, ValidAssignments: proverGood, InvalidAssignments: proverBad, HintFunctions: nil, Curves: curves} } diff --git a/internal/backend/circuits/commit.go b/internal/backend/circuits/commit.go index 3db7f17d8f..792f534a2d 100644 --- a/internal/backend/circuits/commit.go +++ b/internal/backend/circuits/commit.go @@ -1,6 +1,10 @@ package circuits -import "github.com/consensys/gnark/frontend" +import ( + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/ecc/bn254" + "github.com/consensys/gnark/frontend" +) type commitCircuit struct { Public frontend.Variable `gnark:",public"` @@ -23,15 +27,30 @@ func (circuit *commitCircuit) Define(api frontend.API) error { return nil } -func init() { - - var circuit, good, bad commitCircuit - - good.X = 3 - good.Public = 16 +type noCommitCircuit struct { + Public frontend.Variable `gnark:",public"` + X frontend.Variable +} - bad.X = 4 - bad.Public = 0 +func (circuit *noCommitCircuit) Define(api frontend.API) error { + api.AssertIsDifferent(circuit.Public, 0) + a := api.Mul(circuit.X, circuit.X) + for i := 0; i < 10; i++ { + a = api.Mul(a, circuit.X) + } + c := api.Add(a, circuit.X) + api.AssertIsDifferent(c, a) + return nil +} - addEntry("commit", &circuit, &good, &bad, nil) +func init() { + // need to have separate test cases as the hash-to-field for PLONK and Groth16 verifiers are different + addEntry( + "commit", + &commitCircuit{}, &commitCircuit{Public: 16, X: 3}, &commitCircuit{Public: 0, X: 4}, + []ecc.ID{bn254.ID}) + addEntry( + "no_commit", + &noCommitCircuit{}, &noCommitCircuit{Public: 16, X: 3}, &noCommitCircuit{Public: 0, X: 4}, + []ecc.ID{bn254.ID}) } diff --git a/internal/generator/backend/template/zkpschemes/groth16/groth16.verify.go.tmpl b/internal/generator/backend/template/zkpschemes/groth16/groth16.verify.go.tmpl index 80891eb4a3..257359beea 100644 --- a/internal/generator/backend/template/zkpschemes/groth16/groth16.verify.go.tmpl +++ b/internal/generator/backend/template/zkpschemes/groth16/groth16.verify.go.tmpl @@ -94,14 +94,14 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // compute e(Σx.[Kvk(t)]1, -[γ]2) var kSum curve.G1Jac if _, err := kSum.MultiExp(vk.G1.K[1:], publicWitness, ecc.MultiExpConfig{}); err != nil { - return err + return err } kSum.AddMixed(&vk.G1.K[0]) for i := range proof.Commitments { kSum.AddMixed(&proof.Commitments[i]) } - + var kSumAff curve.G1Affine kSumAff.FromJacobian(&kSum) @@ -112,7 +112,7 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac // wait for (eKrsδ, eArBs) if err := <-chDone; err != nil { - return err + return err } right = curve.FinalExponentiation(&right, &doubleML) @@ -128,10 +128,13 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac {{if eq .Curve "BN254"}} // ExportSolidity writes a solidity Verifier contract on provided writer. // This is an experimental feature and gnark solidity generator as not been thoroughly tested. -// +// // See https://github.com/ConsenSys/gnark-tests for example usage. 
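Back to the commit/no_commit test circuits added above: commitCircuit's Define body is not part of this hunk, but the pattern it relies on is the frontend Committer API. A hypothetical circuit of the same flavour, only to show how a commitment is requested in-circuit (this is not the actual test circuit):

package sketch

import (
	"errors"

	"github.com/consensys/gnark/frontend"
)

type exampleCommitCircuit struct {
	Public frontend.Variable `gnark:",public"`
	X      frontend.Variable
}

// Define asks the builder to commit to X and Public; the committed value acts
// as an in-circuit random value, and on the verifier side it becomes the extra
// public input reconstructed from the Pedersen commitment.
func (c *exampleCommitCircuit) Define(api frontend.API) error {
	committer, ok := api.(frontend.Committer)
	if !ok {
		return errors.New("builder does not support commitments")
	}
	cmt, err := committer.Commit(c.X, c.Public)
	if err != nil {
		return err
	}
	api.AssertIsDifferent(cmt, 0)
	return nil
}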
func (vk *VerifyingKey) ExportSolidity(w io.Writer) error { helpers := template.FuncMap{ + "sum": func(a, b int) int { + return a + b + }, "sub": func(a, b int) int { return a - b }, @@ -147,6 +150,13 @@ func (vk *VerifyingKey) ExportSolidity(w io.Writer) error { }, } + log := logger.Logger() + if len(vk.PublicAndCommitmentCommitted) > 1 { + log.Warn().Msg("exporting solidity verifier with more than one commitment is not supported") + } else if len(vk.PublicAndCommitmentCommitted) == 1 { + log.Warn().Msg("exporting solidity verifier only supports `sha256` as `HashToField`. The generated contract may not work for proofs generated with other hash functions.") + } + tmpl, err := template.New("").Funcs(helpers).Parse(solidityTemplate) if err != nil { return err diff --git a/std/math/polynomial/polynomial_test.go b/std/math/polynomial/polynomial_test.go index e9a7757a5e..f93c7d7b28 100644 --- a/std/math/polynomial/polynomial_test.go +++ b/std/math/polynomial/polynomial_test.go @@ -43,7 +43,7 @@ func testEvalPoly[FR emulated.FieldParams](t *testing.T, p []int64, at int64, ev Evaluation: emulated.ValueOf[FR](evaluation), } - assert.CheckCircuit(&evalPolyCircuit[FR]{P: make([]emulated.Element[FR], len(p))}, test.WithValidAssignment(&witness)) + assert.CheckCircuit(&evalPolyCircuit[FR]{P: make([]emulated.Element[FR], len(p))}, test.WithValidAssignment(&witness), test.NoSolidityChecks()) } func TestEvalPoly(t *testing.T) { @@ -98,7 +98,7 @@ func testEvalMultiLin[FR emulated.FieldParams](t *testing.T) { Evaluation: emulated.ValueOf[FR](17), } - assert.CheckCircuit(&evalMultiLinCircuit[FR]{M: make([]emulated.Element[FR], 4), At: make([]emulated.Element[FR], 2)}, test.WithValidAssignment(&witness)) + assert.CheckCircuit(&evalMultiLinCircuit[FR]{M: make([]emulated.Element[FR], 4), At: make([]emulated.Element[FR], 2)}, test.WithValidAssignment(&witness), test.NoSolidityChecks()) } type evalEqCircuit[FR emulated.FieldParams] struct { @@ -144,7 +144,7 @@ func testEvalEq[FR emulated.FieldParams](t *testing.T) { Eq: emulated.ValueOf[FR](148665), } - assert.CheckCircuit(&evalEqCircuit[FR]{X: make([]emulated.Element[FR], 4), Y: make([]emulated.Element[FR], 4)}, test.WithValidAssignment(&witness)) + assert.CheckCircuit(&evalEqCircuit[FR]{X: make([]emulated.Element[FR], 4), Y: make([]emulated.Element[FR], 4)}, test.WithValidAssignment(&witness), test.NoSolidityChecks()) } type interpolateLDECircuit[FR emulated.FieldParams] struct { @@ -180,7 +180,7 @@ func testInterpolateLDE[FR emulated.FieldParams](t *testing.T, at int64, values Expected: emulated.ValueOf[FR](expected), } - assert.CheckCircuit(&interpolateLDECircuit[FR]{Values: make([]emulated.Element[FR], len(values))}, test.WithValidAssignment(assignment)) + assert.CheckCircuit(&interpolateLDECircuit[FR]{Values: make([]emulated.Element[FR], len(values))}, test.WithValidAssignment(assignment), test.NoSolidityChecks()) } func TestInterpolateLDEOnRange(t *testing.T) { diff --git a/test/assert_checkcircuit.go b/test/assert_checkcircuit.go index e666785294..16420065fb 100644 --- a/test/assert_checkcircuit.go +++ b/test/assert_checkcircuit.go @@ -1,6 +1,8 @@ package test import ( + "crypto/sha256" + "github.com/consensys/gnark-crypto/ecc" "github.com/consensys/gnark/backend" "github.com/consensys/gnark/backend/groth16" @@ -120,10 +122,19 @@ func (assert *Assert) CheckCircuit(circuit frontend.Circuit, opts ...TestingOpti w := w assert.Run(func(assert *Assert) { checkSolidity := opt.checkSolidity && curve == ecc.BN254 - proof, err := concreteBackend.prove(ccs, pk, 
w.full, opt.proverOpts...) + proverOpts := opt.proverOpts + verifierOpts := opt.verifierOpts + if b == backend.GROTH16 { + // currently groth16 Solidity checker only supports circuits with up to 1 commitment + checkSolidity = checkSolidity && (len(ccs.GetCommitments().CommitmentIndexes()) <= 1) + // additionally, we use sha256 as hash to field (fixed in Solidity contract) + proverOpts = append(proverOpts, backend.WithProverHashToFieldFunction(sha256.New())) + verifierOpts = append(verifierOpts, backend.WithVerifierHashToFieldFunction(sha256.New())) + } + proof, err := concreteBackend.prove(ccs, pk, w.full, proverOpts...) assert.noError(err, &w) - err = concreteBackend.verify(proof, vk, w.public, opt.verifierOpts...) + err = concreteBackend.verify(proof, vk, w.public, verifierOpts...) assert.noError(err, &w) if checkSolidity { diff --git a/test/assert_solidity.go b/test/assert_solidity.go index 061b21a37b..16d1437706 100644 --- a/test/assert_solidity.go +++ b/test/assert_solidity.go @@ -1,7 +1,6 @@ package test import ( - "bytes" "encoding/hex" "io" "os" @@ -9,9 +8,8 @@ import ( "path/filepath" "strconv" + fr_bn254 "github.com/consensys/gnark-crypto/ecc/bn254/fr" "github.com/consensys/gnark/backend" - groth16_bn254 "github.com/consensys/gnark/backend/groth16/bn254" - plonk_bn254 "github.com/consensys/gnark/backend/plonk/bn254" "github.com/consensys/gnark/backend/witness" ) @@ -26,7 +24,7 @@ type verifyingKey interface { func (assert *Assert) solidityVerification(b backend.ID, vk verifyingKey, proof any, validPublicWitness witness.Witness) { - if !SolcCheck || vk.NbPublicWitness() == 0 { + if !SolcCheck || len(validPublicWitness.Vector().(fr_bn254.Vector)) == 0 { return // nothing to check, will make solc fail. } assert.t.Helper() @@ -53,29 +51,30 @@ func (assert *Assert) solidityVerification(b backend.ID, vk verifyingKey, out, err := cmd.CombinedOutput() assert.NoError(err, string(out)) - // proof to hex - var proofStr string - var optBackend string + // len(vk.K) - 1 == len(publicWitness) + len(commitments) + numOfCommitments := vk.NbPublicWitness() - len(validPublicWitness.Vector().(fr_bn254.Vector)) + checkerOpts := []string{"verify"} if b == backend.GROTH16 { - optBackend = "--groth16" - var buf bytes.Buffer - _proof := proof.(*groth16_bn254.Proof) - _, err = _proof.WriteRawTo(&buf) - assert.NoError(err) - proofBytes := buf.Bytes() - // keep only fpSize * 8 bytes; for now solidity contract doesn't handle the commitment part. - proofBytes = proofBytes[:32*8] - proofStr = hex.EncodeToString(proofBytes) + checkerOpts = append(checkerOpts, "--groth16") } else if b == backend.PLONK { - optBackend = "--plonk" - _proof := proof.(*plonk_bn254.Proof) - // TODO @gbotrel make a single Marshal function for PlonK proof. 
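Outside the test harness, the same two options shown above are passed directly to Prove and Verify so that the hash-to-field matches the sha256 hard-coded in the generated contract. A sketch, with witness construction omitted:

package sketch

import (
	"crypto/sha256"

	"github.com/consensys/gnark/backend"
	"github.com/consensys/gnark/backend/groth16"
	"github.com/consensys/gnark/backend/witness"
	"github.com/consensys/gnark/constraint"
)

// proveAndVerifyWithSha256 pins the hash-to-field function on both sides,
// as the updated CheckCircuit does for Groth16 circuits with commitments.
func proveAndVerifyWithSha256(ccs constraint.ConstraintSystem, pk groth16.ProvingKey, vk groth16.VerifyingKey, full witness.Witness) error {
	proof, err := groth16.Prove(ccs, pk, full,
		backend.WithProverHashToFieldFunction(sha256.New()))
	if err != nil {
		return err
	}
	pub, err := full.Public()
	if err != nil {
		return err
	}
	return groth16.Verify(proof, vk, pub,
		backend.WithVerifierHashToFieldFunction(sha256.New()))
}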
- proofStr = hex.EncodeToString(_proof.MarshalSolidity()) + checkerOpts = append(checkerOpts, "--plonk") } else { panic("not implemented") } + // proof to hex + _proof, ok := proof.(interface{ MarshalSolidity() []byte }) + if !ok { + panic("proof does not implement MarshalSolidity()") + } + + proofStr := hex.EncodeToString(_proof.MarshalSolidity()) + + if numOfCommitments > 0 { + checkerOpts = append(checkerOpts, "--commitment", strconv.Itoa(numOfCommitments)) + } + // public witness to hex bPublicWitness, err := validPublicWitness.MarshalBinary() assert.NoError(err) @@ -86,14 +85,14 @@ func (assert *Assert) solidityVerification(b backend.ID, vk verifyingKey, bPublicWitness = bPublicWitness[12:] publicWitnessStr := hex.EncodeToString(bPublicWitness) + checkerOpts = append(checkerOpts, "--dir", tmpDir) + checkerOpts = append(checkerOpts, "--nb-public-inputs", strconv.Itoa(len(validPublicWitness.Vector().(fr_bn254.Vector)))) + checkerOpts = append(checkerOpts, "--proof", proofStr) + checkerOpts = append(checkerOpts, "--public-inputs", publicWitnessStr) + // verify proof // gnark-solidity-checker verify --dir tmdir --groth16 --nb-public-inputs 1 --proof 1234 --public-inputs dead - cmd = exec.Command("gnark-solidity-checker", "verify", - "--dir", tmpDir, - optBackend, - "--nb-public-inputs", strconv.Itoa(vk.NbPublicWitness()), - "--proof", proofStr, - "--public-inputs", publicWitnessStr) + cmd = exec.Command("gnark-solidity-checker", checkerOpts...) assert.t.Log("running ", cmd.String()) out, err = cmd.CombinedOutput() assert.NoError(err, string(out)) diff --git a/test/commitments_test.go b/test/commitments_test.go index 8a8102b3f2..40dc8c166e 100644 --- a/test/commitments_test.go +++ b/test/commitments_test.go @@ -202,11 +202,12 @@ func init() { func TestCommitment(t *testing.T) { t.Parallel() - assert := NewAssert(t) + for i, assignment := range commitmentTestCircuits { - t.Log("circuit", i, removePackageName(reflect.TypeOf(assignment).String())) - assert.CheckCircuit(hollow(assignment), WithValidAssignment(assignment), WithBackends(backend.GROTH16, backend.PLONK)) + assert.Run(func(assert *Assert) { + assert.CheckCircuit(hollow(assignment), WithValidAssignment(assignment), WithBackends(backend.GROTH16, backend.PLONK)) + }, fmt.Sprintf("%d-%s", i, removePackageName(reflect.TypeOf(assignment).String()))) } } From 34370087b77e10884306f5eaff5f76f0b66c0f9f Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Mon, 8 Apr 2024 19:45:40 +0200 Subject: [PATCH 29/55] feat: used in template for proof offsetss --- backend/plonk/bn254/solidity.go | 55 +++++++++++++++++---------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index ecac00e646..9d46583cae 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -68,46 +68,47 @@ contract PlonkVerifier { // ------------------------------------------------ // offset proof - uint256 private constant PROOF_L_COM_X = 0x00; - uint256 private constant PROOF_L_COM_Y = 0x20; - uint256 private constant PROOF_R_COM_X = 0x40; - uint256 private constant PROOF_R_COM_Y = 0x60; - uint256 private constant PROOF_O_COM_X = 0x80; - uint256 private constant PROOF_O_COM_Y = 0xa0; + {{ $offset := 0 }} + uint256 private constant PROOF_L_COM_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_L_COM_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_R_COM_X = {{ hex $offset }};{{ $offset = add 
$offset 0x20}} + uint256 private constant PROOF_R_COM_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_O_COM_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_O_COM_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} // h = h_0 + x^{n+2}h_1 + x^{2(n+2)}h_2 - uint256 private constant PROOF_H_0_X = 0xc0; - uint256 private constant PROOF_H_0_Y = 0xe0; - uint256 private constant PROOF_H_1_X = 0x100; - uint256 private constant PROOF_H_1_Y = 0x120; - uint256 private constant PROOF_H_2_X = 0x140; - uint256 private constant PROOF_H_2_Y = 0x160; + uint256 private constant PROOF_H_0_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_H_0_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_H_1_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_H_1_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_H_2_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_H_2_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} // wire values at zeta - uint256 private constant PROOF_L_AT_ZETA = 0x180; - uint256 private constant PROOF_R_AT_ZETA = 0x1a0; - uint256 private constant PROOF_O_AT_ZETA = 0x1c0; + uint256 private constant PROOF_L_AT_ZETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_R_AT_ZETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_O_AT_ZETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} // S1(zeta),S2(zeta) - uint256 private constant PROOF_S1_AT_ZETA = 0x1e0; // Sσ1(zeta) - uint256 private constant PROOF_S2_AT_ZETA = 0x200; // Sσ2(zeta) + uint256 private constant PROOF_S1_AT_ZETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} // Sσ1(zeta) + uint256 private constant PROOF_S2_AT_ZETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} // Sσ2(zeta) // [Z] - uint256 private constant PROOF_GRAND_PRODUCT_COMMITMENT_X = 0x220; - uint256 private constant PROOF_GRAND_PRODUCT_COMMITMENT_Y = 0x240; + uint256 private constant PROOF_GRAND_PRODUCT_COMMITMENT_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_GRAND_PRODUCT_COMMITMENT_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} - uint256 private constant PROOF_GRAND_PRODUCT_AT_ZETA_OMEGA = 0x260; // z(w*zeta) - uint256 private constant PROOF_LINEARISED_POLYNOMIAL_AT_ZETA = 0x280; // r(zeta) + uint256 private constant PROOF_GRAND_PRODUCT_AT_ZETA_OMEGA = {{ hex $offset }};{{ $offset = add $offset 0x20}} // z(w*zeta) + uint256 private constant PROOF_LINEARISED_POLYNOMIAL_AT_ZETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} // r(zeta) // Folded proof for the opening of linearised poly, l, r, o, s_1, s_2, qcp - uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_X = 0x2a0; - uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_Y = 0x2c0; + uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} - uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_X = 0x2e0; - uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_Y = 0x300; + uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant PROOF_OPENING_AT_ZETA_OMEGA_Y = {{ hex $offset }};{{ $offset = add 
$offset 0x20}} - uint256 private constant PROOF_OPENING_QCP_AT_ZETA = 0x320; - uint256 private constant PROOF_BSB_COMMITMENTS = {{ hex (add 800 (mul (len .CommitmentConstraintIndexes) 32 ) )}}; + uint256 private constant PROOF_OPENING_QCP_AT_ZETA = {{ hex $offset }}; + uint256 private constant PROOF_BSB_COMMITMENTS = {{ hex (add $offset (mul (len .CommitmentConstraintIndexes) 32 ) )}}; // -> next part of proof is // [ openings_selector_commits || commitments_wires_commit_api] From 626b1a7dd0059190852f5d4550c06b718d883b33 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Mon, 8 Apr 2024 19:52:27 +0200 Subject: [PATCH 30/55] fix: fixed type --- backend/plonk/bn254/solidity.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 9d46583cae..583e99a118 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -61,7 +61,7 @@ contract PlonkVerifier { {{ end }} {{ range $index, $element := .CommitmentConstraintIndexes -}} - uint256 private constant VK_INDEX_COMMIT_API{{ $index }} = {{ $element }}; + uint256 private constant VK_INDEX_COMMIT_API_{{ $index }} = {{ $element }}; {{ end -}} uint256 private constant VK_NB_CUSTOM_GATES = {{ len .CommitmentConstraintIndexes }}; @@ -655,7 +655,7 @@ contract PlonkVerifier { {{ range $index, $element := .CommitmentConstraintIndexes}} h_fr := hash_fr(calldataload(p), calldataload(add(p, 0x20)), mPtr) - ith_lagrange := compute_ith_lagrange_at_z(z, zpnmo, add(nb_public_inputs, VK_INDEX_COMMIT_API{{ $index }}), mPtr) + ith_lagrange := compute_ith_lagrange_at_z(z, zpnmo, add(nb_public_inputs, VK_INDEX_COMMIT_API_{{ $index }}), mPtr) pi_commit := addmod(pi_commit, mulmod(h_fr, ith_lagrange, R_MOD), R_MOD) p := add(p, 0x40) {{ end }} From ad5250058683d7ae0d59223f165963c8c129a70f Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Mon, 8 Apr 2024 19:55:52 +0200 Subject: [PATCH 31/55] feat: used to compute offsets in state --- backend/plonk/bn254/solidity.go | 52 ++++++++++++--------------------- 1 file changed, 19 insertions(+), 33 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 583e99a118..8e181c27bd 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -116,39 +116,25 @@ contract PlonkVerifier { // -------- offset state // challenges to check the claimed quotient - uint256 private constant STATE_ALPHA = 0x00; - uint256 private constant STATE_BETA = 0x20; - uint256 private constant STATE_GAMMA = 0x40; - uint256 private constant STATE_ZETA = 0x60; - - // reusable value - uint256 private constant STATE_ALPHA_SQUARE_LAGRANGE_0 = 0x80; - - // commitment to H - uint256 private constant STATE_FOLDED_H_X = 0xa0; - uint256 private constant STATE_FOLDED_H_Y = 0xc0; - - // commitment to the linearised polynomial - uint256 private constant STATE_LINEARISED_POLYNOMIAL_X = 0xe0; - uint256 private constant STATE_LINEARISED_POLYNOMIAL_Y = 0x100; - - // Folded proof for the opening of H, linearised poly, l, r, o, s_1, s_2, qcp - uint256 private constant STATE_FOLDED_CLAIMED_VALUES = 0x120; - - // folded digests of H, linearised poly, l, r, o, s_1, s_2, qcp - uint256 private constant STATE_FOLDED_DIGESTS_X = 0x140; - uint256 private constant STATE_FOLDED_DIGESTS_Y = 0x160; - - uint256 private constant STATE_PI = 0x180; - - uint256 private constant STATE_ZETA_POWER_N_MINUS_ONE = 0x1a0; - - uint256 private constant STATE_GAMMA_KZG = 0x1c0; - - uint256 private constant STATE_SUCCESS = 
0x1e0; - uint256 private constant STATE_CHECK_VAR = 0x200; // /!\ this slot is used for debugging only - - uint256 private constant STATE_LAST_MEM = 0x220; + {{ $offset = 0 }} + uint256 private constant STATE_ALPHA = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_BETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_GAMMA = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_ZETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_ALPHA_SQUARE_LAGRANGE_0 = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_FOLDED_H_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_FOLDED_H_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_LINEARISED_POLYNOMIAL_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_LINEARISED_POLYNOMIAL_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_FOLDED_CLAIMED_VALUES = {{ hex $offset }};{{ $offset = add $offset 0x20}} // Folded proof for the opening of H, linearised poly, l, r, o, s_1, s_2, qcp + uint256 private constant STATE_FOLDED_DIGESTS_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} // folded digests of H, linearised poly, l, r, o, s_1, s_2, qcp + uint256 private constant STATE_FOLDED_DIGESTS_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_PI = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_ZETA_POWER_N_MINUS_ONE = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_GAMMA_KZG = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_SUCCESS = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_CHECK_VAR = {{ hex $offset }};{{ $offset = add $offset 0x20}} // /!\ this slot is used for debugging only + uint256 private constant STATE_LAST_MEM = {{ hex $offset }};{{ $offset = add $offset 0x20}} // -------- utils (for Fiat Shamir) uint256 private constant FS_ALPHA = 0x616C706861; // "alpha" From 47b9ec8968d53324f4fa6c4c778917ae7cfc355e Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Mon, 8 Apr 2024 19:58:23 +0200 Subject: [PATCH 32/55] feat: addition of precompiles as constants --- backend/plonk/bn254/solidity.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 8e181c27bd..68c2148d03 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -150,12 +150,16 @@ contract PlonkVerifier { // -------- utils (for hash_fr) uint256 private constant HASH_FR_BB = 340282366920938463463374607431768211456; // 2**128 uint256 private constant HASH_FR_ZERO_UINT256 = 0; - uint8 private constant HASH_FR_LEN_IN_BYTES = 48; uint8 private constant HASH_FR_SIZE_DOMAIN = 11; uint8 private constant HASH_FR_ONE = 1; uint8 private constant HASH_FR_TWO = 2; {{ end }} + + // -------- precompiles + uint8 private constant EC_ADD = 0x6; + uint8 private constant EC_MUL = 0x7; + uint8 private constant EC_PAIR = 0x8; /// Verify a Plonk proof. /// Reverts if the proof or the public inputs are malformed. 
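The offset patches above (29 and 31) replace hard-coded byte offsets with a running template variable, so adding or removing a field in the proof or state layout renumbers everything after it automatically. A self-contained illustration of the mechanism with Go's text/template; the helper map here is illustrative, not gnark's exact FuncMap:

package main

import (
	"fmt"
	"os"
	"text/template"
)

// Each constant takes the current $offset and then advances it by 0x20
// (one 32-byte calldata word), mirroring the PROOF_*/STATE_* constants.
const offsetsTmpl = `{{ $offset := 0 -}}
uint256 private constant PROOF_L_COM_X = {{ hex $offset }};{{ $offset = add $offset 0x20 }}
uint256 private constant PROOF_L_COM_Y = {{ hex $offset }};{{ $offset = add $offset 0x20 }}
uint256 private constant PROOF_R_COM_X = {{ hex $offset }};{{ $offset = add $offset 0x20 }}
`

func main() {
	helpers := template.FuncMap{
		"add": func(a, b int) int { return a + b },
		"hex": func(i int) string { return fmt.Sprintf("0x%x", i) },
	}
	t := template.Must(template.New("offsets").Funcs(helpers).Parse(offsetsTmpl))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}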
From 61d9424b09d2f1b644e5acdc168686d995d8ba52 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Mon, 8 Apr 2024 19:59:58 +0200 Subject: [PATCH 33/55] feat: replaced precompiles opcode with constants --- backend/plonk/bn254/solidity.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 68c2148d03..12a75b6654 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -157,6 +157,7 @@ contract PlonkVerifier { {{ end }} // -------- precompiles + uint8 private constant MOD_EXP = 0x5; uint8 private constant EC_ADD = 0x6; uint8 private constant EC_MUL = 0x7; uint8 private constant EC_PAIR = 0x8; @@ -1227,7 +1228,7 @@ contract PlonkVerifier { mstore(add(mPtr, 0x20), mload(add(p, 0x20))) mstore(add(mPtr, 0x40), mload(q)) mstore(add(mPtr, 0x60), mload(add(q, 0x20))) - let l_success := staticcall(gas(),6,mPtr,0x80,dst,0x40) + let l_success := staticcall(gas(),EC_ADD,mPtr,0x80,dst,0x40) if iszero(l_success) { error_ec_op() } @@ -1242,7 +1243,7 @@ contract PlonkVerifier { mstore(add(mPtr, 0x20), mload(add(p, 0x20))) mstore(add(mPtr, 0x40), calldataload(q)) mstore(add(mPtr, 0x60), calldataload(add(q, 0x20))) - let l_success := staticcall(gas(), 6, mPtr, 0x80, dst, 0x40) + let l_success := staticcall(gas(), EC_ADD, mPtr, 0x80, dst, 0x40) if iszero(l_success) { error_ec_op() } @@ -1256,7 +1257,7 @@ contract PlonkVerifier { mstore(mPtr,mload(src)) mstore(add(mPtr,0x20),mload(add(src,0x20))) mstore(add(mPtr,0x40),s) - let l_success := staticcall(gas(),7,mPtr,0x60,dst,0x40) + let l_success := staticcall(gas(),EC_MUL,mPtr,0x60,dst,0x40) if iszero(l_success) { error_ec_op() } @@ -1270,7 +1271,7 @@ contract PlonkVerifier { mstore(mPtr, calldataload(src)) mstore(add(mPtr, 0x20), calldataload(add(src, 0x20))) mstore(add(mPtr, 0x40), s) - let l_success := staticcall(gas(), 7, mPtr, 0x60, dst, 0x40) + let l_success := staticcall(gas(), EC_MUL, mPtr, 0x60, dst, 0x40) if iszero(l_success) { error_ec_op() } @@ -1288,7 +1289,7 @@ contract PlonkVerifier { let l_success := staticcall(gas(),7,mPtr,0x60,mPtr,0x40) mstore(add(mPtr,0x40),mload(dst)) mstore(add(mPtr,0x60),mload(add(dst,0x20))) - l_success := and(l_success, staticcall(gas(),6,mPtr,0x80,dst, 0x40)) + l_success := and(l_success, staticcall(gas(),EC_ADD,mPtr,0x80,dst, 0x40)) if iszero(l_success) { error_ec_op() } @@ -1307,7 +1308,7 @@ contract PlonkVerifier { let l_success := staticcall(gas(), 7, mPtr, 0x60, mPtr, 0x40) mstore(add(mPtr, 0x40), mload(dst)) mstore(add(mPtr, 0x60), mload(add(dst, 0x20))) - l_success := and(l_success, staticcall(gas(), 6, mPtr, 0x80, dst, 0x40)) + l_success := and(l_success, staticcall(gas(), EC_ADD, mPtr, 0x80, dst, 0x40)) if iszero(l_success) { error_ec_op() } @@ -1333,7 +1334,7 @@ contract PlonkVerifier { mstore(add(mPtr, 0x60), x) mstore(add(mPtr, 0x80), e) mstore(add(mPtr, 0xa0), R_MOD) - let check_staticcall := staticcall(gas(),0x05,mPtr,0xc0,mPtr,0x20) + let check_staticcall := staticcall(gas(),MOD_EXP,mPtr,0xc0,mPtr,0x20) if eq(check_staticcall, 0) { } From 1e20b9eb546178fa7f2cbfa2c311768a629b692f Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Wed, 10 Apr 2024 11:07:35 +0200 Subject: [PATCH 34/55] fix: bn254 -> {{ toLower .Curve }} --- .../backend/template/zkpschemes/plonk/plonk.verify.go.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl 
b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl index d6e81a07cb..093b1428ad 100644 --- a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl +++ b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl @@ -28,7 +28,7 @@ var ( func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() + log := logger.Logger().With().Str("curve", "{{ toLower .Curve }}").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { From 2026b9de647990d601d5157991477244a5fec40b Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Wed, 10 Apr 2024 11:08:10 +0200 Subject: [PATCH 35/55] feat: code gen --- backend/plonk/bls12-377/verify.go | 2 +- backend/plonk/bls12-381/verify.go | 2 +- backend/plonk/bls24-315/verify.go | 2 +- backend/plonk/bls24-317/verify.go | 2 +- backend/plonk/bw6-633/verify.go | 2 +- backend/plonk/bw6-761/verify.go | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/backend/plonk/bls12-377/verify.go b/backend/plonk/bls12-377/verify.go index 3a06c2767f..d6232fde5f 100644 --- a/backend/plonk/bls12-377/verify.go +++ b/backend/plonk/bls12-377/verify.go @@ -45,7 +45,7 @@ var ( func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() + log := logger.Logger().With().Str("curve", "bls12-377").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { diff --git a/backend/plonk/bls12-381/verify.go b/backend/plonk/bls12-381/verify.go index 3a6eaffbff..6945303ca1 100644 --- a/backend/plonk/bls12-381/verify.go +++ b/backend/plonk/bls12-381/verify.go @@ -45,7 +45,7 @@ var ( func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() + log := logger.Logger().With().Str("curve", "bls12-381").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { diff --git a/backend/plonk/bls24-315/verify.go b/backend/plonk/bls24-315/verify.go index ef86f4a0c4..19acdbecd1 100644 --- a/backend/plonk/bls24-315/verify.go +++ b/backend/plonk/bls24-315/verify.go @@ -45,7 +45,7 @@ var ( func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() + log := logger.Logger().With().Str("curve", "bls24-315").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { diff --git a/backend/plonk/bls24-317/verify.go b/backend/plonk/bls24-317/verify.go index 75ab3d60be..f430d6c35e 100644 --- a/backend/plonk/bls24-317/verify.go +++ b/backend/plonk/bls24-317/verify.go @@ -45,7 +45,7 @@ var ( func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() + log := logger.Logger().With().Str("curve", "bls24-317").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) 
if err != nil { diff --git a/backend/plonk/bw6-633/verify.go b/backend/plonk/bw6-633/verify.go index 1f58399475..4748c0d3c4 100644 --- a/backend/plonk/bw6-633/verify.go +++ b/backend/plonk/bw6-633/verify.go @@ -45,7 +45,7 @@ var ( func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() + log := logger.Logger().With().Str("curve", "bw6-633").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { diff --git a/backend/plonk/bw6-761/verify.go b/backend/plonk/bw6-761/verify.go index 432711cb02..fb030b7d70 100644 --- a/backend/plonk/bw6-761/verify.go +++ b/backend/plonk/bw6-761/verify.go @@ -45,7 +45,7 @@ var ( func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...backend.VerifierOption) error { - log := logger.Logger().With().Str("curve", "bn254").Str("backend", "plonk").Logger() + log := logger.Logger().With().Str("curve", "bw6-761").Str("backend", "plonk").Logger() start := time.Now() cfg, err := backend.NewVerifierConfig(opts...) if err != nil { From f4f402e83e315b1ded4d760047c5ad430f5b29e3 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Wed, 10 Apr 2024 11:14:38 +0200 Subject: [PATCH 36/55] feat: change sign in comment --- .../backend/template/zkpschemes/plonk/plonk.verify.go.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl index 093b1428ad..3f509fdfee 100644 --- a/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl +++ b/internal/generator/backend/template/zkpschemes/plonk/plonk.verify.go.tmpl @@ -172,8 +172,8 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac tmp.Add(&o, &gamma) // (o(ζ)+γ) constLin.Mul(&tmp, &constLin).Mul(&constLin, &alpha).Mul(&constLin, &zu) // α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] + constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] // check that the opening of the linearised polynomial is equal to -constLin openingLinPol := proof.BatchedProof.ClaimedValues[0] From 1b03f76ac099192a7ddbc852e6c442679a6653f2 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Wed, 10 Apr 2024 11:16:37 +0200 Subject: [PATCH 37/55] feat: code gen --- backend/plonk/bls12-377/verify.go | 4 ++-- backend/plonk/bls12-381/verify.go | 4 ++-- backend/plonk/bls24-315/verify.go | 4 ++-- backend/plonk/bls24-317/verify.go | 4 ++-- backend/plonk/bn254/verify.go | 4 ++-- backend/plonk/bw6-633/verify.go | 4 ++-- backend/plonk/bw6-761/verify.go | 4 ++-- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/backend/plonk/bls12-377/verify.go b/backend/plonk/bls12-377/verify.go index d6232fde5f..1d5ae92d42 100644 --- a/backend/plonk/bls12-377/verify.go +++ b/backend/plonk/bls12-377/verify.go @@ -189,8 +189,8 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac tmp.Add(&o, &gamma) // 
(o(ζ)+γ) constLin.Mul(&tmp, &constLin).Mul(&constLin, &alpha).Mul(&constLin, &zu) // α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] + constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] // check that the opening of the linearised polynomial is equal to -constLin openingLinPol := proof.BatchedProof.ClaimedValues[0] diff --git a/backend/plonk/bls12-381/verify.go b/backend/plonk/bls12-381/verify.go index 6945303ca1..d16ed249fa 100644 --- a/backend/plonk/bls12-381/verify.go +++ b/backend/plonk/bls12-381/verify.go @@ -189,8 +189,8 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac tmp.Add(&o, &gamma) // (o(ζ)+γ) constLin.Mul(&tmp, &constLin).Mul(&constLin, &alpha).Mul(&constLin, &zu) // α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] + constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] // check that the opening of the linearised polynomial is equal to -constLin openingLinPol := proof.BatchedProof.ClaimedValues[0] diff --git a/backend/plonk/bls24-315/verify.go b/backend/plonk/bls24-315/verify.go index 19acdbecd1..2b4089f3b6 100644 --- a/backend/plonk/bls24-315/verify.go +++ b/backend/plonk/bls24-315/verify.go @@ -189,8 +189,8 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac tmp.Add(&o, &gamma) // (o(ζ)+γ) constLin.Mul(&tmp, &constLin).Mul(&constLin, &alpha).Mul(&constLin, &zu) // α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] + constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] // check that the opening of the linearised polynomial is equal to -constLin openingLinPol := proof.BatchedProof.ClaimedValues[0] diff --git a/backend/plonk/bls24-317/verify.go b/backend/plonk/bls24-317/verify.go index f430d6c35e..9e4589b53f 100644 --- a/backend/plonk/bls24-317/verify.go +++ b/backend/plonk/bls24-317/verify.go @@ -189,8 +189,8 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac tmp.Add(&o, &gamma) // (o(ζ)+γ) constLin.Mul(&tmp, &constLin).Mul(&constLin, &alpha).Mul(&constLin, &zu) // α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) - 
α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] + constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] // check that the opening of the linearised polynomial is equal to -constLin openingLinPol := proof.BatchedProof.ClaimedValues[0] diff --git a/backend/plonk/bn254/verify.go b/backend/plonk/bn254/verify.go index 5ab6573b31..6f70830650 100644 --- a/backend/plonk/bn254/verify.go +++ b/backend/plonk/bn254/verify.go @@ -189,8 +189,8 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac tmp.Add(&o, &gamma) // (o(ζ)+γ) constLin.Mul(&tmp, &constLin).Mul(&constLin, &alpha).Mul(&constLin, &zu) // α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] + constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] // check that the opening of the linearised polynomial is equal to -constLin openingLinPol := proof.BatchedProof.ClaimedValues[0] diff --git a/backend/plonk/bw6-633/verify.go b/backend/plonk/bw6-633/verify.go index 4748c0d3c4..d7d09efba1 100644 --- a/backend/plonk/bw6-633/verify.go +++ b/backend/plonk/bw6-633/verify.go @@ -189,8 +189,8 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac tmp.Add(&o, &gamma) // (o(ζ)+γ) constLin.Mul(&tmp, &constLin).Mul(&constLin, &alpha).Mul(&constLin, &zu) // α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] + constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] // check that the opening of the linearised polynomial is equal to -constLin openingLinPol := proof.BatchedProof.ClaimedValues[0] diff --git a/backend/plonk/bw6-761/verify.go b/backend/plonk/bw6-761/verify.go index fb030b7d70..7709c1d0ab 100644 --- a/backend/plonk/bw6-761/verify.go +++ b/backend/plonk/bw6-761/verify.go @@ -189,8 +189,8 @@ func Verify(proof *Proof, vk *VerifyingKey, publicWitness fr.Vector, opts ...bac tmp.Add(&o, &gamma) // (o(ζ)+γ) constLin.Mul(&tmp, &constLin).Mul(&constLin, &alpha).Mul(&constLin, &zu) // α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) - constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) - α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] + constLin.Sub(&constLin, &alphaSquareLagrangeOne).Add(&constLin, &pi) // PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ) + constLin.Neg(&constLin) // -[PI(ζ) - α²*L₁(ζ) + α(l(ζ)+β*s1(ζ)+γ)(r(ζ)+β*s2(ζ)+γ)(o(ζ)+γ)*z(ωζ)] // check that the opening of the linearised polynomial is equal to -constLin openingLinPol := 
proof.BatchedProof.ClaimedValues[0] From 0803a5f0ec3e8384c617c6fca410853b98b82a77 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Wed, 10 Apr 2024 11:20:00 +0200 Subject: [PATCH 38/55] feat: re enable test bs12->bw6 --- std/recursion/plonk/verifier_test.go | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/std/recursion/plonk/verifier_test.go b/std/recursion/plonk/verifier_test.go index f07e4e5a42..e37dac2c0c 100644 --- a/std/recursion/plonk/verifier_test.go +++ b/std/recursion/plonk/verifier_test.go @@ -84,32 +84,28 @@ func getInnerWoCommit(assert *test.Assert, field, outer *big.Int) (constraint.Co func TestBLS12InBW6WoCommit(t *testing.T) { assert := test.NewAssert(t) - // innerCcs, innerVK, innerWitness, innerProof := getInnerWoCommit(assert, ecc.BLS12_377.ScalarField(), ecc.BW6_761.ScalarField()) - innerCcs, innerVK, _, _ := getInnerWoCommit(assert, ecc.BLS12_377.ScalarField(), ecc.BW6_761.ScalarField()) + innerCcs, innerVK, innerWitness, innerProof := getInnerWoCommit(assert, ecc.BLS12_377.ScalarField(), ecc.BW6_761.ScalarField()) // outer proof circuitVk, err := ValueOfVerifyingKey[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine](innerVK) assert.NoError(err) - // circuitWitness, err := ValueOfWitness[sw_bls12377.ScalarField](innerWitness) - // assert.NoError(err) - // circuitProof, err := ValueOfProof[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine](innerProof) - // assert.NoError(err) + circuitWitness, err := ValueOfWitness[sw_bls12377.ScalarField](innerWitness) + assert.NoError(err) + circuitProof, err := ValueOfProof[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine](innerProof) + assert.NoError(err) outerCircuit := &OuterCircuit[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine, sw_bls12377.GT]{ InnerWitness: PlaceholderWitness[sw_bls12377.ScalarField](innerCcs), Proof: PlaceholderProof[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine](innerCcs), VerifyingKey: circuitVk, } - ccs, err := frontend.Compile(ecc.BW6_761.ScalarField(), scs.NewBuilder, outerCircuit) assert.NoError(err) - nbConstraints := ccs.GetNbConstraints() - fmt.Printf("nb constraints: %d\n", nbConstraints) - // outerAssignment := &OuterCircuit[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine, sw_bls12377.GT]{ - // InnerWitness: circuitWitness, - // Proof: circuitProof, - // } - // err = test.IsSolved(outerCircuit, outerAssignment, ecc.BW6_761.ScalarField()) - // assert.NoError(err) + outerAssignment := &OuterCircuit[sw_bls12377.ScalarField, sw_bls12377.G1Affine, sw_bls12377.G2Affine, sw_bls12377.GT]{ + InnerWitness: circuitWitness, + Proof: circuitProof, + } + err = test.IsSolved(outerCircuit, outerAssignment, ecc.BW6_761.ScalarField()) + assert.NoError(err) } From 24bca50ff8042491663453bb53f1fc47d068ff72 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Wed, 10 Apr 2024 15:05:49 +0200 Subject: [PATCH 39/55] feat: moved claimed values of linearised polynomial out of the proof --- backend/plonk/bn254/solidity.go | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 12a75b6654..d2c03cfc56 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -98,7 +98,6 @@ contract PlonkVerifier { uint256 private constant PROOF_GRAND_PRODUCT_COMMITMENT_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} uint256 private 
constant PROOF_GRAND_PRODUCT_AT_ZETA_OMEGA = {{ hex $offset }};{{ $offset = add $offset 0x20}} // z(w*zeta) - uint256 private constant PROOF_LINEARISED_POLYNOMIAL_AT_ZETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} // r(zeta) // Folded proof for the opening of linearised poly, l, r, o, s_1, s_2, qcp uint256 private constant PROOF_BATCH_OPENING_AT_ZETA_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} @@ -126,6 +125,7 @@ contract PlonkVerifier { uint256 private constant STATE_FOLDED_H_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} uint256 private constant STATE_LINEARISED_POLYNOMIAL_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} uint256 private constant STATE_LINEARISED_POLYNOMIAL_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} + uint256 private constant STATE_OPENING_LINEARISED_POLYNOMIAL_ZETA = {{ hex $offset }};{{ $offset = add $offset 0x20}} uint256 private constant STATE_FOLDED_CLAIMED_VALUES = {{ hex $offset }};{{ $offset = add $offset 0x20}} // Folded proof for the opening of H, linearised poly, l, r, o, s_1, s_2, qcp uint256 private constant STATE_FOLDED_DIGESTS_X = {{ hex $offset }};{{ $offset = add $offset 0x20}} // folded digests of H, linearised poly, l, r, o, s_1, s_2, qcp uint256 private constant STATE_FOLDED_DIGESTS_Y = {{ hex $offset }};{{ $offset = add $offset 0x20}} @@ -319,7 +319,7 @@ contract PlonkVerifier { /// Checks if the proof is of the correct size /// @param actual_proof_size size of the proof (not the expected size) function check_proof_size(actual_proof_size) { - let expected_proof_size := add(0x320, mul(VK_NB_CUSTOM_GATES,0x60)) + let expected_proof_size := add(0x300, mul(VK_NB_CUSTOM_GATES,0x60)) if iszero(eq(actual_proof_size, expected_proof_size)) { error_proof_size() } @@ -329,16 +329,9 @@ contract PlonkVerifier { /// @param aproof pointer to the beginning of the proof /// @dev the 'a' prepending proof is to have a local name function check_proof_openings_size(aproof) { - - - // linearised polynomial at zeta - let p := add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA) - if gt(calldataload(p), R_MOD_MINUS_ONE) { - error_proof_openings_size() - } // PROOF_L_AT_ZETA - p := add(aproof, PROOF_L_AT_ZETA) + let p := add(aproof, PROOF_L_AT_ZETA) if gt(calldataload(p), R_MOD_MINUS_ONE) { error_proof_openings_size() } @@ -918,7 +911,7 @@ contract PlonkVerifier { mstore(add(state, STATE_FOLDED_DIGESTS_X), mload(add(state, STATE_LINEARISED_POLYNOMIAL_X))) mstore(add(state, STATE_FOLDED_DIGESTS_Y), mload(add(state, STATE_LINEARISED_POLYNOMIAL_Y))) - mstore(add(state, STATE_FOLDED_CLAIMED_VALUES), calldataload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA))) + mstore(add(state, STATE_FOLDED_CLAIMED_VALUES), mload(add(state, STATE_OPENING_LINEARISED_POLYNOMIAL_ZETA))) point_acc_mul_calldata(add(state, STATE_FOLDED_DIGESTS_X), add(aproof, PROOF_L_COM_X), acc_gamma, mPtr) fr_acc_mul_calldata(add(state, STATE_FOLDED_CLAIMED_VALUES), add(aproof, PROOF_L_AT_ZETA), acc_gamma) @@ -987,13 +980,13 @@ contract PlonkVerifier { let offset := 0x1c0 - {{ range $index, $element := .CommitmentConstraintIndexes }} + {{ range $index, $element := .CommitmentConstraintIndexes -}} mstore(add(mPtr,offset), VK_QCP_{{ $index }}_X) mstore(add(mPtr,add(offset, 0x20)), VK_QCP_{{ $index }}_Y) offset := add(offset, 0x40) - {{ end }} + {{ end -}} - mstore(add(mPtr, offset), calldataload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA))) + mstore(add(mPtr, offset), mload(add(state, STATE_OPENING_LINEARISED_POLYNOMIAL_ZETA))) mstore(add(mPtr, add(offset, 0x20)), 
calldataload(add(aproof, PROOF_L_AT_ZETA))) mstore(add(mPtr, add(offset, 0x40)), calldataload(add(aproof, PROOF_R_AT_ZETA))) mstore(add(mPtr, add(offset, 0x60)), calldataload(add(aproof, PROOF_O_AT_ZETA))) @@ -1211,10 +1204,7 @@ contract PlonkVerifier { s1 := addmod(s1, s2, R_MOD) s1 := sub(R_MOD, s1) - let opening_linearised_polynomial := calldataload(add(aproof, PROOF_LINEARISED_POLYNOMIAL_AT_ZETA)) - if iszero(eq(opening_linearised_polynomial, s1)) { - error_verify() - } + mstore(add(state, STATE_OPENING_LINEARISED_POLYNOMIAL_ZETA), s1) } // BEGINNING utils math functions ------------------------------------------------- @@ -1395,8 +1385,8 @@ func (proof *Proof) MarshalSolidity() []byte { res = append(res, tmp32[:]...) // uint256 linearization_polynomial_at_zeta; - tmp32 = proof.BatchedProof.ClaimedValues[0].Bytes() - res = append(res, tmp32[:]...) + // tmp32 = proof.BatchedProof.ClaimedValues[0].Bytes() + // res = append(res, tmp32[:]...) // uint256 opening_at_zeta_proof_x; // uint256 opening_at_zeta_proof_y; From acd3529460864b9690244c145f1aba408d61fbaa Mon Sep 17 00:00:00 2001 From: Ivo Kubjas Date: Wed, 10 Apr 2024 19:56:47 +0200 Subject: [PATCH 40/55] perf: non-native multilinear polynomial evaluation (#1087) * perf: optimize multilinear evaluation * test: in same package * test: multilinear evaluation test * refactor: use new multi multilinear eval * fix: edge case with one var --- std/math/polynomial/polynomial.go | 102 ++++++++++---- .../polynomial/polynomial_oldeval_test.go | 50 +++++++ std/math/polynomial/polynomial_test.go | 131 ++++++++++++++++-- std/recursion/sumcheck/claimable_gate.go | 9 +- .../sumcheck/claimable_multilinear.go | 2 +- 5 files changed, 247 insertions(+), 47 deletions(-) create mode 100644 std/math/polynomial/polynomial_oldeval_test.go diff --git a/std/math/polynomial/polynomial.go b/std/math/polynomial/polynomial.go index 28442fdfca..e09ef69ef1 100644 --- a/std/math/polynomial/polynomial.go +++ b/std/math/polynomial/polynomial.go @@ -3,6 +3,7 @@ package polynomial import ( "fmt" "math/big" + "math/bits" "github.com/consensys/gnark/frontend" "github.com/consensys/gnark/std/math/emulated" @@ -102,45 +103,88 @@ func (p *Polynomial[FR]) EvalUnivariate(P Univariate[FR], at *emulated.Element[F // EvalMultilinear evaluates multilinear polynomial at variable values at. It // returns the evaluation. The method does not mutate the inputs. -func (p *Polynomial[FR]) EvalMultilinear(M Multilinear[FR], at []*emulated.Element[FR]) (*emulated.Element[FR], error) { - var s *emulated.Element[FR] - scaleCorrectionFactor := p.f.One() - for len(M) > 1 { - if len(M) >= minFoldScaledLogSize { - M, s = p.foldScaled(M, at[0]) - scaleCorrectionFactor = p.f.Mul(scaleCorrectionFactor, s) - } else { - M = p.fold(M, at[0]) +func (p *Polynomial[FR]) EvalMultilinear(at []*emulated.Element[FR], M Multilinear[FR]) (*emulated.Element[FR], error) { + ret, err := p.EvalMultilinearMany(at, M) + if err != nil { + return nil, err + } + return ret[0], nil +} + +// EvalMultilinearMany evaluates multilinear polynomials at variable values at. It +// returns the evaluations. The method does not mutate the inputs. +// +// The method allows to share computations of computing the coefficients of the +// multilinear polynomials at the given evaluation points. 
+func (p *Polynomial[FR]) EvalMultilinearMany(at []*emulated.Element[FR], M ...Multilinear[FR]) ([]*emulated.Element[FR], error) { + lenM := len(M[0]) + for i := range M { + if len(M[i]) != lenM { + return nil, fmt.Errorf("incompatible multilinear polynomial sizes") } - at = at[1:] } - if len(at) != 0 { + mlelems := make([][]*emulated.Element[FR], len(M)) + for i := range M { + mlelems[i] = FromSlice(M[i]) + } + if bits.OnesCount(uint(lenM)) != 1 { + return nil, fmt.Errorf("multilinear polynomial length must be a power of 2") + } + nbExpvars := bits.Len(uint(lenM)) - 1 + if len(at) != nbExpvars { return nil, fmt.Errorf("incompatible evaluation vector size") } - return p.f.Mul(&M[0], scaleCorrectionFactor), nil + split1 := nbExpvars / 2 + nbSplit1Elems := 1 << split1 + split2 := nbExpvars - split1 + nbSplit2Elems := 1 << split2 + partialMLEval1 := p.partialMultilinearEval(at[:split1]) + partialMLEval2 := p.partialMultilinearEval(at[split1:]) + sums := make([]*emulated.Element[FR], len(M)) + for k := range mlelems { + partialSums := make([]*emulated.Element[FR], nbSplit2Elems) + for i := range partialSums { + b := make([]*emulated.Element[FR], nbSplit1Elems) + for j := range b { + b[j] = mlelems[k][i+j*nbSplit2Elems] + } + partialSums[i] = p.innerProduct(b, partialMLEval1) + } + sums[k] = p.innerProduct(partialSums, partialMLEval2) + } + return sums, nil } -func (p *Polynomial[FR]) fold(M Multilinear[FR], at *emulated.Element[FR]) Multilinear[FR] { - mid := len(M) / 2 - R := make([]emulated.Element[FR], mid) - for j := range R { - diff := p.f.Sub(&M[mid+j], &M[j]) - diffAt := p.f.Mul(diff, at) - R[j] = *p.f.Add(&M[j], diffAt) +func (p *Polynomial[FR]) partialMultilinearEval(at []*emulated.Element[FR]) []*emulated.Element[FR] { + if len(at) == 0 { + return []*emulated.Element[FR]{p.f.One()} + } + res := []*emulated.Element[FR]{p.f.Sub(p.f.One(), at[len(at)-1]), at[len(at)-1]} + at = at[:len(at)-1] + for len(at) > 0 { + newRes := make([]*emulated.Element[FR], len(res)*2) + x := at[len(at)-1] + for j := range res { + resX := p.f.Mul(res[j], x) + newRes[j] = p.f.Sub(res[j], resX) + newRes[j+len(res)] = resX + } + res = newRes + at = at[:len(at)-1] } - return R + return res } -func (p *Polynomial[FR]) foldScaled(M Multilinear[FR], at *emulated.Element[FR]) (Multilinear[FR], *emulated.Element[FR]) { - denom := p.f.Sub(p.f.One(), at) - coeff := p.f.Div(at, denom) - mid := len(M) / 2 - R := make([]emulated.Element[FR], mid) - for j := range R { - tmp := p.f.Mul(&M[mid+j], coeff) - R[j] = *p.f.Add(&M[j], tmp) +func (p *Polynomial[FR]) innerProduct(a, b []*emulated.Element[FR]) *emulated.Element[FR] { + if len(a) != len(b) { + panic(fmt.Sprintf("incompatible sizes: %d and %d", len(a), len(b))) + } + muls := make([]*emulated.Element[FR], len(a)) + for i := range a { + muls[i] = p.f.MulNoReduce(a[i], b[i]) } - return R, denom + res := p.f.Sum(muls...) + return res } func (p *Polynomial[FR]) computeDeltaAtNaive(at *emulated.Element[FR], vLen int) []*emulated.Element[FR] { diff --git a/std/math/polynomial/polynomial_oldeval_test.go b/std/math/polynomial/polynomial_oldeval_test.go new file mode 100644 index 0000000000..29bc9fc126 --- /dev/null +++ b/std/math/polynomial/polynomial_oldeval_test.go @@ -0,0 +1,50 @@ +package polynomial + +import ( + "fmt" + + "github.com/consensys/gnark/std/math/emulated" +) + +// evalMultilinearOld evaluates a multilinear polynomial at a given point. +// This is the old version of the function, which is kept for comparison purposes. 
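// Editor's note — illustrative sketch, not part of this patch. EvalMultilinearMany
// above reduces one 2ⁿ-point multilinear evaluation to two half-size "eq" vectors
// plus inner products. The plain-Go reference below (ordinary ints, no circuit
// types; eqVector/evalSplit are made-up names) mirrors the same indexing, with
// at[0] mapped to the most significant bit of the table index.

// eqVector mirrors partialMultilinearEval: entry idx holds the product over all
// variables of (1-at[i]) or at[i], chosen by the bits of idx (at[0] = MSB).
func eqVector(at []int) []int {
	res := []int{1}
	for i := len(at) - 1; i >= 0; i-- {
		next := make([]int, 2*len(res))
		for j, r := range res {
			next[j] = r * (1 - at[i])
			next[j+len(res)] = r * at[i]
		}
		res = next
	}
	return res
}

// evalSplit mirrors EvalMultilinearMany for a single table m of length 2^len(at).
func evalSplit(m, at []int) int {
	split1 := len(at) / 2
	e1, e2 := eqVector(at[:split1]), eqVector(at[split1:])
	sum := 0
	for i := range e2 {
		partial := 0
		for j := range e1 {
			partial += m[i+j*len(e2)] * e1[j] // dot product with the first-half eq vector
		}
		sum += partial * e2[i] // then with the second-half eq vector
	}
	return sum
}

// Example: m = [1, 5, 7, 11] encodes f(0,0)=1, f(0,1)=5, f(1,0)=7, f(1,1)=11,
// i.e. the multilinear extension 1 + 6·x₀ + 4·x₁; evalSplit(m, []int{2, 3}) returns 25,
// matching the direct evaluation.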
+func (p *Polynomial[FR]) evalMultilinearOld(M Multilinear[FR], at []*emulated.Element[FR]) (*emulated.Element[FR], error) { + var s *emulated.Element[FR] + scaleCorrectionFactor := p.f.One() + for len(M) > 1 { + if len(M) >= minFoldScaledLogSize { + M, s = p.foldScaled(M, at[0]) + scaleCorrectionFactor = p.f.Mul(scaleCorrectionFactor, s) + } else { + M = p.fold(M, at[0]) + } + at = at[1:] + } + if len(at) != 0 { + return nil, fmt.Errorf("incompatible evaluation vector size") + } + return p.f.Mul(&M[0], scaleCorrectionFactor), nil +} + +func (p *Polynomial[FR]) fold(M Multilinear[FR], at *emulated.Element[FR]) Multilinear[FR] { + mid := len(M) / 2 + R := make([]emulated.Element[FR], mid) + for j := range R { + diff := p.f.Sub(&M[mid+j], &M[j]) + diffAt := p.f.Mul(diff, at) + R[j] = *p.f.Add(&M[j], diffAt) + } + return R +} + +func (p *Polynomial[FR]) foldScaled(M Multilinear[FR], at *emulated.Element[FR]) (Multilinear[FR], *emulated.Element[FR]) { + denom := p.f.Sub(p.f.One(), at) + coeff := p.f.Div(at, denom) + mid := len(M) / 2 + R := make([]emulated.Element[FR], mid) + for j := range R { + tmp := p.f.Mul(&M[mid+j], coeff) + R[j] = *p.f.Add(&M[j], tmp) + } + return R, denom +} diff --git a/std/math/polynomial/polynomial_test.go b/std/math/polynomial/polynomial_test.go index f93c7d7b28..ccd2c08b64 100644 --- a/std/math/polynomial/polynomial_test.go +++ b/std/math/polynomial/polynomial_test.go @@ -1,4 +1,4 @@ -package polynomial_test +package polynomial import ( "testing" @@ -6,7 +6,6 @@ import ( "github.com/consensys/gnark/frontend" "github.com/consensys/gnark/std/math/emulated" "github.com/consensys/gnark/std/math/emulated/emparams" - "github.com/consensys/gnark/std/math/polynomial" "github.com/consensys/gnark/test" ) @@ -17,7 +16,7 @@ type evalPolyCircuit[FR emulated.FieldParams] struct { } func (c *evalPolyCircuit[FR]) Define(api frontend.API) error { - p, err := polynomial.New[FR](api) + p, err := New[FR](api) if err != nil { return err } @@ -57,13 +56,13 @@ type evalMultiLinCircuit[FR emulated.FieldParams] struct { } func (c *evalMultiLinCircuit[FR]) Define(api frontend.API) error { - p, err := polynomial.New[FR](api) + p, err := New[FR](api) if err != nil { return err } // M := polynomial.FromSlice(c.M) - X := polynomial.FromSlice(c.At) - res, err := p.EvalMultilinear(c.M, X) + X := FromSlice(c.At) + res, err := p.EvalMultilinear(X, c.M) if err != nil { return err } @@ -108,12 +107,12 @@ type evalEqCircuit[FR emulated.FieldParams] struct { } func (c *evalEqCircuit[FR]) Define(api frontend.API) error { - p, err := polynomial.New[FR](api) + p, err := New[FR](api) if err != nil { return err } - X := polynomial.FromSlice(c.X) - Y := polynomial.FromSlice(c.Y) + X := FromSlice(c.X) + Y := FromSlice(c.Y) evaluation := p.EvalEqual(X, Y) f, err := emulated.NewField[FR](api) if err != nil { @@ -154,11 +153,11 @@ type interpolateLDECircuit[FR emulated.FieldParams] struct { } func (c *interpolateLDECircuit[FR]) Define(api frontend.API) error { - p, err := polynomial.New[FR](api) + p, err := New[FR](api) if err != nil { return err } - vals := polynomial.FromSlice(c.Values) + vals := FromSlice(c.Values) res := p.InterpolateLDE(&c.At, vals) f, err := emulated.NewField[FR](api) if err != nil { @@ -203,3 +202,113 @@ func TestInterpolateQuadraticExtension(t *testing.T) { testInterpolateLDE[emparams.BN254Fr](t, 3, []int64{1, 6, 17}, 34) testInterpolateLDE[emparams.BN254Fr](t, -1, []int64{1, 6, 17}, 2) } + +type TestPartialMultilinearEvalCircuit[FR emulated.FieldParams] struct { + At 
[]emulated.Element[FR] `gnark:",public"` +} + +func (c *TestPartialMultilinearEvalCircuit[FR]) Define(api frontend.API) error { + p, err := New[FR](api) + if err != nil { + return err + } + f, err := emulated.NewField[FR](api) + if err != nil { + return err + } + At := FromSlice(c.At) + coefs := p.partialMultilinearEval(At) + res := f.Zero() + for i := range coefs { + res = f.Add(res, coefs[i]) + } + ones := make([]emulated.Element[FR], 1< Date: Wed, 10 Apr 2024 20:42:48 +0000 Subject: [PATCH 41/55] perf: fold H before big MSM --- std/recursion/plonk/verifier.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/std/recursion/plonk/verifier.go b/std/recursion/plonk/verifier.go index 0e1a38bb5e..c322da6828 100644 --- a/std/recursion/plonk/verifier.go +++ b/std/recursion/plonk/verifier.go @@ -957,12 +957,18 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, // l(ζ)*r(ζ) rl := v.scalarApi.Mul(&l, &r) - // -ζⁿ⁺²*(ζⁿ-1), -ζ²⁽ⁿ⁺²⁾*(ζⁿ-1), -(ζⁿ-1) + // -ζⁿ⁺², -ζ²⁽ⁿ⁺²⁾, -(ζⁿ-1) zhZeta = v.scalarApi.Neg(zhZeta) // -(ζⁿ-1) zetaPowerNPlusTwo := v.scalarApi.Mul(zeta, zetaPowerN) - zetaPowerNPlusTwo = v.scalarApi.Mul(zeta, zetaPowerNPlusTwo) // ζⁿ⁺² - zetaNPlusTwoZh := v.scalarApi.Mul(zetaPowerNPlusTwo, zhZeta) // -ζⁿ⁺²*(ζⁿ-1) - zetaNPlusTwoSquareZh := v.scalarApi.Mul(zetaPowerNPlusTwo, zetaNPlusTwoZh) // -ζ²⁽ⁿ⁺²⁾*(ζⁿ-1) + zetaPowerNPlusTwo = v.scalarApi.Mul(zeta, zetaPowerNPlusTwo) // ζⁿ⁺² + zetaPowerNPlusTwoSquare := v.scalarApi.Mul(zetaPowerNPlusTwo, zetaPowerNPlusTwo) // ζ²⁽ⁿ⁺²⁾ + + // [H₀] + ζⁿ⁺²*[H₁] + ζ²⁽ⁿ⁺²⁾*[H₂] + foldedH, err := v.curve.MultiScalarMul([]*G1El{&proof.H[1].G1El, &proof.H[2].G1El}, []*emulated.Element[FR]{zetaPowerNPlusTwo, zetaPowerNPlusTwoSquare}) + if err != nil { + return nil, nil, nil, fmt.Errorf("folded H: %w", err) + } + foldedH = v.curve.Add(foldedH, &proof.H[0].G1El) points := make([]*G1El, len(proof.Bsb22Commitments)) for i := range proof.Bsb22Commitments { @@ -970,7 +976,8 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, } points = append(points, &vk.Ql.G1El, &vk.Qr.G1El, &vk.Qm.G1El, &vk.Qo.G1El, // first part - &vk.S[2].G1El, &proof.Z.G1El, &proof.H[0].G1El, &proof.H[1].G1El, &proof.H[2].G1El, // second & third part + &vk.S[2].G1El, &proof.Z.G1El, // second part + foldedH, // third part ) qC := make([]*emulated.Element[FR], len(proof.Bsb22Commitments)) @@ -979,7 +986,8 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, } scalars := append(qC, &l, &r, rl, &o, // first part - _s1, coeffZ, zhZeta, zetaNPlusTwoZh, zetaNPlusTwoSquareZh, // second & third part + _s1, coeffZ, // second part + zhZeta, // third part ) var msmOpts []algopts.AlgebraOption From c4d989b585c5eb871a86a9b8e71ed6ac6fb965d0 Mon Sep 17 00:00:00 2001 From: Arya Tabaie Date: Wed, 10 Apr 2024 20:21:49 -0500 Subject: [PATCH 42/55] perf allow for dirty padding of decompression output (#1100) --- std/compress/lzss/snark.go | 36 +++++++++++++++++++++++++-------- std/compress/lzss/snark_test.go | 35 ++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 8 deletions(-) diff --git a/std/compress/lzss/snark.go b/std/compress/lzss/snark.go index 47f38d6f6e..68b9773b36 100644 --- a/std/compress/lzss/snark.go +++ b/std/compress/lzss/snark.go @@ -18,7 +18,12 @@ import ( // it is recommended to pack the dictionary using compress.Pack and take a MiMC checksum of it. 
// d will consist of bytes // It returns the length of d as a frontend.Variable; if the decompressed stream doesn't fit in d, dLength will be "-1" -func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variable, d, dict []frontend.Variable) (dLength frontend.Variable, err error) { +func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variable, d, dict []frontend.Variable, options ...DecompressionOption) (dLength frontend.Variable, err error) { + + var aux decompressionAux + for _, opt := range options { + opt(&aux) + } api.AssertIsLessOrEqual(cLength, len(c)) // sanity check @@ -100,8 +105,11 @@ func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variab // write to output outVal := api.Select(copying, toCopy, curr) - // TODO previously the last byte of the output kept getting repeated. That can be worked with. If there was a reason to save some 600K constraints in the zkEVM decompressor, take this out again - d[outI] = plonk.EvaluateExpression(api, outVal, eof, 1, 0, -1, 0) // write zeros past eof + if aux.noZeroPaddingOutput { + d[outI] = outVal + } else { + d[outI] = plonk.EvaluateExpression(api, outVal, eof, 1, 0, -1, 0) // write zeros past eof + } // WARNING: curr modified by MulAcc outTable.Insert(d[outI]) @@ -114,11 +122,8 @@ func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variab // TODO Try removing this check and requiring the user to pad the input with nonzeros // TODO Change inner to mulacc once https://github.com/Consensys/gnark/pull/859 is merged // inI = inI + inIDelta * (1 - eof) - if eof == 0 { - inI = api.Add(inI, inIDelta) - } else { - inI = api.Add(inI, plonk.EvaluateExpression(api, inIDelta, eof, 1, 0, -1, 0)) // if eof, stay put - } + + inI = api.Add(inI, plonk.EvaluateExpression(api, inIDelta, eof, 1, 0, -1, 0)) // if eof, stay put eofNow := rangeChecker.IsLessThan(8, api.Sub(cLength, inI)) // less than a byte left; meaning we are at the end of the input @@ -179,3 +184,18 @@ func RegisterHints() { hint.RegisterHint(internal.BreakUpBytesIntoHalfHint) hint.RegisterHint(compress.UnpackIntoBytesHint) } + +// options and other auxiliary input +type decompressionAux struct { + noZeroPaddingOutput bool +} + +type DecompressionOption func(*decompressionAux) + +// WithoutZeroPaddingOutput disables the feature where all decompressor output past the end is zeroed out +// It saves one constraint per byte of output but necessitates more assignment work +// If using this option, the output will be padded by the first byte of the input past the end +// If further the input is not padded, the output still will be padded with zeros +func WithoutZeroPaddingOutput(aux *decompressionAux) { + aux.noZeroPaddingOutput = true +} diff --git a/std/compress/lzss/snark_test.go b/std/compress/lzss/snark_test.go index c45db3778d..ab1fbfc232 100644 --- a/std/compress/lzss/snark_test.go +++ b/std/compress/lzss/snark_test.go @@ -366,3 +366,38 @@ func TestBuildDecompress1KBto9KB(t *testing.T) { assert.NoError(t, err) fmt.Println(cs.GetNbConstraints()) } + +func TestNoZeroPaddingOutput(t *testing.T) { + assignment := testNoZeroPaddingOutputCircuit{ + C: []frontend.Variable{0, 1, 0, 2, 3, 0, 0, 0}, + D: []frontend.Variable{2, 3, 3}, + CLen: 4, + DLen: 1, + } + circuit := testNoZeroPaddingOutputCircuit{ + C: make([]frontend.Variable, len(assignment.C)), + D: make([]frontend.Variable, len(assignment.D)), + } + + RegisterHints() + test.NewAssert(t).CheckCircuit(&circuit, test.WithValidAssignment(&assignment), 
test.WithBackends(backend.PLONK), test.WithCurves(ecc.BLS12_377)) +} + +type testNoZeroPaddingOutputCircuit struct { + CLen, DLen frontend.Variable + C, D []frontend.Variable +} + +func (c *testNoZeroPaddingOutputCircuit) Define(api frontend.API) error { + dict := []frontend.Variable{254, 255} + d := make([]frontend.Variable, len(c.D)) + dLen, err := Decompress(api, c.C, c.CLen, d, dict, WithoutZeroPaddingOutput) + if err != nil { + return err + } + api.AssertIsEqual(c.DLen, dLen) + for i := range c.D { + api.AssertIsEqual(c.D[i], d[i]) + } + return nil +} From 65f6f467e18f616f944245e2a3ba73589ddbb4f2 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Thu, 11 Apr 2024 09:30:08 +0200 Subject: [PATCH 43/55] feat: updated comment in fold_state --- backend/plonk/bn254/solidity.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index d2c03cfc56..210854fc87 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -894,7 +894,7 @@ contract PlonkVerifier { } /// @notice Fold the opening proofs at ζ: - /// * at state+state_folded_digest we store: [H] + γ[Linearised_polynomial]+γ²[L] + γ³[R] + γ⁴[O] + γ⁵[S₁] +γ⁶[S₂] + ∑ᵢγ⁶⁺ⁱ[Pi_{i}] + /// * at state+state_folded_digest we store: [Linearised_polynomial]+γ[L] + γ²[R] + γ³[O] + γ⁴[S₁] +γ⁵[S₂] + ∑ᵢγ⁵⁺ⁱ[Pi_{i}] /// * at state+state_folded_claimed_values we store: H(ζ) + γLinearised_polynomial(ζ)+γ²L(ζ) + γ³R(ζ)+ γ⁴O(ζ) + γ⁵S₁(ζ) +γ⁶S₂(ζ) + ∑ᵢγ⁶⁺ⁱPi_{i}(ζ) /// @param aproof pointer to the proof /// acc_gamma stores the γⁱ From 3fd24eaac7e7671f64d0f97a93564bbc5699f029 Mon Sep 17 00:00:00 2001 From: Thomas Piellard Date: Thu, 11 Apr 2024 10:01:18 +0200 Subject: [PATCH 44/55] feat: modified comment --- backend/plonk/bn254/solidity.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 210854fc87..300fef7340 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -1384,9 +1384,8 @@ func (proof *Proof) MarshalSolidity() []byte { tmp32 = proof.ZShiftedOpening.ClaimedValue.Bytes() res = append(res, tmp32[:]...) - // uint256 linearization_polynomial_at_zeta; - // tmp32 = proof.BatchedProof.ClaimedValues[0].Bytes() - // res = append(res, tmp32[:]...) 
+ // we skip the claimed value of the linearised polynomial at zeta because it + // is recomputed by the verifier and plugged in the batch opening proof directly // uint256 opening_at_zeta_proof_x; // uint256 opening_at_zeta_proof_y; From 4dc4a0782f48957bfd7a9f5c5783b0a7d07c1d31 Mon Sep 17 00:00:00 2001 From: Youssef El Housni Date: Thu, 11 Apr 2024 19:17:20 -0400 Subject: [PATCH 45/55] perf(std/plonk): save 1 neg --- std/recursion/plonk/verifier.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/std/recursion/plonk/verifier.go b/std/recursion/plonk/verifier.go index c322da6828..6f88630d4e 100644 --- a/std/recursion/plonk/verifier.go +++ b/std/recursion/plonk/verifier.go @@ -916,9 +916,8 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, _s1 = v.scalarApi.Mul(_s1, alpha) _s1 = v.scalarApi.Mul(_s1, &zu) - constLin := v.scalarApi.Sub(pi, alphaSquareLagrangeOne) - constLin = v.scalarApi.Add(constLin, _s1) - constLin = v.scalarApi.Neg(constLin) + constLin := v.scalarApi.Add(pi, _s1) + constLin = v.scalarApi.Sub(alphaSquareLagrangeOne, constLin) // check that the opening of the linearised polynomial is equal to -constLin openingLinPol := proof.BatchedProof.ClaimedValues[0] @@ -949,10 +948,9 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) PrepareVerification(vk VerifyingKey[FR, tmp = v.scalarApi.Add(tmp, &o) // β*u²*ζ+γ+o _s2 = v.scalarApi.Mul(_s2, tmp) // (l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)(o+β*u²*ζ+γ) _s2 = v.scalarApi.Mul(_s2, alpha) // α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)(o+β*u²*ζ+γ) - _s2 = v.scalarApi.Neg(_s2) // -α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)(o+β*u²*ζ+γ) // α²*L₁(ζ) - α*(l(ζ)+β*ζ+γ)*(r(ζ)+β*u*ζ+γ)*(o(ζ)+β*u²*ζ+γ) - coeffZ := v.scalarApi.Add(alphaSquareLagrangeOne, _s2) + coeffZ := v.scalarApi.Sub(alphaSquareLagrangeOne, _s2) // l(ζ)*r(ζ) rl := v.scalarApi.Mul(&l, &r) From 79aece15fc64482e5814839dfb5159af8d3e16e1 Mon Sep 17 00:00:00 2001 From: Youssef El Housni Date: Thu, 11 Apr 2024 20:01:57 -0400 Subject: [PATCH 46/55] perf(std/plonk): small optims --- std/recursion/plonk/verifier.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/std/recursion/plonk/verifier.go b/std/recursion/plonk/verifier.go index 6f88630d4e..18451b8eed 100644 --- a/std/recursion/plonk/verifier.go +++ b/std/recursion/plonk/verifier.go @@ -1196,8 +1196,8 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) computeIthLagrangeAtZeta(exp frontend.V // \omega^{i} iBits := bits.ToBinary(v.api, exp, bits.WithNbDigits(maxExpBits)) - omegai := one - for i := maxExpBits - 1; i >= 0; i-- { + omegai := v.scalarApi.Select(iBits[maxExpBits-1], &vk.Generator, one) + for i := maxExpBits - 2; i >= 0; i-- { omegai = v.scalarApi.Mul(omegai, omegai) tmp := v.scalarApi.Mul(omegai, &vk.Generator) omegai = v.scalarApi.Select(iBits[i], tmp, omegai) From c73574cf12e2935c34aa8cd97828a36b2a064516 Mon Sep 17 00:00:00 2001 From: Youssef El Housni Date: Fri, 12 Apr 2024 13:45:23 -0400 Subject: [PATCH 47/55] perf(std/plonk): small optim --- std/recursion/plonk/verifier.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/std/recursion/plonk/verifier.go b/std/recursion/plonk/verifier.go index 18451b8eed..5ae3ee0673 100644 --- a/std/recursion/plonk/verifier.go +++ b/std/recursion/plonk/verifier.go @@ -1176,12 +1176,13 @@ func (v *Verifier[FR, G1El, G2El, GtEl]) fixedExpN(n frontend.Variable, s *emula const maxExpBits = 30 // n is power of two. 
nBits := bits.ToBinary(v.api, n, bits.WithNbDigits(maxExpBits)) - acc := s - res := v.scalarApi.Zero() - for i := range nBits { + res := v.scalarApi.Select(nBits[0], s, v.scalarApi.Zero()) + acc := v.scalarApi.Mul(s, s) + for i := 1; i < maxExpBits-1; i++ { res = v.scalarApi.Select(nBits[i], acc, res) acc = v.scalarApi.Mul(acc, acc) } + res = v.scalarApi.Select(nBits[maxExpBits-1], acc, res) return res } From 747d11f83c8185a5488de38597521eb43b0b336e Mon Sep 17 00:00:00 2001 From: Youssef El Housni Date: Fri, 12 Apr 2024 18:00:57 -0400 Subject: [PATCH 48/55] perf(pairing/bw6,bls12-381): optimize Mul014By014 --- .../emulated/fields_bls12381/e12_pairing.go | 15 +++------------ std/algebra/emulated/fields_bw6761/e6_pairing.go | 15 +++------------ std/algebra/native/fields_bls12377/e12_pairing.go | 3 +-- 3 files changed, 7 insertions(+), 26 deletions(-) diff --git a/std/algebra/emulated/fields_bls12381/e12_pairing.go b/std/algebra/emulated/fields_bls12381/e12_pairing.go index 1810557e14..69b0e9e37b 100644 --- a/std/algebra/emulated/fields_bls12381/e12_pairing.go +++ b/std/algebra/emulated/fields_bls12381/e12_pairing.go @@ -118,24 +118,15 @@ func (e *Ext12) MulBy014(z *E12, c0, c1 *E2) *E12 { // C1: E6{B0: 0, B1: 1, B2: 0}, // } func (e Ext12) Mul014By014(d0, d1, c0, c1 *E2) [5]*E2 { - one := e.Ext2.One() x0 := e.Ext2.Mul(c0, d0) x1 := e.Ext2.Mul(c1, d1) - tmp := e.Ext2.Add(c0, one) - x04 := e.Ext2.Add(d0, one) - x04 = e.Ext2.Mul(x04, tmp) - x04 = e.Ext2.Sub(x04, x0) - x04 = e.Ext2.Sub(x04, one) - tmp = e.Ext2.Add(c0, c1) + x04 := e.Ext2.Add(c0, d0) + tmp := e.Ext2.Add(c0, c1) x01 := e.Ext2.Add(d0, d1) x01 = e.Ext2.Mul(x01, tmp) x01 = e.Ext2.Sub(x01, x0) x01 = e.Ext2.Sub(x01, x1) - tmp = e.Ext2.Add(c1, one) - x14 := e.Ext2.Add(d1, one) - x14 = e.Ext2.Mul(x14, tmp) - x14 = e.Ext2.Sub(x14, x1) - x14 = e.Ext2.Sub(x14, one) + x14 := e.Ext2.Add(c1, d1) zC0B0 := e.Ext2.NonResidue() zC0B0 = e.Ext2.Add(zC0B0, x0) diff --git a/std/algebra/emulated/fields_bw6761/e6_pairing.go b/std/algebra/emulated/fields_bw6761/e6_pairing.go index 8573b66845..e4926c064f 100644 --- a/std/algebra/emulated/fields_bw6761/e6_pairing.go +++ b/std/algebra/emulated/fields_bw6761/e6_pairing.go @@ -182,24 +182,15 @@ func (e *Ext6) MulBy014(z *E6, c0, c1 *baseEl) *E6 { // B1: E3{A0: 0, A1: 1, A2: 0}, // } func (e Ext6) Mul014By014(d0, d1, c0, c1 *baseEl) [5]*baseEl { - one := e.fp.One() x0 := e.fp.Mul(c0, d0) x1 := e.fp.Mul(c1, d1) - tmp := e.fp.Add(c0, one) - x04 := e.fp.Add(d0, one) - x04 = e.fp.Mul(x04, tmp) - x04 = e.fp.Sub(x04, x0) - x04 = e.fp.Sub(x04, one) - tmp = e.fp.Add(c0, c1) + x04 := e.fp.Add(c0, d0) + tmp := e.fp.Add(c0, c1) x01 := e.fp.Add(d0, d1) x01 = e.fp.Mul(x01, tmp) x01 = e.fp.Sub(x01, x0) x01 = e.fp.Sub(x01, x1) - tmp = e.fp.Add(c1, one) - x14 := e.fp.Add(d1, one) - x14 = e.fp.Mul(x14, tmp) - x14 = e.fp.Sub(x14, x1) - x14 = e.fp.Sub(x14, one) + x14 := e.fp.Add(c1, d1) four := emulated.ValueOf[emulated.BW6761Fp](big.NewInt(4)) zC0B0 := e.fp.Sub(x0, &four) diff --git a/std/algebra/native/fields_bls12377/e12_pairing.go b/std/algebra/native/fields_bls12377/e12_pairing.go index c6f87c4a0a..a98616c84d 100644 --- a/std/algebra/native/fields_bls12377/e12_pairing.go +++ b/std/algebra/native/fields_bls12377/e12_pairing.go @@ -62,8 +62,7 @@ func (e *E12) MulBy034(api frontend.API, c3, c4 E2) *E12 { // Mul034By034 multiplication of sparse element (1,0,0,c3,c4,0) by sparse element (1,0,0,d3,d4,0) func Mul034By034(api frontend.API, d3, d4, c3, c4 E2) *[5]E2 { - var one, tmp, x00, x3, x4, x04, x03, x34 E2 - one.SetOne() + 
var tmp, x00, x3, x4, x04, x03, x34 E2 x3.Mul(api, c3, d3) x4.Mul(api, c4, d4) x04.Add(api, c4, d4) From 704bb4d27d907fd5eb97b91c97114974f7b7024a Mon Sep 17 00:00:00 2001 From: Youssef El Housni Date: Fri, 12 Apr 2024 18:16:42 -0400 Subject: [PATCH 49/55] test: update stats --- internal/stats/latest.stats | Bin 2246 -> 2246 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/internal/stats/latest.stats b/internal/stats/latest.stats index 37f16d35c679bdae9cac75fd13eba33817eed7aa..d43329953f9f2603dd03eb54a49197c0401e1d85 100644 GIT binary patch delta 104 zcmX>mcua6Y=43afIg{_PFQ1&pI#)rkit(?@y}S zvaFrFigC{5M&<>RA2H3Jtjo5WQGBumcua6Y=42PPC6k}C+f9DK7C(6%^PGv9b0)uHnLpW!Wx-@d=CzaE*%wS+!#Gz# w%!2W+$F>g)P{6_Xcb&&3#=ncVXFxt<8 From bdcdb275bd1f3c0b2eca5026dcb17a5979fbca80 Mon Sep 17 00:00:00 2001 From: Youssef El Housni Date: Mon, 15 Apr 2024 18:31:58 -0400 Subject: [PATCH 50/55] perf(pairing/bw6): optimize Miller loop --- .../emulated/fields_bw6761/e6_pairing.go | 27 ++++++++++++++++ std/algebra/emulated/sw_bw6761/pairing.go | 32 ++++++++++++++----- 2 files changed, 51 insertions(+), 8 deletions(-) diff --git a/std/algebra/emulated/fields_bw6761/e6_pairing.go b/std/algebra/emulated/fields_bw6761/e6_pairing.go index e4926c064f..20e8a9c444 100644 --- a/std/algebra/emulated/fields_bw6761/e6_pairing.go +++ b/std/algebra/emulated/fields_bw6761/e6_pairing.go @@ -198,6 +198,33 @@ func (e Ext6) Mul014By014(d0, d1, c0, c1 *baseEl) [5]*baseEl { return [5]*baseEl{zC0B0, x01, x1, x04, x14} } +// MulBy01245 multiplies z by an E6 sparse element of the form +// +// E6{ +// B0: E3{A0: c0, A1: c1, A2: c2}, +// B1: E3{A0: 0, A1: c4, A2: c5}, +// } +func (e *Ext6) MulBy01245(z *E6, x [5]*baseEl) *E6 { + c0 := &E3{A0: *x[0], A1: *x[1], A2: *x[2]} + a := e.Ext3.Add(&z.B0, &z.B1) + b := &E3{ + A0: c0.A0, + A1: *e.fp.Add(&c0.A1, x[3]), + A2: *e.fp.Add(&c0.A2, x[4]), + } + a = e.Ext3.Mul(a, b) + b = e.Ext3.Mul(&z.B0, c0) + c := e.Ext3.MulBy12(&z.B1, x[3], x[4]) + z1 := e.Ext3.Sub(a, b) + z1 = e.Ext3.Sub(z1, c) + z0 := e.Ext3.MulByNonResidue(c) + z0 = e.Ext3.Add(z0, b) + return &E6{ + B0: *z0, + B1: *z1, + } +} + // Mul01245By014 multiplies two E6 sparse element of the form // // E6{ diff --git a/std/algebra/emulated/sw_bw6761/pairing.go b/std/algebra/emulated/sw_bw6761/pairing.go index c032b8f6b7..d5b0ac4fa7 100644 --- a/std/algebra/emulated/sw_bw6761/pairing.go +++ b/std/algebra/emulated/sw_bw6761/pairing.go @@ -358,19 +358,35 @@ func (pr Pairing) millerLoopLines(P []*G1Affine, lines []lineEvaluations) (*GTEl // (∏ᵢfᵢ)² result = pr.Square(result) - for k := 0; k < n; k++ { - result = pr.MulBy014(result, - pr.curveF.Mul(&lines[k][0][i].R1, yInv[k]), - pr.curveF.Mul(&lines[k][0][i].R0, xNegOverY[k]), - ) - } - if i > 0 && loopCounter2[i]*3+loopCounter1[i] != 0 { for k := 0; k < n; k++ { - result = pr.MulBy014(result, + prodLines = pr.Mul014By014( + pr.curveF.Mul(&lines[k][0][i].R1, yInv[k]), + pr.curveF.Mul(&lines[k][0][i].R0, xNegOverY[k]), pr.curveF.Mul(&lines[k][1][i].R1, yInv[k]), pr.curveF.Mul(&lines[k][1][i].R0, xNegOverY[k]), ) + result = pr.MulBy01245(result, prodLines) + } + } else { + // if number of lines is odd, mul last line by res + // works for n=1 as well + if n%2 != 0 { + // ℓ × res + result = pr.MulBy014(result, + pr.curveF.Mul(&lines[n-1][0][i].R1, yInv[n-1]), + pr.curveF.Mul(&lines[n-1][0][i].R0, xNegOverY[n-1]), + ) + } + // mul lines 2-by-2 + for k := 1; k < n; k += 2 { + prodLines = pr.Mul014By014( + pr.curveF.Mul(&lines[k][0][i].R1, yInv[k]), + pr.curveF.Mul(&lines[k][0][i].R0, 
xNegOverY[k]), + pr.curveF.Mul(&lines[k-1][0][i].R1, yInv[k-1]), + pr.curveF.Mul(&lines[k-1][0][i].R0, xNegOverY[k-1]), + ) + result = pr.MulBy01245(result, prodLines) } } } From 5af51c322ea104151e39ac0d5977f3e8e8a9b102 Mon Sep 17 00:00:00 2001 From: Youssef El Housni Date: Mon, 15 Apr 2024 18:41:21 -0400 Subject: [PATCH 51/55] test: update stats --- internal/stats/latest.stats | Bin 2246 -> 2246 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/internal/stats/latest.stats b/internal/stats/latest.stats index d43329953f9f2603dd03eb54a49197c0401e1d85..d961ee475d40ad729517868ae29ff3a978ba7e78 100644 GIT binary patch delta 131 zcmX>mcua6Y=HwS_@sray5+-}GESS8GdCufc*4>jCnb%H!&TcnZi|P8r%sG=^vCNzvnVq!JwB=2%-qb7nS%ieI2ivXEOcZ1J3l7^ TA@qkaZ{sJ%KaAnKxnP0-sR=4C delta 134 zcmX>mcua6Y=H!Pgb0*Jc-#qyYoAG2f4*SVmEY~N?Gq0U|f^p7d4aNnNYgy+r>P?nq z6Q5kew0N=%+meZy@st0vEtq_U+0LG?AThHjGcP?psoc!m%#fLb0SY)6|0ez7V*I-} Z;SNIR4`aE77ULhr_`QoDf)kG=0RYCVD`x-z From c38cdd37b96d59a675ef54cfa51f8d01f7cb3bb3 Mon Sep 17 00:00:00 2001 From: bernard-wagner Date: Thu, 18 Apr 2024 13:04:16 +0200 Subject: [PATCH 52/55] feat: groth16 solidity use calldatacopy for commitments (#1097) * feat: groth16 solidity use calldatacopy for commitments * consistently use publicAndCommitmentCommittedOffset * do not redeclare publicAndCommitmentCommitted * contiguous segments * only if index exists * test: re-enable solidity checks --------- Co-authored-by: Ivo Kubjas --- backend/groth16/bn254/solidity.go | 61 +++++++++++++++++++++----- std/math/polynomial/polynomial_test.go | 8 ++-- 2 files changed, 53 insertions(+), 16 deletions(-) diff --git a/backend/groth16/bn254/solidity.go b/backend/groth16/bn254/solidity.go index fb3c2fe7b8..5f0b1a504e 100644 --- a/backend/groth16/bn254/solidity.go +++ b/backend/groth16/bn254/solidity.go @@ -541,17 +541,36 @@ contract Verifier { } {{- end}} (uint256 Px, uint256 Py) = decompress_g1(compressedCommitmentPok); + + uint256[] memory publicAndCommitmentCommitted; {{- range $i := intRange $numCommitments }} + {{- $pcIndex := index $PublicAndCommitmentCommitted $i }} + {{- if gt (len $pcIndex) 0 }} + publicAndCommitmentCommitted = new uint256[]({{(len $pcIndex)}}); + assembly ("memory-safe") { + let publicAndCommitmentCommittedOffset := add(publicAndCommitmentCommitted, 0x20) + {{- $segment_start := index $pcIndex 0 }} + {{- $segment_end := index $pcIndex 0 }} + {{- $l := 0 }} + {{- range $k := intRange (sub (len $pcIndex) 1) }} + {{- $next := index $pcIndex (sum $k 1) }} + {{- if ne $next (sum $segment_end 1) }} + calldatacopy(add(publicAndCommitmentCommittedOffset, {{mul $l 0x20}}), add(input, {{mul 0x20 (sub $segment_start 1)}}), {{mul 0x20 (sum 1 (sub $segment_end $segment_start))}}) + {{- $segment_start = $next }} + {{- $l = (sum $k 1) }} + {{- end }} + {{- $segment_end = $next }} + {{- end }} + calldatacopy(add(publicAndCommitmentCommittedOffset, {{mul $l 0x20}}), add(input, {{mul 0x20 (sub $segment_start 1)}}), {{mul 0x20 (sum 1 (sub $segment_end $segment_start))}}) + } + {{- end }} + publicCommitments[{{$i}}] = uint256( sha256( abi.encodePacked( commitments[{{mul $i 2}}], - commitments[{{sum (mul $i 2) 1}}] - {{- $pcIndex := index $PublicAndCommitmentCommitted $i }} - {{- range $j := intRange (len $pcIndex) }} - {{- $l := index $pcIndex $j }} - ,input[{{sub $l 1}}] - {{- end }} + commitments[{{sum (mul $i 2) 1}}], + publicAndCommitmentCommitted ) ) ) % R; @@ -670,17 +689,35 @@ contract Verifier { {{- else }} // HashToField uint256[{{$numCommitments}}] memory publicCommitments; + uint256[] 
memory publicAndCommitmentCommitted; {{- range $i := intRange $numCommitments }} + {{- $pcIndex := index $PublicAndCommitmentCommitted $i }} + {{- if gt (len $pcIndex) 0 }} + publicAndCommitmentCommitted = new uint256[]({{(len $pcIndex)}}); + assembly ("memory-safe") { + let publicAndCommitmentCommittedOffset := add(publicAndCommitmentCommitted, 0x20) + {{- $segment_start := index $pcIndex 0 }} + {{- $segment_end := index $pcIndex 0 }} + {{- $l := 0 }} + {{- range $k := intRange (sub (len $pcIndex) 1) }} + {{- $next := index $pcIndex (sum $k 1) }} + {{- if ne $next (sum $segment_end 1) }} + calldatacopy(add(publicAndCommitmentCommittedOffset, {{mul $l 0x20}}), add(input, {{mul 0x20 (sub $segment_start 1)}}), {{mul 0x20 (sum 1 (sub $segment_end $segment_start))}}) + {{- $segment_start = $next }} + {{- $l = (sum $k 1) }} + {{- end }} + {{- $segment_end = $next }} + {{- end }} + calldatacopy(add(publicAndCommitmentCommittedOffset, {{mul $l 0x20}}), add(input, {{mul 0x20 (sub $segment_start 1)}}), {{mul 0x20 (sum 1 (sub $segment_end $segment_start))}}) + } + {{- end }} + publicCommitments[{{$i}}] = uint256( sha256( abi.encodePacked( commitments[{{mul $i 2}}], - commitments[{{sum (mul $i 2) 1}}] - {{- $pcIndex := index $PublicAndCommitmentCommitted $i }} - {{- range $j := intRange (len $pcIndex) }} - {{- $l := index $pcIndex $j }} - ,input[{{sub $l 1}}] - {{- end }} + commitments[{{sum (mul $i 2) 1}}], + publicAndCommitmentCommitted ) ) ) % R; diff --git a/std/math/polynomial/polynomial_test.go b/std/math/polynomial/polynomial_test.go index ccd2c08b64..4fd2929533 100644 --- a/std/math/polynomial/polynomial_test.go +++ b/std/math/polynomial/polynomial_test.go @@ -42,7 +42,7 @@ func testEvalPoly[FR emulated.FieldParams](t *testing.T, p []int64, at int64, ev Evaluation: emulated.ValueOf[FR](evaluation), } - assert.CheckCircuit(&evalPolyCircuit[FR]{P: make([]emulated.Element[FR], len(p))}, test.WithValidAssignment(&witness), test.NoSolidityChecks()) + assert.CheckCircuit(&evalPolyCircuit[FR]{P: make([]emulated.Element[FR], len(p))}, test.WithValidAssignment(&witness)) } func TestEvalPoly(t *testing.T) { @@ -97,7 +97,7 @@ func testEvalMultiLin[FR emulated.FieldParams](t *testing.T) { Evaluation: emulated.ValueOf[FR](17), } - assert.CheckCircuit(&evalMultiLinCircuit[FR]{M: make([]emulated.Element[FR], 4), At: make([]emulated.Element[FR], 2)}, test.WithValidAssignment(&witness), test.NoSolidityChecks()) + assert.CheckCircuit(&evalMultiLinCircuit[FR]{M: make([]emulated.Element[FR], 4), At: make([]emulated.Element[FR], 2)}, test.WithValidAssignment(&witness)) } type evalEqCircuit[FR emulated.FieldParams] struct { @@ -143,7 +143,7 @@ func testEvalEq[FR emulated.FieldParams](t *testing.T) { Eq: emulated.ValueOf[FR](148665), } - assert.CheckCircuit(&evalEqCircuit[FR]{X: make([]emulated.Element[FR], 4), Y: make([]emulated.Element[FR], 4)}, test.WithValidAssignment(&witness), test.NoSolidityChecks()) + assert.CheckCircuit(&evalEqCircuit[FR]{X: make([]emulated.Element[FR], 4), Y: make([]emulated.Element[FR], 4)}, test.WithValidAssignment(&witness)) } type interpolateLDECircuit[FR emulated.FieldParams] struct { @@ -179,7 +179,7 @@ func testInterpolateLDE[FR emulated.FieldParams](t *testing.T, at int64, values Expected: emulated.ValueOf[FR](expected), } - assert.CheckCircuit(&interpolateLDECircuit[FR]{Values: make([]emulated.Element[FR], len(values))}, test.WithValidAssignment(assignment), test.NoSolidityChecks()) + assert.CheckCircuit(&interpolateLDECircuit[FR]{Values: make([]emulated.Element[FR], 
len(values))}, test.WithValidAssignment(assignment)) } func TestInterpolateLDEOnRange(t *testing.T) { From 3c506fd9526105596c77beec2c316a0997af4431 Mon Sep 17 00:00:00 2001 From: Ivo Kubjas Date: Thu, 18 Apr 2024 16:35:16 +0200 Subject: [PATCH 53/55] feat: expmod with variable modulus (#1090) * chore: remove unused arguments * feat: allow non-default modulus * feat: implement non-default modulus arithmetic * test: tests for non-default arithmetic * remove debug calls * chore: clean only if set * feat: add zero check with custom modulus * feat: change quotient length in case of var modulus * refactor: subtraction padding into callable function * feat: implement variable modulus subtraction * feat: implement variable modulus equality assertion * test: variable modulus tests * feat: implement custom mod exp * refactor: rename methods * refactor: use single impl of mulmod and checkzero * feat: add automatic lazy reduction for var-mod addition * refactor: make var-mod sub private It is difficult to implement automatic reduction here due to the recursive dependency of methods. As the goal is to provide only very limited API for now, then make it private for now. * refactor: move parameters * refactor: move var-mod methods to Field * refactor: subPaddingHint to hints file * docs: add package documentation * feat: implement fixed-mod exp * test: make tests cheaper to run * fix: handle mul hint edge case when modulus is zero * feat: implement expmod precompile * perf: select first bit in expmod instead of mul * docs: indicate implementation of Expmod --------- Co-authored-by: Youssef El Housni --- std/evmprecompiles/05-expmod.go | 30 +++++ std/evmprecompiles/05-expmod_test.go | 86 +++++++++++++ std/evmprecompiles/doc.go | 2 +- std/math/emulated/composition.go | 9 +- std/math/emulated/composition_test.go | 2 +- std/math/emulated/custommod.go | 99 ++++++++++++++ std/math/emulated/custommod_test.go | 171 +++++++++++++++++++++++++ std/math/emulated/doc.go | 12 ++ std/math/emulated/element_test.go | 39 ++++++ std/math/emulated/emparams/emparams.go | 38 ++++++ std/math/emulated/field_assert.go | 2 +- std/math/emulated/field_mul.go | 74 +++++++++-- std/math/emulated/field_ops.go | 5 +- std/math/emulated/hints.go | 49 +++++++ 14 files changed, 593 insertions(+), 25 deletions(-) create mode 100644 std/evmprecompiles/05-expmod_test.go create mode 100644 std/math/emulated/custommod.go create mode 100644 std/math/emulated/custommod_test.go diff --git a/std/evmprecompiles/05-expmod.go b/std/evmprecompiles/05-expmod.go index 6b1eb16123..442ba88095 100644 --- a/std/evmprecompiles/05-expmod.go +++ b/std/evmprecompiles/05-expmod.go @@ -1 +1,31 @@ package evmprecompiles + +import ( + "fmt" + + "github.com/consensys/gnark/frontend" + "github.com/consensys/gnark/std/math/emulated" + "github.com/consensys/gnark/std/math/emulated/emparams" +) + +// Expmod implements [MODEXP] precompile contract at address 0x05. +// +// Internally, uses 4k elements for representing the base, exponent and modulus, +// upper bounding the sizes of the inputs. The runtime is constant regardless of +// the actual length of the inputs. 
+// +// [MODEXP]: https://ethereum.github.io/execution-specs/autoapi/ethereum/paris/vm/precompiled_contracts/expmod/index.html +func Expmod(api frontend.API, base, exp, modulus *emulated.Element[emparams.Mod1e4096]) *emulated.Element[emparams.Mod1e4096] { + // x^0 = 1 + // x mod 0 = 0 + f, err := emulated.NewField[emparams.Mod1e4096](api) + if err != nil { + panic(fmt.Sprintf("new field: %v", err)) + } + // in case modulus is zero, then need to compute with dummy values and return zero as a result + isZeroMod := f.IsZero(modulus) + modulus = f.Select(isZeroMod, f.One(), modulus) + res := f.ModExp(base, exp, modulus) + res = f.Select(isZeroMod, f.Zero(), res) + return res +} diff --git a/std/evmprecompiles/05-expmod_test.go b/std/evmprecompiles/05-expmod_test.go new file mode 100644 index 0000000000..5de7d95bcb --- /dev/null +++ b/std/evmprecompiles/05-expmod_test.go @@ -0,0 +1,86 @@ +package evmprecompiles + +import ( + "crypto/rand" + "fmt" + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark/frontend" + "github.com/consensys/gnark/std/math/emulated" + "github.com/consensys/gnark/std/math/emulated/emparams" + "github.com/consensys/gnark/test" +) + +type expmodCircuit struct { + Base emulated.Element[emparams.Mod1e4096] + Exp emulated.Element[emparams.Mod1e4096] + Mod emulated.Element[emparams.Mod1e4096] + Result emulated.Element[emparams.Mod1e4096] + edgeCases bool +} + +func (c *expmodCircuit) Define(api frontend.API) error { + res := Expmod(api, &c.Base, &c.Exp, &c.Mod) + f, err := emulated.NewField[emparams.Mod1e4096](api) + if err != nil { + return fmt.Errorf("new field: %w", err) + } + if c.edgeCases { + // cannot use ModAssertIsEqual for edge cases. But the output is either + // 0 or 1 so can use AssertIsEqual + f.AssertIsEqual(res, &c.Result) + } else { + // for random case need to use ModAssertIsEqual + f.ModAssertIsEqual(&c.Result, res, &c.Mod) + } + return nil +} + +func testInstance(edgeCases bool, base, exp, modulus, result *big.Int) error { + circuit := &expmodCircuit{edgeCases: edgeCases} + assignment := &expmodCircuit{ + Base: emulated.ValueOf[emparams.Mod1e4096](base), + Exp: emulated.ValueOf[emparams.Mod1e4096](exp), + Mod: emulated.ValueOf[emparams.Mod1e4096](modulus), + Result: emulated.ValueOf[emparams.Mod1e4096](result), + } + return test.IsSolved(circuit, assignment, ecc.BLS12_377.ScalarField()) +} + +func TestRandomInstance(t *testing.T) { + assert := test.NewAssert(t) + for _, bits := range []int{256, 512, 1024, 2048, 4096} { + assert.Run(func(assert *test.Assert) { + modulus := new(big.Int).Lsh(big.NewInt(1), uint(bits)) + base, _ := rand.Int(rand.Reader, modulus) + exp, _ := rand.Int(rand.Reader, modulus) + res := new(big.Int).Exp(base, exp, modulus) + err := testInstance(false, base, exp, modulus, res) + assert.NoError(err) + }, fmt.Sprintf("random-%d", bits)) + } +} + +func TestEdgeCases(t *testing.T) { + assert := test.NewAssert(t) + testCases := []struct { + base, exp, modulus, result *big.Int + }{ + {big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)}, // 0^0 = 0 mod 0 + {big.NewInt(0), big.NewInt(0), big.NewInt(1), big.NewInt(1)}, // 0^0 = 1 mod 1 + {big.NewInt(0), big.NewInt(0), big.NewInt(123), big.NewInt(1)}, // 0^0 = 1 mod 123 + {big.NewInt(123), big.NewInt(123), big.NewInt(0), big.NewInt(0)}, // 123^123 = 0 mod 0 + {big.NewInt(123), big.NewInt(123), big.NewInt(0), big.NewInt(0)}, // 123^123 = 0 mod 1 + {big.NewInt(0), big.NewInt(123), big.NewInt(123), big.NewInt(0)}, // 0^123 = 0 mod 123 + 
{big.NewInt(123), big.NewInt(0), big.NewInt(123), big.NewInt(1)}, // 123^0 = 1 mod 123 + + } + for i, tc := range testCases { + assert.Run(func(assert *test.Assert) { + err := testInstance(true, tc.base, tc.exp, tc.modulus, tc.result) + assert.NoError(err) + }, fmt.Sprintf("edge-%d", i)) + } +} diff --git a/std/evmprecompiles/doc.go b/std/evmprecompiles/doc.go index 7c515eaa51..9b7dc431a8 100644 --- a/std/evmprecompiles/doc.go +++ b/std/evmprecompiles/doc.go @@ -7,7 +7,7 @@ // 2. SHA256 ❌ -- in progress // 3. RIPEMD160 ❌ -- postponed // 4. ID ❌ -- trivial to implement without function -// 5. EXPMOD ❌ -- in progress +// 5. EXPMOD ✅ -- function [Expmod] // 6. BN_ADD ✅ -- function [ECAdd] // 7. BN_MUL ✅ -- function [ECMul] // 8. SNARKV ✅ -- function [ECPair] diff --git a/std/math/emulated/composition.go b/std/math/emulated/composition.go index 79b0216a80..bce35e7442 100644 --- a/std/math/emulated/composition.go +++ b/std/math/emulated/composition.go @@ -66,10 +66,7 @@ func decompose(input *big.Int, nbBits uint, res []*big.Int) error { // // then no such underflow happens and s = a-b (mod p) as the padding is multiple // of p. -func subPadding[T FieldParams](overflow uint, nbLimbs uint) []*big.Int { - var fp T - p := fp.Modulus() - bitsPerLimbs := fp.BitsPerLimb() +func subPadding(modulus *big.Int, bitsPerLimbs uint, overflow uint, nbLimbs uint) []*big.Int { // first, we build a number nLimbs, such that nLimbs > b; // here b is defined by its bounds, that is b is an element with nbLimbs of (bitsPerLimbs+overflow) @@ -86,8 +83,8 @@ func subPadding[T FieldParams](overflow uint, nbLimbs uint) []*big.Int { panic(fmt.Sprintf("recompose: %v", err)) } // mod reduce n, and negate it - n.Mod(n, p) - n.Sub(p, n) + n.Mod(n, modulus) + n.Sub(modulus, n) // construct pad such that: // pad := n - neg(n mod p) == kp diff --git a/std/math/emulated/composition_test.go b/std/math/emulated/composition_test.go index d1c59289cc..25ef0430f9 100644 --- a/std/math/emulated/composition_test.go +++ b/std/math/emulated/composition_test.go @@ -54,7 +54,7 @@ func testSubPadding[T FieldParams](t *testing.T) { assert := test.NewAssert(t) for i := fp.NbLimbs(); i < 2*fp.NbLimbs(); i++ { assert.Run(func(assert *test.Assert) { - limbs := subPadding[T](0, i) + limbs := subPadding(fp.Modulus(), fp.BitsPerLimb(), 0, i) padValue := new(big.Int) if err := recompose(limbs, fp.BitsPerLimb(), padValue); err != nil { assert.FailNow("recompose", err) diff --git a/std/math/emulated/custommod.go b/std/math/emulated/custommod.go new file mode 100644 index 0000000000..2f5cbaca1b --- /dev/null +++ b/std/math/emulated/custommod.go @@ -0,0 +1,99 @@ +package emulated + +import ( + "errors" + + "github.com/consensys/gnark/frontend" +) + +// ModMul computes a*b mod modulus. Instead of taking modulus as a constant +// parametrized by T, it is passed as an argument. This allows to use a variable +// modulus in the circuit. Type parameter T should be sufficiently big to fit a, +// b and modulus. Recommended to use [emparams.Mod1e512] or +// [emparams.Mod1e4096]. +// +// NB! circuit complexity depends on T rather on the actual length of the modulus. +func (f *Field[T]) ModMul(a, b *Element[T], modulus *Element[T]) *Element[T] { + res := f.mulMod(a, b, 0, modulus) + return res +} + +// ModAdd computes a+b mod modulus. Instead of taking modulus as a constant +// parametrized by T, it is passed as an argument. This allows to use a variable +// modulus in the circuit. Type parameter T should be sufficiently big to fit a, +// b and modulus. 
Recommended to use [emparams.Mod1e512] or +// [emparams.Mod1e4096]. +// +// NB! circuit complexity depends on T rather on the actual length of the modulus. +func (f *Field[T]) ModAdd(a, b *Element[T], modulus *Element[T]) *Element[T] { + // inlined version of [Field.reduceAndOp] which uses variable-modulus reduction + var nextOverflow uint + var err error + var target overflowError + for nextOverflow, err = f.addPreCond(a, b); errors.As(err, &target); nextOverflow, err = f.addPreCond(a, b) { + if errors.As(err, &target) { + if !target.reduceRight { + a = f.mulMod(a, f.shortOne(), 0, modulus) + } else { + b = f.mulMod(b, f.shortOne(), 0, modulus) + } + } + } + res := f.add(a, b, nextOverflow) + return res +} + +func (f *Field[T]) modSub(a, b *Element[T], modulus *Element[T]) *Element[T] { + // like fixed modulus subtraction, but for sub padding need to use hint + // instead of assuming T as a constant. And when doing as a hint, then need + // to assert that the padding is a multiple of the modulus (done inside callSubPaddingHint) + nextOverflow := max(b.overflow+1, a.overflow) + 1 + nbLimbs := max(len(a.Limbs), len(b.Limbs)) + limbs := make([]frontend.Variable, nbLimbs) + padding := f.computeSubPaddingHint(b.overflow, uint(nbLimbs), modulus) + for i := range limbs { + limbs[i] = padding.Limbs[i] + if i < len(a.Limbs) { + limbs[i] = f.api.Add(limbs[i], a.Limbs[i]) + } + if i < len(b.Limbs) { + limbs[i] = f.api.Sub(limbs[i], b.Limbs[i]) + } + } + res := f.newInternalElement(limbs, nextOverflow) + return res +} + +// ModAssertIsEqual asserts equality of a and b mod modulus. Instead of taking +// modulus as a constant parametrized by T, it is passed as an argument. This +// allows to use a variable modulus in the circuit. Type parameter T should be +// sufficiently big to fit a, b and modulus. Recommended to use +// [emparams.Mod1e512] or [emparams.Mod1e4096]. +// +// NB! circuit complexity depends on T rather on the actual length of the modulus. +func (f *Field[T]) ModAssertIsEqual(a, b *Element[T], modulus *Element[T]) { + // like fixed modulus AssertIsEqual, but uses current Sub implementation for + // computing the diff + diff := f.modSub(b, a, modulus) + f.checkZero(diff, modulus) +} + +// ModExp computes base^exp mod modulus. Instead of taking modulus as a constant +// parametrized by T, it is passed as an argument. This allows to use a variable +// modulus in the circuit. Type parameter T should be sufficiently big to fit +// base, exp and modulus. Recommended to use [emparams.Mod1e512] or +// [emparams.Mod1e4096]. +// +// NB! circuit complexity depends on T rather on the actual length of the modulus. 
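// Editor's note — reference sketch, not part of this patch. ModExp below walks the
// exponent bits LSB-first, squaring the base once per bit and conditionally folding
// it into the accumulator. Outside the circuit the same schedule looks as follows
// (math/big; modulus assumed non-zero; modexpSketch is a made-up name):

import "math/big"

func modexpSketch(base, exp, modulus *big.Int) *big.Int {
	res := big.NewInt(1)
	b := new(big.Int).Mod(base, modulus) // running base^(2^i) mod modulus
	for i := 0; i < exp.BitLen(); i++ {
		if exp.Bit(i) == 1 {
			res.Mul(res, b).Mod(res, modulus) // fold in base^(2^i) when bit i is set
		}
		b.Mul(b, b).Mod(b, modulus)
	}
	return res.Mod(res, modulus) // final reduction so exp = 0 still yields 1 mod modulus
}

// In the circuit the data-dependent branch becomes a Select and the loop runs over
// the full, fixed bit width of T, so the cost does not depend on the exponent value.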
+func (f *Field[T]) ModExp(base, exp, modulus *Element[T]) *Element[T] { + expBts := f.ToBits(exp) + n := len(expBts) + res := f.Select(expBts[0], base, f.One()) + base = f.ModMul(base, base, modulus) + for i := 1; i < n-1; i++ { + res = f.Select(expBts[i], f.ModMul(base, res, modulus), res) + base = f.ModMul(base, base, modulus) + } + res = f.Select(expBts[n-1], f.ModMul(base, res, modulus), res) + return res +} diff --git a/std/math/emulated/custommod_test.go b/std/math/emulated/custommod_test.go new file mode 100644 index 0000000000..a399769322 --- /dev/null +++ b/std/math/emulated/custommod_test.go @@ -0,0 +1,171 @@ +package emulated + +import ( + "crypto/rand" + "fmt" + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark/frontend" + "github.com/consensys/gnark/std/math/emulated/emparams" + "github.com/consensys/gnark/test" +) + +type variableEquality[T FieldParams] struct { + Modulus Element[T] + A, B Element[T] +} + +func (c *variableEquality[T]) Define(api frontend.API) error { + f, err := NewField[T](api) + if err != nil { + return fmt.Errorf("new variable modulus: %w", err) + } + f.ModAssertIsEqual(&c.A, &c.B, &c.Modulus) + return nil +} + +func TestVariableEquality(t *testing.T) { + assert := test.NewAssert(t) + modulus, _ := new(big.Int).SetString("4294967311", 10) + a := big.NewInt(10) + b := new(big.Int).Add(a, modulus) + circuit := &variableEquality[emparams.Mod1e512]{} + assignment := &variableEquality[emparams.Mod1e512]{ + Modulus: ValueOf[emparams.Mod1e512](modulus), + A: ValueOf[emparams.Mod1e512](a), + B: ValueOf[emparams.Mod1e512](b), + } + err := test.IsSolved(circuit, assignment, ecc.BLS12_377.ScalarField()) + assert.NoError(err) +} + +type variableAddition[T FieldParams] struct { + Modulus Element[T] + A, B Element[T] + Expected Element[T] +} + +func (c *variableAddition[T]) Define(api frontend.API) error { + f, err := NewField[T](api) + if err != nil { + return fmt.Errorf("new variable modulus: %w", err) + } + res := f.ModAdd(&c.A, &c.B, &c.Modulus) + f.ModAssertIsEqual(&c.Expected, res, &c.Modulus) + return nil +} + +func TestVariableAddition(t *testing.T) { + assert := test.NewAssert(t) + modulus, _ := new(big.Int).SetString("4294967311", 10) + circuit := &variableAddition[emparams.Mod1e512]{} + assignment := &variableAddition[emparams.Mod1e512]{ + Modulus: ValueOf[emparams.Mod1e512](modulus), + A: ValueOf[emparams.Mod1e512](10), + B: ValueOf[emparams.Mod1e512](20), + Expected: ValueOf[emparams.Mod1e512](30), + } + err := test.IsSolved(circuit, assignment, ecc.BLS12_377.ScalarField()) + assert.NoError(err) +} + +type variableSubtraction[T FieldParams] struct { + Modulus Element[T] + A, B Element[T] + Expected Element[T] +} + +func (c *variableSubtraction[T]) Define(api frontend.API) error { + f, err := NewField[T](api) + if err != nil { + return fmt.Errorf("new variable modulus: %w", err) + } + res := f.modSub(&c.A, &c.B, &c.Modulus) + f.ModAssertIsEqual(&c.Expected, res, &c.Modulus) + return nil +} + +func TestVariableSubtraction(t *testing.T) { + assert := test.NewAssert(t) + modulus, _ := new(big.Int).SetString("4294967311", 10) + circuit := &variableSubtraction[emparams.Mod1e512]{} + res := new(big.Int).Sub(modulus, big.NewInt(10)) + assignment := &variableSubtraction[emparams.Mod1e512]{ + Modulus: ValueOf[emparams.Mod1e512](modulus), + A: ValueOf[emparams.Mod1e512](10), + B: ValueOf[emparams.Mod1e512](20), + Expected: ValueOf[emparams.Mod1e512](res), + } + err := test.IsSolved(circuit, assignment, 
ecc.BLS12_377.ScalarField())
+	assert.NoError(err)
+}
+
+type variableMultiplication[T FieldParams] struct {
+	Modulus  Element[T]
+	A, B     Element[T]
+	Expected Element[T]
+}
+
+func (c *variableMultiplication[T]) Define(api frontend.API) error {
+	f, err := NewField[T](api)
+	if err != nil {
+		return fmt.Errorf("new variable modulus: %w", err)
+	}
+	res := f.ModMul(&c.A, &c.B, &c.Modulus)
+	f.ModAssertIsEqual(&c.Expected, res, &c.Modulus)
+	return nil
+}
+
+func TestVariableMultiplication(t *testing.T) {
+	assert := test.NewAssert(t)
+	modulus, _ := new(big.Int).SetString("4294967311", 10)
+	a, _ := rand.Int(rand.Reader, modulus)
+	b, _ := rand.Int(rand.Reader, modulus)
+	exp := new(big.Int).Mul(a, b)
+	exp.Mod(exp, modulus)
+	circuit := &variableMultiplication[emparams.Mod1e512]{}
+	assignment := &variableMultiplication[emparams.Mod1e512]{
+		Modulus:  ValueOf[emparams.Mod1e512](modulus),
+		A:        ValueOf[emparams.Mod1e512](a),
+		B:        ValueOf[emparams.Mod1e512](b),
+		Expected: ValueOf[emparams.Mod1e512](exp),
+	}
+	err := test.IsSolved(circuit, assignment, ecc.BLS12_377.ScalarField())
+	assert.NoError(err)
+}
+
+type variableExp[T FieldParams] struct {
+	Modulus  Element[T]
+	Base     Element[T]
+	Exp      Element[T]
+	Expected Element[T]
+}
+
+func (c *variableExp[T]) Define(api frontend.API) error {
+	f, err := NewField[T](api)
+	if err != nil {
+		return fmt.Errorf("new variable modulus: %w", err)
+	}
+	res := f.ModExp(&c.Base, &c.Exp, &c.Modulus)
+	f.ModAssertIsEqual(&c.Expected, res, &c.Modulus)
+	return nil
+}
+
+func TestVariableExp(t *testing.T) {
+	assert := test.NewAssert(t)
+	modulus, _ := new(big.Int).SetString("4294967311", 10)
+	base, _ := rand.Int(rand.Reader, modulus)
+	exp, _ := rand.Int(rand.Reader, modulus)
+	expected := new(big.Int).Exp(base, exp, modulus)
+	circuit := &variableExp[emparams.Mod1e512]{}
+	assignment := &variableExp[emparams.Mod1e512]{
+		Modulus:  ValueOf[emparams.Mod1e512](modulus),
+		Base:     ValueOf[emparams.Mod1e512](base),
+		Exp:      ValueOf[emparams.Mod1e512](exp),
+		Expected: ValueOf[emparams.Mod1e512](expected),
+	}
+	err := test.IsSolved(circuit, assignment, ecc.BLS12_377.ScalarField())
+	assert.NoError(err)
+}
diff --git a/std/math/emulated/doc.go b/std/math/emulated/doc.go
index 61a6e54288..6f4685f253 100644
--- a/std/math/emulated/doc.go
+++ b/std/math/emulated/doc.go
@@ -184,5 +184,17 @@ The package currently does not explicitly differentiate between constant and
 variable elements. The builder may track some elements as being constants. Some
 operations have a fast track path for cases when all inputs are constants. There
 is [Field.MulConst], which provides variable by constant multiplication.
+
+# Variable-modulus operations
+
+The package also exposes methods for performing operations with a variable
+modulus. The modulus is represented as an element and is not required to be
+prime. The methods for variable-modulus operations are [Field.ModMul],
+[Field.ModAdd], [Field.ModExp] and [Field.ModAssertIsEqual]. The modulus is
+passed as an argument to the operation.
+
+The type parameter for the [Field] should be sufficiently big to fit the inputs
+and the modulus. It is recommended to use the predefined [emparams.Mod1e512] or
+[emparams.Mod1e4096].
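
As a minimal usage sketch (the circuit type, its field names and the chosen
parameters are illustrative, assuming the usual gnark frontend, emulated and
emparams imports, and following the same pattern as the tests added in this
change), a variable-modulus multiplication could be checked in a circuit roughly
as follows:

	type modMulCircuit struct {
		Modulus  emulated.Element[emparams.Mod1e512]
		A, B     emulated.Element[emparams.Mod1e512]
		Expected emulated.Element[emparams.Mod1e512]
	}

	func (c *modMulCircuit) Define(api frontend.API) error {
		// T (here Mod1e512) only bounds the limb count; the modulus actually
		// used for reduction is the witness value c.Modulus
		f, err := emulated.NewField[emparams.Mod1e512](api)
		if err != nil {
			return err
		}
		res := f.ModMul(&c.A, &c.B, &c.Modulus)
		f.ModAssertIsEqual(&c.Expected, res, &c.Modulus)
		return nil
	}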
*/ package emulated diff --git a/std/math/emulated/element_test.go b/std/math/emulated/element_test.go index 8954fc4d69..675f296596 100644 --- a/std/math/emulated/element_test.go +++ b/std/math/emulated/element_test.go @@ -13,6 +13,7 @@ import ( "github.com/consensys/gnark/frontend" "github.com/consensys/gnark/frontend/cs/r1cs" "github.com/consensys/gnark/frontend/cs/scs" + "github.com/consensys/gnark/std/math/emulated/emparams" "github.com/consensys/gnark/test" ) @@ -1059,3 +1060,41 @@ func testSum[T FieldParams](t *testing.T) { assert.CheckCircuit(circuit, test.WithValidAssignment(witness)) }, testName[T]()) } + +type expCircuit[T FieldParams] struct { + Base Element[T] + Exp Element[T] + Expected Element[T] +} + +func (c *expCircuit[T]) Define(api frontend.API) error { + f, err := NewField[T](api) + if err != nil { + return fmt.Errorf("new variable modulus: %w", err) + } + res := f.Exp(&c.Base, &c.Exp) + f.AssertIsEqual(&c.Expected, res) + return nil +} + +func testExp[T FieldParams](t *testing.T) { + var fp T + assert := test.NewAssert(t) + assert.Run(func(assert *test.Assert) { + var circuit expCircuit[T] + base, _ := rand.Int(rand.Reader, fp.Modulus()) + exp, _ := rand.Int(rand.Reader, fp.Modulus()) + expected := new(big.Int).Exp(base, exp, fp.Modulus()) + assignment := &expCircuit[T]{ + Base: ValueOf[T](base), + Exp: ValueOf[T](exp), + Expected: ValueOf[T](expected), + } + assert.CheckCircuit(&circuit, test.WithValidAssignment(assignment)) + }, testName[T]()) +} +func TestExp(t *testing.T) { + testExp[Goldilocks](t) + testExp[BN254Fr](t) + testExp[emparams.Mod1e512](t) +} diff --git a/std/math/emulated/emparams/emparams.go b/std/math/emulated/emparams/emparams.go index b07fb6e96b..bbee1afabf 100644 --- a/std/math/emulated/emparams/emparams.go +++ b/std/math/emulated/emparams/emparams.go @@ -281,3 +281,41 @@ func (fp BLS24315Fp) Modulus() *big.Int { return ecc.BLS24_315.BaseField() } type BLS24315Fr struct{ fourLimbPrimeField } func (fr BLS24315Fr) Modulus() *big.Int { return ecc.BLS24_315.ScalarField() } + +// Mod1e4096 provides type parametrization for emulated aritmetic: +// - limbs: 64 +// - limb width: 64 bits +// +// The modulus for type parametrisation is 2^4096-1. +// +// This is non-prime modulus. It is mainly targeted for using variable-modulus +// operations (ModAdd, ModMul, ModExp, ModAssertIsEqual) for variable modulus +// arithmetic. 
+type Mod1e4096 struct{} + +func (Mod1e4096) NbLimbs() uint { return 64 } +func (Mod1e4096) BitsPerLimb() uint { return 64 } +func (Mod1e4096) IsPrime() bool { return false } +func (Mod1e4096) Modulus() *big.Int { + val, _ := new(big.Int).SetString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) + return val +} + +// Mod1e512 provides type parametrization for emulated aritmetic: +// - limbs: 8 +// - limb width: 64 bits +// +// The modulus for type parametrisation is 2^512-1. +// +// This is non-prime modulus. It is mainly targeted for using variable-modulus +// operations (ModAdd, ModMul, ModExp, ModAssertIsEqual) for variable modulus +// arithmetic. +type Mod1e512 struct{} + +func (Mod1e512) NbLimbs() uint { return 8 } +func (Mod1e512) BitsPerLimb() uint { return 64 } +func (Mod1e512) IsPrime() bool { return false } +func (Mod1e512) Modulus() *big.Int { + val, _ := new(big.Int).SetString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) + return val +} diff --git a/std/math/emulated/field_assert.go b/std/math/emulated/field_assert.go index a2809e4eb9..5c2c700663 100644 --- a/std/math/emulated/field_assert.go +++ b/std/math/emulated/field_assert.go @@ -46,7 +46,7 @@ func (f *Field[T]) AssertIsEqual(a, b *Element[T]) { } diff := f.Sub(b, a) - f.checkZero(diff) + f.checkZero(diff, nil) } // AssertIsLessOrEqual ensures that e is less or equal than a. For proper diff --git a/std/math/emulated/field_mul.go b/std/math/emulated/field_mul.go index 9a2671d08a..278b9a5024 100644 --- a/std/math/emulated/field_mul.go +++ b/std/math/emulated/field_mul.go @@ -58,21 +58,25 @@ type mulCheck[T FieldParams] struct { r *Element[T] // reduced value k *Element[T] // coefficient c *Element[T] // carry + p *Element[T] // modulus if non-nil } // evalRound1 evaluates first c(X), r(X) and k(X) at a given random point at[0]. // In the first round we do not assume that any of them is already evaluated as // they come directly from hint. -func (mc *mulCheck[T]) evalRound1(api frontend.API, at []frontend.Variable) { +func (mc *mulCheck[T]) evalRound1(at []frontend.Variable) { mc.c = mc.f.evalWithChallenge(mc.c, at) mc.r = mc.f.evalWithChallenge(mc.r, at) mc.k = mc.f.evalWithChallenge(mc.k, at) + if mc.p != nil { + mc.p = mc.f.evalWithChallenge(mc.p, at) + } } // evalRound2 now evaluates a and b at a given random point at[0]. However, it // may happen that a or b is equal to r from a previous mulcheck. In that case // we can reuse the evaluation to save constraints. 
-func (mc *mulCheck[T]) evalRound2(api frontend.API, at []frontend.Variable) { +func (mc *mulCheck[T]) evalRound2(at []frontend.Variable) { mc.a = mc.f.evalWithChallenge(mc.a, at) mc.b = mc.f.evalWithChallenge(mc.b, at) } @@ -81,6 +85,9 @@ func (mc *mulCheck[T]) evalRound2(api frontend.API, at []frontend.Variable) { // computation of p(ch) and (2^t-ch) can be shared over all mulCheck instances, // then we get them already evaluated as peval and coef. func (mc *mulCheck[T]) check(api frontend.API, peval, coef frontend.Variable) { + if mc.p != nil { + peval = mc.p.evaluation + } ls := api.Mul(mc.a.evaluation, mc.b.evaluation) rs := api.Add(mc.r.evaluation, api.Mul(peval, mc.k.evaluation), api.Mul(mc.c.evaluation, coef)) api.AssertIsEqual(ls, rs) @@ -99,14 +106,19 @@ func (mc *mulCheck[T]) cleanEvaluations() { mc.k.isEvaluated = false mc.c.evaluation = 0 mc.c.isEvaluated = false + if mc.p != nil { + mc.p.evaluation = 0 + mc.p.isEvaluated = false + } } // mulMod returns a*b mod r. In practice it computes the result using a hint and // defers the actual multiplication check. -func (f *Field[T]) mulMod(a, b *Element[T], _ uint) *Element[T] { +func (f *Field[T]) mulMod(a, b *Element[T], _ uint, p *Element[T]) *Element[T] { f.enforceWidthConditional(a) f.enforceWidthConditional(b) - k, r, c, err := f.callMulHint(a, b, true) + f.enforceWidthConditional(p) + k, r, c, err := f.callMulHint(a, b, true, p) if err != nil { panic(err) } @@ -117,18 +129,20 @@ func (f *Field[T]) mulMod(a, b *Element[T], _ uint) *Element[T] { c: c, k: k, r: r, + p: p, } f.mulChecks = append(f.mulChecks, mc) return r } // checkZero creates multiplication check a * 1 = 0 + k*p. -func (f *Field[T]) checkZero(a *Element[T]) { +func (f *Field[T]) checkZero(a *Element[T], p *Element[T]) { // the method works similarly to mulMod, but we know that we are multiplying // by one and expected result should be zero. f.enforceWidthConditional(a) + f.enforceWidthConditional(p) b := f.shortOne() - k, r, c, err := f.callMulHint(a, b, false) + k, r, c, err := f.callMulHint(a, b, false, p) if err != nil { panic(err) } @@ -139,6 +153,7 @@ func (f *Field[T]) checkZero(a *Element[T]) { c: c, k: k, r: r, // expected to be zero on zero limbs. + p: p, } f.mulChecks = append(f.mulChecks, mc) } @@ -191,6 +206,9 @@ func (f *Field[T]) performMulChecks(api frontend.API) error { toCommit = append(toCommit, f.mulChecks[i].r.Limbs...) toCommit = append(toCommit, f.mulChecks[i].k.Limbs...) toCommit = append(toCommit, f.mulChecks[i].c.Limbs...) + if f.mulChecks[i].p != nil { + toCommit = append(toCommit, f.mulChecks[i].p.Limbs...) + } } // we give all the inputs as inputs to obtain random verifier challenge. multicommit.WithCommitment(api, func(api frontend.API, commitment frontend.Variable) error { @@ -207,11 +225,11 @@ func (f *Field[T]) performMulChecks(api frontend.API) error { } // evaluate all r, k, c for i := range f.mulChecks { - f.mulChecks[i].evalRound1(api, at) + f.mulChecks[i].evalRound1(at) } // assuming r is input to some other multiplication, then is already evaluated for i := range f.mulChecks { - f.mulChecks[i].evalRound2(api, at) + f.mulChecks[i].evalRound2(at) } // evaluate p(X) at challenge pval := f.evalWithChallenge(f.Modulus(), at) @@ -234,7 +252,7 @@ func (f *Field[T]) performMulChecks(api frontend.API) error { } // callMulHint uses hint to compute r, k and c. 
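//
// As a rough summary of the relation being enforced (using the names from the
// code above): the hint returns k, r and c such that a*b = k*p + r over the
// integers, with the carries c absorbing the limb-wise differences, and the
// deferred mulCheck then asserts, at a random challenge ch,
//
//	a(ch)*b(ch) == r(ch) + p(ch)*k(ch) + c(ch)*(2^t - ch)
//
// where p is either the fixed modulus of T or, for the variable-modulus
// methods, the committed custom modulus.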
-func (f *Field[T]) callMulHint(a, b *Element[T], isMulMod bool) (quo, rem, carries *Element[T], err error) { +func (f *Field[T]) callMulHint(a, b *Element[T], isMulMod bool, customMod *Element[T]) (quo, rem, carries *Element[T], err error) { // compute the expected overflow after the multiplication of a*b to be able // to estimate the number of bits required to represent the result. nextOverflow, _ := f.mulPreCond(a, b) @@ -249,8 +267,15 @@ func (f *Field[T]) callMulHint(a, b *Element[T], isMulMod bool) (quo, rem, carri // we compute the width of the product of a*b, then we divide it by the // width of the modulus. We add 1 to the result to ensure that we have // enough space for the quotient. + modbits := uint(f.fParams.Modulus().BitLen()) + if customMod != nil { + // when we're using custom modulus, then we do not really know its + // length ahead of time. We assume worst case scenario and assume that + // the quotient can be the total length of the multiplication result. + modbits = 0 + } nbQuoLimbs := (uint(nbMultiplicationResLimbs(len(a.Limbs), len(b.Limbs)))*nbBits + nextOverflow + 1 - // - uint(f.fParams.Modulus().BitLen()) + // + modbits + // nbBits - 1) / nbBits // the remainder is always less than modulus so can represent on the same @@ -267,7 +292,11 @@ func (f *Field[T]) callMulHint(a, b *Element[T], isMulMod bool) (quo, rem, carri len(a.Limbs), nbQuoLimbs, } - hintInputs = append(hintInputs, f.Modulus().Limbs...) + modulusLimbs := f.Modulus().Limbs + if customMod != nil { + modulusLimbs = customMod.Limbs + } + hintInputs = append(hintInputs, modulusLimbs...) hintInputs = append(hintInputs, a.Limbs...) hintInputs = append(hintInputs, b.Limbs...) ret, err := f.api.NewHint(mulHint, int(nbQuoLimbs)+int(nbRemLimbs)+int(nbCarryLimbs), hintInputs...) @@ -328,7 +357,9 @@ func mulHint(field *big.Int, inputs, outputs []*big.Int) error { quo := new(big.Int) rem := new(big.Int) ab := new(big.Int).Mul(a, b) - quo.QuoRem(ab, p, rem) + if p.Cmp(new(big.Int)) != 0 { + quo.QuoRem(ab, p, rem) + } if err := decompose(quo, uint(nbBits), quoLimbs); err != nil { return fmt.Errorf("decompose quo: %w", err) } @@ -380,7 +411,7 @@ func mulHint(field *big.Int, inputs, outputs []*big.Int) error { // For multiplying by a constant, use [Field[T].MulConst] method which is more // efficient. func (f *Field[T]) Mul(a, b *Element[T]) *Element[T] { - return f.reduceAndOp(f.mulMod, f.mulPreCond, a, b) + return f.reduceAndOp(func(a, b *Element[T], u uint) *Element[T] { return f.mulMod(a, b, u, nil) }, f.mulPreCond, a, b) } // MulMod computes a*b and reduces it modulo the field order. The returned Element @@ -388,7 +419,7 @@ func (f *Field[T]) Mul(a, b *Element[T]) *Element[T] { // // Equivalent to [Field[T].Mul], kept for backwards compatibility. func (f *Field[T]) MulMod(a, b *Element[T]) *Element[T] { - return f.reduceAndOp(f.mulMod, f.mulPreCond, a, b) + return f.reduceAndOp(func(a, b *Element[T], u uint) *Element[T] { return f.mulMod(a, b, u, nil) }, f.mulPreCond, a, b) } // MulConst multiplies a by a constant c and returns it. We assume that the @@ -463,3 +494,18 @@ func (f *Field[T]) mulNoReduce(a, b *Element[T], nextoverflow uint) *Element[T] } return f.newInternalElement(resLimbs, nextoverflow) } + +// Exp computes base^exp modulo the field order. The returned Element has default +// number of limbs and zero overflow. 
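//
// As implemented below, Exp decomposes exp into its bits and runs a
// square-and-multiply loop, so the circuit performs roughly one squaring, one
// multiplication and one select per bit of T; the cost therefore depends on the
// bit-width of T rather than on the actual value of exp.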
+func (f *Field[T]) Exp(base, exp *Element[T]) *Element[T] { + expBts := f.ToBits(exp) + n := len(expBts) + res := f.Select(expBts[0], base, f.One()) + base = f.Mul(base, base) + for i := 1; i < n-1; i++ { + res = f.Select(expBts[i], f.Mul(base, res), res) + base = f.Mul(base, base) + } + res = f.Select(expBts[n-1], f.Mul(base, res), res) + return res +} diff --git a/std/math/emulated/field_ops.go b/std/math/emulated/field_ops.go index aeaf2c3059..a9f0d9cda3 100644 --- a/std/math/emulated/field_ops.go +++ b/std/math/emulated/field_ops.go @@ -176,7 +176,7 @@ func (f *Field[T]) Reduce(a *Element[T]) *Element[T] { panic("trying to reduce a constant, which happen to have an overflow flag set") } // slow path - use hint to reduce value - return f.mulMod(a, f.One(), 0) + return f.mulMod(a, f.One(), 0, nil) } // Sub subtracts b from a and returns it. Reduces locally if wouldn't fit into @@ -204,9 +204,10 @@ func (f *Field[T]) sub(a, b *Element[T], nextOverflow uint) *Element[T] { // first we have to compute padding to ensure that the subtraction does not // underflow. + var fp T nbLimbs := max(len(a.Limbs), len(b.Limbs)) limbs := make([]frontend.Variable, nbLimbs) - padLimbs := subPadding[T](b.overflow, uint(nbLimbs)) + padLimbs := subPadding(fp.Modulus(), fp.BitsPerLimb(), b.overflow, uint(nbLimbs)) for i := range limbs { limbs[i] = padLimbs[i] if i < len(a.Limbs) { diff --git a/std/math/emulated/hints.go b/std/math/emulated/hints.go index 6c1644c407..eab14b47e9 100644 --- a/std/math/emulated/hints.go +++ b/std/math/emulated/hints.go @@ -22,6 +22,7 @@ func GetHints() []solver.Hint { InverseHint, SqrtHint, mulHint, + subPaddingHint, } } @@ -153,3 +154,51 @@ func SqrtHint(mod *big.Int, inputs []*big.Int, outputs []*big.Int) error { return nil }) } + +// subPaddingHint computes the padding for the subtraction of two numbers. It +// ensures that the padding is a multiple of the modulus. Can be used to avoid +// underflow. +// +// In case of fixed modulus use subPadding instead. +func subPaddingHint(mod *big.Int, inputs, outputs []*big.Int) error { + if len(inputs) < 4 { + return fmt.Errorf("input must be at least four elements") + } + nbLimbs := int(inputs[0].Int64()) + bitsPerLimbs := uint(inputs[1].Uint64()) + overflow := uint(inputs[2].Uint64()) + retLimbs := int(inputs[3].Int64()) + if len(inputs[4:]) != nbLimbs { + return fmt.Errorf("input length mismatch") + } + if len(outputs) != retLimbs { + return fmt.Errorf("result does not fit into output") + } + pLimbs := inputs[4 : 4+nbLimbs] + p := new(big.Int) + if err := recompose(pLimbs, bitsPerLimbs, p); err != nil { + return fmt.Errorf("recompose modulus: %w", err) + } + padLimbs := subPadding(p, bitsPerLimbs, overflow, uint(nbLimbs)) + for i := range padLimbs { + outputs[i].Set(padLimbs[i]) + } + + return nil +} + +func (f *Field[T]) computeSubPaddingHint(overflow uint, nbLimbs uint, modulus *Element[T]) *Element[T] { + var fp T + inputs := []frontend.Variable{fp.NbLimbs(), fp.BitsPerLimb(), overflow, nbLimbs} + inputs = append(inputs, modulus.Limbs...) + res, err := f.api.NewHint(subPaddingHint, int(nbLimbs), inputs...) 
+ if err != nil { + panic(fmt.Sprintf("sub padding hint: %v", err)) + } + for i := range res { + f.checker.Check(res[i], int(fp.BitsPerLimb()+overflow+1)) + } + padding := f.newInternalElement(res, fp.BitsPerLimb()+overflow+1) + f.checkZero(padding, modulus) + return padding +} From c9022441484e8bb37722aa17b0652049d2eed63c Mon Sep 17 00:00:00 2001 From: Gautam Botrel Date: Fri, 19 Apr 2024 21:54:24 -0500 Subject: [PATCH 54/55] fix: plonk.SRSSize takes constraint.ConstraintSystem as input, not constraint.System --- backend/plonk/plonk.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/plonk/plonk.go b/backend/plonk/plonk.go index f75158e9f8..1b615664ee 100644 --- a/backend/plonk/plonk.go +++ b/backend/plonk/plonk.go @@ -322,7 +322,7 @@ func NewVerifyingKey(curveID ecc.ID) VerifyingKey { // SRSSize returns the required size of the kzg SRS for a given constraint system // Note that the SRS size in Lagrange form is a power of 2, // and the SRS size in canonical form need few extra elements (3) to account for the blinding factors -func SRSSize(ccs constraint.System) (sizeCanonical, sizeLagrange int) { +func SRSSize(ccs constraint.ConstraintSystem) (sizeCanonical, sizeLagrange int) { nbConstraints := ccs.GetNbConstraints() sizeSystem := nbConstraints + ccs.GetNbPublicVariables() From ea5cdf97547e13e69d63b8b60035243fdabd8f77 Mon Sep 17 00:00:00 2001 From: Gautam Botrel Date: Mon, 22 Apr 2024 16:10:12 -0500 Subject: [PATCH 55/55] style: remove old todos (#1106) * style: remove old todos * style: removed TODO for checking staticcall * fix: update version --------- Co-authored-by: Thomas Piellard --- backend/plonk/bls12-377/prove.go | 4 ---- backend/plonk/bls12-381/prove.go | 4 ---- backend/plonk/bls24-315/prove.go | 4 ---- backend/plonk/bls24-317/prove.go | 4 ---- backend/plonk/bn254/prove.go | 4 ---- backend/plonk/bn254/solidity.go | 1 - backend/plonk/bw6-633/prove.go | 4 ---- backend/plonk/bw6-761/prove.go | 4 ---- doc.go | 2 +- .../backend/template/zkpschemes/plonk/plonk.prove.go.tmpl | 4 ---- 10 files changed, 1 insertion(+), 34 deletions(-) diff --git a/backend/plonk/bls12-377/prove.go b/backend/plonk/bls12-377/prove.go index 6c74757478..8dcd0479f0 100644 --- a/backend/plonk/bls12-377/prove.go +++ b/backend/plonk/bls12-377/prove.go @@ -254,8 +254,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles - // and storing them in memory is costly given its size. 
→ do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -1224,8 +1222,6 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { - // TODO @gbotrel rename - // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) diff --git a/backend/plonk/bls12-381/prove.go b/backend/plonk/bls12-381/prove.go index b8060ab811..8e43526322 100644 --- a/backend/plonk/bls12-381/prove.go +++ b/backend/plonk/bls12-381/prove.go @@ -254,8 +254,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles - // and storing them in memory is costly given its size. → do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -1224,8 +1222,6 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { - // TODO @gbotrel rename - // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) diff --git a/backend/plonk/bls24-315/prove.go b/backend/plonk/bls24-315/prove.go index 16d64a5a20..b0e8f4d7cf 100644 --- a/backend/plonk/bls24-315/prove.go +++ b/backend/plonk/bls24-315/prove.go @@ -254,8 +254,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles - // and storing them in memory is costly given its size. → do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -1224,8 +1222,6 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { - // TODO @gbotrel rename - // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) diff --git a/backend/plonk/bls24-317/prove.go b/backend/plonk/bls24-317/prove.go index 1fd43d5169..1e814667b2 100644 --- a/backend/plonk/bls24-317/prove.go +++ b/backend/plonk/bls24-317/prove.go @@ -254,8 +254,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles - // and storing them in memory is costly given its size. 
→ do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -1224,8 +1222,6 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { - // TODO @gbotrel rename - // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) diff --git a/backend/plonk/bn254/prove.go b/backend/plonk/bn254/prove.go index 5e32908260..ee1bd21def 100644 --- a/backend/plonk/bn254/prove.go +++ b/backend/plonk/bn254/prove.go @@ -254,8 +254,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles - // and storing them in memory is costly given its size. → do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -1224,8 +1222,6 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { - // TODO @gbotrel rename - // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) diff --git a/backend/plonk/bn254/solidity.go b/backend/plonk/bn254/solidity.go index 300fef7340..08d0b47113 100644 --- a/backend/plonk/bn254/solidity.go +++ b/backend/plonk/bn254/solidity.go @@ -884,7 +884,6 @@ contract PlonkVerifier { function check_pairing_kzg(mPtr) { let state := mload(0x40) - // TODO test the staticcall using the method from audit_4-5 let l_success := staticcall(gas(), 8, mPtr, 0x180, 0x00, 0x20) if iszero(l_success) { error_pairing() diff --git a/backend/plonk/bw6-633/prove.go b/backend/plonk/bw6-633/prove.go index 9422753443..c4e134320b 100644 --- a/backend/plonk/bw6-633/prove.go +++ b/backend/plonk/bw6-633/prove.go @@ -254,8 +254,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles - // and storing them in memory is costly given its size. → do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -1224,8 +1222,6 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { - // TODO @gbotrel rename - // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) diff --git a/backend/plonk/bw6-761/prove.go b/backend/plonk/bw6-761/prove.go index 9699d598ec..c431940845 100644 --- a/backend/plonk/bw6-761/prove.go +++ b/backend/plonk/bw6-761/prove.go @@ -254,8 +254,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles - // and storing them in memory is costly given its size. 
→ do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -1224,8 +1222,6 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { - // TODO @gbotrel rename - // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta) diff --git a/doc.go b/doc.go index 233687841c..73f850a3df 100644 --- a/doc.go +++ b/doc.go @@ -22,7 +22,7 @@ import ( "github.com/consensys/gnark-crypto/ecc" ) -var Version = semver.MustParse("0.10.0-alpha") +var Version = semver.MustParse("0.10.0") // Curves return the curves supported by gnark func Curves() []ecc.ID { diff --git a/internal/generator/backend/template/zkpschemes/plonk/plonk.prove.go.tmpl b/internal/generator/backend/template/zkpschemes/plonk/plonk.prove.go.tmpl index dcd450743e..44b5a9ff28 100644 --- a/internal/generator/backend/template/zkpschemes/plonk/plonk.prove.go.tmpl +++ b/internal/generator/backend/template/zkpschemes/plonk/plonk.prove.go.tmpl @@ -231,8 +231,6 @@ func newInstance(ctx context.Context, spr *cs.SparseR1CS, pk *ProvingKey, fullWi } else { s.domain1 = fft.NewDomain(4*sizeSystem, fft.WithoutPrecompute()) } - // TODO @gbotrel domain1 is used for only 1 FFT → precomputing the twiddles - // and storing them in memory is costly given its size. → do a FFT on the fly // build trace s.trace = NewTrace(spr, s.domain0) @@ -1201,8 +1199,6 @@ func evaluateXnMinusOneDomainBigCoset(domains [2]*fft.Domain) []fr.Element { // - Z_{H}(ζ)*((H₀(X) + ζᵐ⁺²*H₁(X) + ζ²⁽ᵐ⁺²⁾*H₂(X)) func (s *instance) innerComputeLinearizedPoly(lZeta, rZeta, oZeta, alpha, beta, gamma, zeta, zu fr.Element, qcpZeta, blindedZCanonical []fr.Element, pi2Canonical [][]fr.Element, pk *ProvingKey) []fr.Element { - // TODO @gbotrel rename - // l(ζ)r(ζ) var rl fr.Element rl.Mul(&rZeta, &lZeta)