From 5f125d093f5ebed86bf00782f56dba07d0ed6c8c Mon Sep 17 00:00:00 2001 From: lovesh Date: Mon, 30 Sep 2024 20:14:09 +0530 Subject: [PATCH] Verifiable encryption of messages of a generalized Pedersen commitment using the paper "Verifiable Encryption from MPC-in-the-Head" Signed-off-by: lovesh --- Cargo.toml | 4 +- README.md | 1 + proof_system/src/lib.rs | 4 +- .../tests/bbs_plus_and_accumulator.rs | 37 ++ secret_sharing_and_dkg/src/common.rs | 2 +- utils/README.md | 2 +- utils/src/elgamal.rs | 282 ++++++++- utils/src/lib.rs | 4 +- utils/src/msm.rs | 20 + verifiable_encryption/Cargo.toml | 33 ++ verifiable_encryption/README.md | 9 + verifiable_encryption/src/error.rs | 16 + verifiable_encryption/src/lib.rs | 8 + verifiable_encryption/src/tz_21/dkgith.rs | 501 ++++++++++++++++ .../src/tz_21/dkgith_batched_elgamal.rs | 494 ++++++++++++++++ verifiable_encryption/src/tz_21/mod.rs | 28 + verifiable_encryption/src/tz_21/rdkgith.rs | 556 ++++++++++++++++++ .../src/tz_21/rdkgith_batched_elgamal.rs | 504 ++++++++++++++++ verifiable_encryption/src/tz_21/seed_tree.rs | 319 ++++++++++ verifiable_encryption/src/tz_21/util.rs | 71 +++ 20 files changed, 2885 insertions(+), 10 deletions(-) create mode 100644 verifiable_encryption/Cargo.toml create mode 100644 verifiable_encryption/README.md create mode 100644 verifiable_encryption/src/error.rs create mode 100644 verifiable_encryption/src/lib.rs create mode 100644 verifiable_encryption/src/tz_21/dkgith.rs create mode 100644 verifiable_encryption/src/tz_21/dkgith_batched_elgamal.rs create mode 100644 verifiable_encryption/src/tz_21/mod.rs create mode 100644 verifiable_encryption/src/tz_21/rdkgith.rs create mode 100644 verifiable_encryption/src/tz_21/rdkgith_batched_elgamal.rs create mode 100644 verifiable_encryption/src/tz_21/seed_tree.rs create mode 100644 verifiable_encryption/src/tz_21/util.rs diff --git a/Cargo.toml b/Cargo.toml index 29f3ea81..38843d7c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,8 +20,8 @@ members = [ 
"bulletproofs_plus_plus", "smc_range_proof", "short_group_sig", - "syra" -] + "syra", + "verifiable_encryption"] resolver = "2" [workspace.package] diff --git a/README.md b/README.md index a2fc6643..36839612 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,7 @@ Library providing privacy enhancing cryptographic primitives. 11. [Short group signatures](./short_group_sig/). BB signature and weak-BB signature and their proofs of knowledge based on the papers [Short Signatures Without Random Oracles](https://eprint.iacr.org/2004/171) and [Scalable Revocation Scheme for Anonymous Credentials Based on n-times Unlinkable Proofs](http://library.usc.edu.ph/ACM/SIGSAC%202017/wpes/p123.pdf). 12. [Keyed-Verification Anonymous Credentials (KVAC)](./kvac). Implements Keyed-Verification Anonymous Credentials (KVAC) schemes. 13. [SyRA](./syra). Implements sybil resilient signatures to be used for generating pseudonyms for low-entropy credential attributes. +14. [Verifiable encryption](./verifiable_encryption) using [this paper Verifiable Encryption from MPC-in-the-Head](https://eprint.iacr.org/2021/1704.pdf). ## Composite proof system diff --git a/proof_system/src/lib.rs b/proof_system/src/lib.rs index b66b8d4a..bf51861c 100644 --- a/proof_system/src/lib.rs +++ b/proof_system/src/lib.rs @@ -112,6 +112,8 @@ //! //! - test `pok_of_3_bbs_plus_sig_and_message_equality` proves knowledge of 3 BBS+ signatures and also that certain //! messages are equal among them without revealing them. +//! - test `pok_of_bbs_plus_sig_and_inequality_with_public_value` proves knowledge of a BBS+ signature and also that +//! certain message is not equal to a public value without revealing the message //! - test `pok_of_bbs_plus_sig_and_accumulator` proves knowledge of a BBS+ signature and also that certain messages //! are present and absent in the 2 accumulators respectively. //! 
- test `pok_of_knowledge_in_pedersen_commitment_and_bbs_plus_sig` proves knowledge of a BBS+ signature and opening @@ -136,7 +138,7 @@ //! is bounded by a given value, etc [here](tests/r1cs). The Circom compiler output and circuits are [here](tests/r1cs/circom). //! The circuits were compiled and tested for BLS12-381 curve. //! -//! *Note*: This design is largely inspired from my work at Hyperledger Ursa. +//! *Note*: This design is inspired from my work at Hyperledger Ursa. //! //! *Note*: The design is tentative and will likely change as more protocols are integrated. //! diff --git a/proof_system/tests/bbs_plus_and_accumulator.rs b/proof_system/tests/bbs_plus_and_accumulator.rs index 78ab4bea..5c090746 100644 --- a/proof_system/tests/bbs_plus_and_accumulator.rs +++ b/proof_system/tests/bbs_plus_and_accumulator.rs @@ -407,6 +407,7 @@ macro_rules! gen_tests { let nonce = Some(b"test-nonce".to_vec()); + let start = Instant::now(); let proof = Proof::new::( &mut rng, prover_proof_spec.clone(), @@ -416,6 +417,10 @@ macro_rules! gen_tests { ) .unwrap() .0; + println!( + "Time to create a proof with a BBS+ signature and VB positive accumulator membership: {:?}", + start.elapsed() + ); test_serialization!(Proof, proof); @@ -623,6 +628,7 @@ macro_rules! gen_tests { test_serialization!(ProofSpec, prover_proof_spec); + let start = Instant::now(); let proof = Proof::new::( &mut rng, prover_proof_spec.clone(), @@ -632,6 +638,10 @@ macro_rules! gen_tests { ) .unwrap() .0; + println!( + "Time to crate a proof with a BBS+ signature and VB universal accumulator membership: {:?}", + start.elapsed() + ); test_serialization!(Proof, proof); @@ -735,6 +745,7 @@ macro_rules! gen_tests { test_serialization!(ProofSpec, proof_spec); + let start = Instant::now(); let proof = Proof::new::( &mut rng, proof_spec.clone(), @@ -744,6 +755,10 @@ macro_rules! 
gen_tests { ) .unwrap() .0; + println!( + "Time to create a proof with a BBS+ signature and VB universal accumulator non-membership: {:?}", + start.elapsed() + ); test_serialization!(Proof, proof); @@ -850,6 +865,7 @@ macro_rules! gen_tests { test_serialization!(ProofSpec, proof_spec); + let start = Instant::now(); let proof = Proof::new::( &mut rng, proof_spec.clone(), @@ -859,6 +875,10 @@ macro_rules! gen_tests { ) .unwrap() .0; + println!( + "Time to create a proof with a BBS+ signature and KB universal accumulator membership: {:?}", + start.elapsed() + ); test_serialization!(Proof, proof); @@ -961,6 +981,7 @@ macro_rules! gen_tests { test_serialization!(ProofSpec, proof_spec); + let start = Instant::now(); let proof = Proof::new::( &mut rng, proof_spec.clone(), @@ -970,6 +991,10 @@ macro_rules! gen_tests { ) .unwrap() .0; + println!( + "Time to create a proof with a BBS+ signature and KB universal accumulator non-membership: {:?}", + start.elapsed() + ); test_serialization!(Proof, proof); @@ -1081,6 +1106,7 @@ macro_rules! gen_tests { let nonce = Some(b"test-nonce".to_vec()); + let start = Instant::now(); let proof = Proof::new::( &mut rng, proof_spec.clone(), @@ -1090,6 +1116,10 @@ macro_rules! gen_tests { ) .unwrap() .0; + println!( + "Time to create proof with a BBS+ signature and KB positive accumulator membership: {:?}", + start.elapsed() + ); test_serialization!(Proof, proof); @@ -1279,6 +1309,7 @@ macro_rules! gen_tests { test_serialization!(ProofSpec, proof_spec); + let start = Instant::now(); let proof = Proof::new::( &mut rng, proof_spec.clone(), @@ -1288,6 +1319,7 @@ macro_rules! gen_tests { ) .unwrap() .0; + println!("Time to create proof with a BBS+ signature and 6 accumulator membership and non-membership checks: {:?}", start.elapsed()); test_serialization!(Proof, proof); @@ -1828,6 +1860,7 @@ macro_rules! 
gen_tests { )); test_serialization!(Witnesses, witnesses); + let start = Instant::now(); let proof = Proof::new::( &mut rng, proof_spec.clone(), @@ -1837,6 +1870,10 @@ macro_rules! gen_tests { ) .unwrap() .0; + println!( + "Time to create proof with 4 BBS+ signatures: {:?}", + start.elapsed() + ); let mut statements = Statements::new(); statements.add($verifier_stmt::new_statement_from_params_ref( diff --git a/secret_sharing_and_dkg/src/common.rs b/secret_sharing_and_dkg/src/common.rs index a1952a0c..e2f981fb 100644 --- a/secret_sharing_and_dkg/src/common.rs +++ b/secret_sharing_and_dkg/src/common.rs @@ -155,7 +155,7 @@ impl PublicKeyBase { } } -/// Return the Lagrange basis polynomial at x = 0 given the `x` coordinates +/// Return the Lagrange basis polynomial at x = 0 given the `x` coordinates. /// `(x_coords[0]) * (x_coords[1]) * ... / ((x_coords[0] - i) * (x_coords[1] - i) * ...)` /// Assumes all `x` coordinates are distinct and appropriate number of coordinates are provided pub fn lagrange_basis_at_0(x_coords: &[ShareId], i: ShareId) -> Result { diff --git a/utils/README.md b/utils/README.md index d7308df6..bb75f366 100644 --- a/utils/README.md +++ b/utils/README.md @@ -3,7 +3,7 @@ A collection of utilities used by our other libraries in this workspace. - Pedersen commitment -- Elgamal encryption +- Elgamal encryption and variations - plain Elgamal, hashed-Elgamal and batched hashed-Elgamal - finite field utilities like inner product, weighted inner product, hadamard product, etc. - multiscalar multiplication (MSM) like Fixed Base MSM - polynomial utilities like multiplying polynomials, creating polynomial from roots, etc. diff --git a/utils/src/elgamal.rs b/utils/src/elgamal.rs index 168cc67f..4209b930 100644 --- a/utils/src/elgamal.rs +++ b/utils/src/elgamal.rs @@ -1,14 +1,24 @@ -//! Elgamal encryption +//! Elgamal encryption and some variations +//! Implements: +//! 1. 
Plain Elgamal scheme where the message to be encrypted is a group element (of the same group as the public key) +//! 2. Hashed Elgamal where the message to be encrypted is a field element. +//! 3. A more efficient, batched hashed Elgamal where multiple messages, each being a field element, are encrypted for the same public key. -use crate::serde_utils::ArkObjectBytes; +use crate::{ + aliases::FullDigest, hashing_utils::hash_to_field, msm::WindowTable, + serde_utils::ArkObjectBytes, +}; use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::PrimeField; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ark_std::{ops::Neg, rand::RngCore, vec::Vec, UniformRand}; +use ark_std::{cfg_iter, ops::Neg, rand::RngCore, vec::Vec, UniformRand}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use zeroize::{Zeroize, ZeroizeOnDrop}; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + #[derive( Clone, Debug, PartialEq, Eq, Zeroize, ZeroizeOnDrop, CanonicalSerialize, CanonicalDeserialize, )] @@ -90,11 +100,211 @@ impl Ciphertext { } } + /// Returns the ciphertext but takes the window tables for the public key and generator. Useful when a lot + /// of encryptions have to be done using the same public key + pub fn new_given_randomness_and_window_tables( + msg: &G, + randomness: &G::ScalarField, + public_key: &WindowTable, + gen: &WindowTable, + ) -> Self { + let enc1 = (public_key.multiply(randomness) + msg).into_affine(); + Self { + encrypted: enc1, + eph_pk: gen.multiply(randomness).into_affine(), + } + } + pub fn decrypt(&self, secret_key: &G::ScalarField) -> G { (self.eph_pk.mul(secret_key).neg() + self.encrypted).into_affine() } } +/// Hashed Elgamal. Encryption of a field element `m`. The shared secret is hashed to a field element +/// and the result is added to the message to get the ciphertext. 
+#[serde_as] +#[derive( + Default, + Clone, + Copy, + Debug, + PartialEq, + Eq, + CanonicalSerialize, + CanonicalDeserialize, + Serialize, + Deserialize, +)] +pub struct HashedElgamalCiphertext { + /// `m + Hash(r * pk)` + #[serde_as(as = "ArkObjectBytes")] + pub encrypted: G::ScalarField, + /// Ephemeral public key `r * gen` + #[serde_as(as = "ArkObjectBytes")] + pub eph_pk: G, +} + +impl HashedElgamalCiphertext { + /// Returns the ciphertext and randomness created for encryption + pub fn new( + rng: &mut R, + msg: &G::ScalarField, + public_key: &G, + gen: &G, + ) -> (Self, G::ScalarField) { + let alpha = G::ScalarField::rand(rng); + ( + Self::new_given_randomness::(msg, &alpha, public_key, gen), + alpha, + ) + } + + /// Returns the ciphertext + pub fn new_given_randomness( + msg: &G::ScalarField, + randomness: &G::ScalarField, + public_key: &G, + gen: &G, + ) -> Self { + let b = randomness.into_bigint(); + let shared_secret = public_key.mul_bigint(b).into_affine(); + Self { + encrypted: Self::otp::(shared_secret) + msg, + eph_pk: gen.mul_bigint(b).into_affine(), + } + } + + /// Returns the ciphertext but takes the window tables for the public key and generator. Useful when a lot + /// of encryptions have to be done using the same public key + pub fn new_given_randomness_and_window_tables( + msg: &G::ScalarField, + randomness: &G::ScalarField, + public_key: &WindowTable, + gen: &WindowTable, + ) -> Self { + let shared_secret = public_key.multiply(randomness).into_affine(); + Self { + encrypted: Self::otp::(shared_secret) + msg, + eph_pk: gen.multiply(randomness).into_affine(), + } + } + + pub fn decrypt(&self, secret_key: &G::ScalarField) -> G::ScalarField { + let shared_secret = self.eph_pk.mul(secret_key).into_affine(); + self.encrypted - Self::otp::(shared_secret) + } + + /// Return a OTP (One Time Pad) by hashing the shared secret. 
+ pub fn otp(shared_secret: G) -> G::ScalarField { + let mut bytes = Vec::with_capacity(shared_secret.compressed_size()); + shared_secret.serialize_uncompressed(&mut bytes).unwrap(); + hash_to_field::(b"", &bytes) + } +} + +/// Hashed Elgamal variant for encrypting a batch of messages. Encryption of vector of field elements. +/// Generates a batch of OTPs (One Time Pad) by hashing the concatenation of the shared secret and the +/// message index, corresponding to which the OTP is created. The OTPs are then added to the corresponding +/// message to get the ciphertext. This is an efficient mechanism of encrypting multiple messages to the same +/// public key as there is only 1 shared secret created by a scalar multiplication and one randomness chosen +/// by the encryptor +#[serde_as] +#[derive( + Default, + Clone, + Debug, + PartialEq, + Eq, + CanonicalSerialize, + CanonicalDeserialize, + Serialize, + Deserialize, +)] +pub struct BatchedHashedElgamalCiphertext { + /// `m_i + Hash((r * pk) || i)` + #[serde_as(as = "Vec")] + pub encrypted: Vec, + /// Ephemeral public key `r * gen` + #[serde_as(as = "ArkObjectBytes")] + pub eph_pk: G, +} + +impl BatchedHashedElgamalCiphertext { + /// Returns the ciphertext and randomness created for encryption + pub fn new( + rng: &mut R, + msgs: &[G::ScalarField], + public_key: &G, + gen: &G, + ) -> (Self, G::ScalarField) { + let alpha = G::ScalarField::rand(rng); + ( + Self::new_given_randomness::(msgs, &alpha, public_key, gen), + alpha, + ) + } + + /// Returns the ciphertext + pub fn new_given_randomness( + msgs: &[G::ScalarField], + randomness: &G::ScalarField, + public_key: &G, + gen: &G, + ) -> Self { + let b = randomness.into_bigint(); + let shared_secret = public_key.mul_bigint(b).into_affine(); + Self { + encrypted: Self::enc_with_otp::(&msgs, &shared_secret), + eph_pk: gen.mul_bigint(b).into_affine(), + } + } + + /// Returns the ciphertext but takes the window tables for the public key and generator. 
Useful when a lot + /// of encryptions have to be done using the same public key + pub fn new_given_randomness_and_window_tables( + msgs: &[G::ScalarField], + randomness: &G::ScalarField, + public_key: &WindowTable, + gen: &WindowTable, + ) -> Self { + let shared_secret = public_key.multiply(randomness).into_affine(); + Self { + encrypted: Self::enc_with_otp::(&msgs, &shared_secret), + eph_pk: gen.multiply(randomness).into_affine(), + } + } + + pub fn decrypt(&self, secret_key: &G::ScalarField) -> Vec { + let shared_secret = self.eph_pk.mul(secret_key).into_affine(); + cfg_iter!(self.encrypted) + .enumerate() + .map(|(i, e)| *e - Self::otp::(&shared_secret, i as u32)) + .collect::>() + } + + pub fn batch_size(&self) -> usize { + self.encrypted.len() + } + + /// Return a OTP (One Time Pad) by hashing the shared secret along with the message index. + pub fn otp(shared_secret: &G, msg_idx: u32) -> G::ScalarField { + let mut bytes = Vec::with_capacity(shared_secret.compressed_size()); + shared_secret.serialize_uncompressed(&mut bytes).unwrap(); + msg_idx.serialize_uncompressed(&mut bytes).unwrap(); + hash_to_field::(b"", &bytes) + } + + fn enc_with_otp( + msgs: &[G::ScalarField], + shared_secret: &G, + ) -> Vec { + cfg_iter!(msgs) + .enumerate() + .map(|(i, m)| Self::otp::(shared_secret, i as u32) + m) + .collect::>() + } +} + #[cfg(test)] pub mod tests { use super::*; @@ -103,6 +313,8 @@ pub mod tests { rand::{rngs::StdRng, SeedableRng}, UniformRand, }; + use blake2::Blake2b512; + use std::time::{Duration, Instant}; #[test] fn encrypt_decrypt() { @@ -120,4 +332,68 @@ pub mod tests { check::(&mut rng); check::(&mut rng); } + + #[test] + fn hashed_encrypt_decrypt() { + let mut rng = StdRng::seed_from_u64(0u64); + + fn check(rng: &mut StdRng) { + let gen = G::Group::rand(rng).into_affine(); + let (sk, pk) = keygen(rng, &gen); + + let msg = G::ScalarField::rand(rng); + let (ciphertext, _) = + HashedElgamalCiphertext::new::<_, Blake2b512>(rng, &msg, &pk.0, &gen); + 
assert_eq!(ciphertext.decrypt::(&sk.0), msg); + } + + check::(&mut rng); + check::(&mut rng); + } + + #[test] + fn hashed_encrypt_decrypt_batch() { + let mut rng = StdRng::seed_from_u64(0u64); + + fn check(rng: &mut StdRng) { + let gen = G::Group::rand(rng).into_affine(); + let (sk, pk) = keygen(rng, &gen); + let count = 10; + + let msgs = (0..count) + .map(|_| G::ScalarField::rand(rng)) + .collect::>(); + let mut enc_time = Duration::default(); + let mut dec_time = Duration::default(); + for i in 0..count { + let start = Instant::now(); + let (ciphertext, _) = + HashedElgamalCiphertext::new::<_, Blake2b512>(rng, &msgs[i], &pk.0, &gen); + enc_time += start.elapsed(); + let start = Instant::now(); + assert_eq!(ciphertext.decrypt::(&sk.0), msgs[i]); + dec_time += start.elapsed(); + } + println!( + "For encryption {} messages one by one, time to encrypt {:?} and to decrypt: {:?}", + count, enc_time, dec_time + ); + + let start = Instant::now(); + let (ciphertext, _) = + BatchedHashedElgamalCiphertext::new::<_, Blake2b512>(rng, &msgs, &pk.0, &gen); + enc_time = start.elapsed(); + let start = Instant::now(); + assert_eq!(ciphertext.decrypt::(&sk.0), msgs); + dec_time = start.elapsed(); + + println!( + "For encryption {} messages in batch, time to encrypt {:?} and to decrypt: {:?}", + count, enc_time, dec_time + ); + } + + check::(&mut rng); + check::(&mut rng); + } } diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 92390aa2..c370c0a8 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -1,7 +1,7 @@ //! A collection of utilities used by our other libraries in this workspace. //! //! - Pedersen commitment -//! - Elgamal encryption +//! - Elgamal encryption, including Hashed Elgamal //! - finite field utilities like inner product, weighted inner product, hadamard product, etc. //! - multiscalar multiplication (MSM) like Fixed Base MSM //! - polynomial utilities like multiplying polynomials, creating polynomial from roots, etc. 
@@ -21,7 +21,7 @@ pub mod extend_some; pub mod serde_utils; pub mod ecies; -/// Elgamal encryption +/// Elgamal encryption and variations - plain Elgamal, hashed-Elgamal and batched hashed-Elgamal pub mod elgamal; /// Finite field utilities like inner product, weighted inner product, hadamard product, etc diff --git a/utils/src/msm.rs b/utils/src/msm.rs index e71718b9..72df2428 100644 --- a/utils/src/msm.rs +++ b/utils/src/msm.rs @@ -120,6 +120,26 @@ pub mod tests { let group_elem_affine = group_elem.into_affine(); let table = context.table(group_elem); + let mut g1 = G1::rand(&mut rng).into_affine(); + let mut g2 = G2::rand(&mut rng).into_affine(); + let e = Fr::rand(&mut rng); + + let start = Instant::now(); + g1 = (g1 * e).into_affine(); + println!("G1 scalar multiplication time {:?}", start.elapsed()); + + let start = Instant::now(); + g2 = (g2 * e).into_affine(); + println!("G2 scalar multiplication time {:?}", start.elapsed()); + + let start = Instant::now(); + let mut _gt = ::pairing(g1, g2); + println!("pairing time {:?}", start.elapsed()); + + let start = Instant::now(); + _gt = _gt * e; + println!("GT multiplication time {:?}", start.elapsed()); + let mut d0 = Duration::default(); let mut d1 = Duration::default(); let mut d2 = Duration::default(); diff --git a/verifiable_encryption/Cargo.toml b/verifiable_encryption/Cargo.toml new file mode 100644 index 00000000..c27e2e15 --- /dev/null +++ b/verifiable_encryption/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "verifiable-encryption" +version = "0.1.0" +edition.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true + +[dependencies] +ark-serialize.workspace = true +ark-ff.workspace = true +ark-ec.workspace = true +ark-std.workspace = true +digest.workspace = true +rayon = {workspace = true, optional = true} +serde.workspace = true +serde_with.workspace = true +zeroize.workspace = true +dock_crypto_utils = { version = "0.20.0", default-features = false, path = 
"../utils" } +sha3 = { version = "0.10.6", default-features = false } +itertools.workspace = true +secret_sharing_and_dkg = { version = "0.13.0", default-features = false, path = "../secret_sharing_and_dkg" } + +[dev-dependencies] +blake2.workspace = true +ark-bls12-381.workspace = true +ark-secp256r1 = { version = "^0.4.0", default-features = false } + +[features] +default = [ "parallel" ] +std = [ "ark-ff/std", "ark-ec/std", "ark-std/std", "ark-serialize/std", "dock_crypto_utils/std", "serde/std"] +print-trace = [ "ark-std/print-trace", "dock_crypto_utils/print-trace" ] +parallel = [ "std", "ark-ff/parallel", "ark-ec/parallel", "ark-std/parallel", "rayon", "dock_crypto_utils/parallel" ] \ No newline at end of file diff --git a/verifiable_encryption/README.md b/verifiable_encryption/README.md new file mode 100644 index 00000000..bfa6cac9 --- /dev/null +++ b/verifiable_encryption/README.md @@ -0,0 +1,9 @@ + + +# Verifiable encryption schemes + +Verifiable encryption of discrete log(s) from the paper [Verifiable Encryption from MPC-in-the-Head](https://eprint.iacr.org/2021/1704.pdf). + +Adapted to allow encrypting messages of a generalized Pedersen commitment and some other optimizations. See the [corresponding module](src/tz_21) for more details. 
+ + diff --git a/verifiable_encryption/src/error.rs b/verifiable_encryption/src/error.rs new file mode 100644 index 00000000..39e21779 --- /dev/null +++ b/verifiable_encryption/src/error.rs @@ -0,0 +1,16 @@ +use ark_serialize::SerializationError; + +#[derive(Debug)] +pub enum VerifiableEncryptionError { + InvalidProof, + DecryptionFailed, + CiphertextNotFound(u16), + ShareNotFound(u16), + Serialization(SerializationError), +} + +impl From for VerifiableEncryptionError { + fn from(e: SerializationError) -> Self { + Self::Serialization(e) + } +} diff --git a/verifiable_encryption/src/lib.rs b/verifiable_encryption/src/lib.rs new file mode 100644 index 00000000..3d21c360 --- /dev/null +++ b/verifiable_encryption/src/lib.rs @@ -0,0 +1,8 @@ +//! # Verifiable encryption schemes +//! +//! Verifiable encryption of discrete log(s) from the paper [Verifiable Encryption from MPC-in-the-Head](https://eprint.iacr.org/2021/1704.pdf). +//! +//! Adapted to allow encrypting messages of a generalized Pedersen commitment and some other optimizations. See the [corresponding module](src/tz_21) for more details. + +pub mod error; +pub mod tz_21; diff --git a/verifiable_encryption/src/tz_21/dkgith.rs b/verifiable_encryption/src/tz_21/dkgith.rs new file mode 100644 index 00000000..4818752e --- /dev/null +++ b/verifiable_encryption/src/tz_21/dkgith.rs @@ -0,0 +1,501 @@ +//! Verifiable Encryption from DKG-in-the-head following Protocol 4 from the paper but adapted for the relation +//! `Y = G_1 * x_1 + G_2 * x_2 + ... G_k * x_k` where `x_i` are encrypted, `Y` and `G_i` are public. +//! Overview of the construction: +//! 1. For each repetition, the prover secret shares each witness (`x_i`) using additive secret sharing of the form: for a witness `w`, +//! create `N` shares by selecting `N` random values `s_1`, `s_2`, ... `s_n` and setting `delta = w - \sum_{i=1 to N}{s_i}`, `s_1 = s1 + delta`. Note that now +//! the sum of shares is the witness as `\sum_{i=1 to N}{s_i} = w`. 
It's important to select a random value for `s_1` first otherwise the construction won't be +//! secure when any index except for party 1 is revealed (because of delta). +//! 2. Each party's share is encrypted and committed to: party `j` commits to its shares of the witnesses `x_i` as `C_i = G_1 * s_{1,j} + G_2 * s_{2,j} + ... G_k * s_{k,j}` +//! where `s_{i,j}` is the j'th party's share of witness `x_i`. Each share's ciphertext is of the form `(shared_secret, OTP_{i,j} + s_{i,j})` where `OTP_{i,j}` is +//! the one time pad derived from the shared secret for the j-th share of witness `i`, i.e. `s_{i,j}`. +//! 3. Prover commits to all ciphertexts and commitments and for each repetition, picks (using random oracle) a random party index whose shares are not to be +//! revealed but reveals shares of all other parties to the verifier and shares the ciphertext of shares of the hidden party. +//! 4. Using the revealed shares and ciphertexts, verifier reconstructs all the ciphertexts and commitments and checks the prover's integrity. +//! 5. To compress the ciphertexts, verifier chooses a small number of repetitions from all the repetitions and for each repetition, adds the revealed shares +//! and the unrevealed share's ciphertext. Because of the homomorphic property of ciphertexts, the addition gives the ciphertext of the witness +//! as `OTP_{i,j} + s_{i,j} + \sum{k!=j}{s_{i,k}} = OTP_i + x_i` (`delta` also needs to be added to this sum depending on which index is being revealed). +//! 6. Decryptor generates the OTP using shared secret and decrypts to get the witnesses.
+ +use crate::{ + error::VerifiableEncryptionError, + tz_21::{ + seed_tree::{SeedTree, TreeOpening}, + util::get_indices_to_hide, + }, +}; +use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; +use ark_ff::Zero; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, rand::RngCore, vec, vec::Vec}; +use digest::{Digest, DynDigest}; +use dock_crypto_utils::{aliases::FullDigest, elgamal::HashedElgamalCiphertext, msm::WindowTable}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Ciphertext and the proof of encryption +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct DkgithProof< + const SEED_SIZE: usize, + const SALT_SIZE: usize, + const NUM_PARTIES: usize, + const DEPTH: usize, + const NUM_TOTAL_NODES: usize, + const NUM_REPETITIONS: usize, + G: AffineRepr, +> { + pub challenge: Vec, + /// Ciphertext of the unopened shares of each witness in each iteration + pub ciphertexts: [Vec>; NUM_REPETITIONS], + /// Openings required to reconstruct tree in each iteration to reveal the shares except one + pub tree_openings: [TreeOpening; NUM_REPETITIONS], + /// Delta for each witness in each iteration + pub deltas: [Vec; NUM_REPETITIONS], + pub salt: [u8; SALT_SIZE], +} + +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct CompressedCiphertext( + [Vec>; SUBSET_SIZE], +); + +impl< + const SEED_SIZE: usize, + const SALT_SIZE: usize, + const NUM_PARTIES: usize, + const DEPTH: usize, + const NUM_TOTAL_NODES: usize, + const NUM_REPETITIONS: usize, + G: AffineRepr, + > DkgithProof +{ + const CHECK_SALT_SIZE: () = assert!((2 * SEED_SIZE) == SALT_SIZE); + + pub fn new( + rng: &mut R, + witnesses: Vec, + commitment: &G, + comm_key: &[G], + enc_key: &G, + enc_gen: &G, + ) -> Self { + let _ = Self::CHECK_SALT_SIZE; + let witness_count = witnesses.len(); + assert_eq!(comm_key.len(), witness_count); + let mut hasher = D::default(); + let mut to_hash = 
Vec::with_capacity(commitment.compressed_size()); + + hash_elem!(commitment, hasher, to_hash); + for c in comm_key { + hash_elem!(c, hasher, to_hash); + } + hash_elem!(enc_key, hasher, to_hash); + hash_elem!(enc_gen, hasher, to_hash); + + let mut salt = [0u8; SALT_SIZE]; + rng.fill_bytes(&mut salt); + DynDigest::update(&mut hasher, &salt); + + // Populate the trees for each repetition + let root_seeds = + [SeedTree::::random_seed(rng); + NUM_REPETITIONS]; + let mut seed_trees = [SeedTree::::default(); + NUM_REPETITIONS]; + cfg_iter_mut!(seed_trees) + .zip(cfg_into_iter!(root_seeds)) + .enumerate() + .for_each(|(rep_index, (tree, root_seed))| { + *tree = SeedTree::::create_given_root_node( + root_seed, &salt, rep_index, + ); + }); + + let zero_ff = G::ScalarField::zero(); + let mut cts: [Vec<[HashedElgamalCiphertext; NUM_PARTIES]>; NUM_REPETITIONS] = [(); + NUM_REPETITIONS] + .map(|_| vec![[HashedElgamalCiphertext::::default(); NUM_PARTIES]; witness_count]); + let mut deltas: [Vec; NUM_REPETITIONS] = + [(); NUM_REPETITIONS].map(|_| vec![zero_ff; witness_count]); + let mut share_commitments = [[G::zero(); NUM_PARTIES]; NUM_REPETITIONS]; + + let enc_key_table = WindowTable::new( + NUM_REPETITIONS * NUM_PARTIES * witness_count, + enc_key.into_group(), + ); + let enc_gen_table = WindowTable::new( + NUM_REPETITIONS * NUM_PARTIES * witness_count, + enc_gen.into_group(), + ); + + // TODO: creating `share_commitments` can be optimized because comm_key remains the same. 
+ + cfg_iter_mut!(cts) + .zip(cfg_iter_mut!(share_commitments)) + .zip(cfg_iter_mut!(deltas)) + .enumerate() + .for_each(|(rep_index, ((ct, cm), d))| { + // For repetition index `rep_index` + let shares_rep = cfg_into_iter!(0..witness_count).zip(cfg_iter_mut!(d)).zip(cfg_iter_mut!(ct)).map(|((i, d_i), ct_i)| { + // For i'th witness, generate additive shares of the witness by using randomness from the seed tree of this repetition + let mut shares_i = [zero_ff; NUM_PARTIES]; + cfg_iter_mut!(shares_i).enumerate().for_each(|(j, s_j)| { + // Get the share for j'th party + *s_j = seed_trees[rep_index].get_leaf_as_finite_field_element::(j as u16, &salt, rep_index, &i.to_le_bytes()); + }); + + let sum = cfg_iter!(shares_i).sum::(); + *d_i = witnesses[i] - sum; + shares_i[0] += d_i; + + // Encrypt each party's share and use the tree to get the randomness for the encryption + cfg_iter_mut!(ct_i).enumerate().for_each(|(k, ct_ik)| { + let r = seed_trees[rep_index].get_leaf_as_finite_field_element::(k as u16, &salt, rep_index, &(witness_count + i).to_le_bytes()); + *ct_ik = HashedElgamalCiphertext::new_given_randomness_and_window_tables::(&shares_i[k], &r, &enc_key_table, &enc_gen_table); + }); + + shares_i + }).collect::>(); + + // Each party commits to its share of the witnesses + let c = cfg_into_iter!(0..NUM_PARTIES).map(|j| { + let shares_j = cfg_into_iter!(0..witness_count).map(|k| shares_rep[k][j]).collect::>(); + G::Group::msm_unchecked(comm_key, &shares_j) + }).collect::>(); + *cm = G::Group::normalize_batch(&c).try_into().unwrap(); + }); + + for i in 0..NUM_REPETITIONS { + for j in 0..NUM_PARTIES { + hash_elem!(share_commitments[i][j], hasher, to_hash); + for k in 0..witness_count { + hash_elem!(cts[i][k][j], hasher, to_hash); + } + } + } + + // Challenge can also be an array since the digest function is a parameter which makes the output size also known at compile time + let challenge = Box::new(hasher).finalize().to_vec(); + // Indices of parties whose share won't 
be shared with the verifier. Generated by a random oracle. + let indices_to_hide = + get_indices_to_hide::(&challenge, NUM_REPETITIONS as u16, NUM_PARTIES as u16); + // Ciphertexts for hidden shares + let mut ciphertexts: [Vec>; NUM_REPETITIONS] = + [(); NUM_REPETITIONS].map(|_| Vec::with_capacity(witness_count)); + // Openings to let the verifier learn all shares except the one which prover wants to hide. + let mut tree_openings: [TreeOpening; NUM_REPETITIONS] = + [[SeedTree::::zero_seed(); DEPTH]; + NUM_REPETITIONS]; + + for i in 0..NUM_REPETITIONS { + ciphertexts[i] = cts[i] + .iter() + .map(|ct| ct[indices_to_hide[i] as usize].clone()) + .collect::>(); + } + cfg_iter_mut!(tree_openings).enumerate().for_each(|(i, t)| { + *t = seed_trees[i].open_seeds(indices_to_hide[i]); + }); + + Self { + challenge, + ciphertexts, + tree_openings, + deltas, + salt, + } + } + + fn verify( + &self, + commitment: &G, + comm_key: &[G], + enc_key: &G, + enc_gen: &G, + ) -> Result<(), VerifiableEncryptionError> { + let _ = Self::CHECK_SALT_SIZE; + let witness_count = comm_key.len(); + for i in 0..NUM_REPETITIONS { + assert_eq!(self.ciphertexts[i].len(), witness_count); + assert_eq!(self.deltas[i].len(), witness_count); + } + let hidden_indices = + get_indices_to_hide::(&self.challenge, NUM_REPETITIONS as u16, NUM_PARTIES as u16); + let mut hasher = D::default(); + let mut to_hash = Vec::with_capacity(commitment.compressed_size()); + + hash_elem!(commitment, hasher, to_hash); + for c in comm_key { + hash_elem!(c, hasher, to_hash); + } + hash_elem!(enc_key, hasher, to_hash); + hash_elem!(enc_gen, hasher, to_hash); + DynDigest::update(&mut hasher, &self.salt); + + let mut cts: [Vec<[HashedElgamalCiphertext; NUM_PARTIES]>; NUM_REPETITIONS] = [(); + NUM_REPETITIONS] + .map(|_| vec![[HashedElgamalCiphertext::::default(); NUM_PARTIES]; witness_count]); + let mut comms = [[G::zero(); NUM_PARTIES]; NUM_REPETITIONS]; + + let zero_ff = G::ScalarField::zero(); + + let enc_key_table = 
WindowTable::new( + NUM_REPETITIONS * NUM_PARTIES * witness_count, + enc_key.into_group(), + ); + let enc_gen_table = WindowTable::new( + NUM_REPETITIONS * NUM_PARTIES * witness_count, + enc_gen.into_group(), + ); + + cfg_iter_mut!(cts) + .zip(cfg_iter_mut!(comms)) + .enumerate() + .for_each(|(rep_index, (ct, cm))| { + // For repetition index `rep_index` + + // Reconstruct revealed shares + let seed_tree = SeedTree::::reconstruct_tree( + hidden_indices[rep_index], + &self.tree_openings[rep_index], + &self.salt, + rep_index, + ); + let hidden_party_index = hidden_indices[rep_index] as usize; + let shares_rep = cfg_into_iter!(0..witness_count).zip(cfg_iter_mut!(ct)).map(|(i, ct_i)| { + // For i'th witness, create its shares + let mut shares_i = [zero_ff; NUM_PARTIES]; + cfg_iter_mut!(shares_i).enumerate().for_each(|(j, s_j)| { + // For j'th party + if hidden_party_index != j { + *s_j = seed_tree.get_leaf_as_finite_field_element::(j as u16, &self.salt, rep_index, &i.to_le_bytes()); + } + }); + shares_i[0] += self.deltas[rep_index][i]; + + // Create ciphertexts for revealed share + cfg_iter_mut!(ct_i).enumerate().for_each(|(j, ct_ij)| { + if hidden_party_index != j { + let r = seed_tree.get_leaf_as_finite_field_element::(j as u16, &self.salt, rep_index, &(witness_count + i).to_le_bytes()); + *ct_ij = HashedElgamalCiphertext::new_given_randomness_and_window_tables::(&shares_i[j], &r, &enc_key_table, &enc_gen_table); + } else { + *ct_ij = self.ciphertexts[rep_index][i].clone(); + } + }); + + shares_i + }).collect::>(); + + // Reconstruct commitments to the shares + let mut c = vec![G::Group::zero(); NUM_PARTIES]; + cfg_iter_mut!(c).enumerate().for_each(|(j, c_j)| { + if hidden_party_index != j { + let shares_j = cfg_into_iter!(0..witness_count).map(|k| shares_rep[k][j]).collect::>(); + *c_j = G::Group::msm_unchecked(comm_key, &shares_j); + } + }); + // Since the sum of all shares is the witness, sum of all commitments to the shares will be the final commitment and + // 
thus the commitment to the unrevealed share is the difference of final commitment and sum of revealed shares' commitments + c[hidden_party_index] = commitment.into_group() - cfg_iter!(c).sum::(); + *cm = G::Group::normalize_batch(&c).try_into().unwrap(); + }); + + for i in 0..NUM_REPETITIONS { + for j in 0..NUM_PARTIES { + hash_elem!(comms[i][j], hasher, to_hash); + for k in 0..witness_count { + hash_elem!(cts[i][k][j], hasher, to_hash); + } + } + } + let challenge = Box::new(hasher).finalize().to_vec(); + if challenge != self.challenge { + return Err(VerifiableEncryptionError::InvalidProof); + } + Ok(()) + } + + /// Described in Appendix D.1 in the paper + pub fn compress( + &self, + ) -> CompressedCiphertext { + let _ = Self::CHECK_SALT_SIZE; + const { assert!(SUBSET_SIZE <= NUM_REPETITIONS) }; + + let hidden_indices = + get_indices_to_hide::(&self.challenge, NUM_REPETITIONS as u16, NUM_PARTIES as u16); + // Choose a random subset of size `SUBSET_SIZE` from the indices of `hidden_indices` + // let subset = (0..NUM_REPETITIONS).collect().iter().choose_multiple(rng, SUBSET_SIZE); + // TODO: Check if this is secure. The objective is to avoid the use of random number generation on the verifier side + let subset = get_indices_to_hide::( + &D::digest(&self.challenge), + SUBSET_SIZE as u16, + NUM_REPETITIONS as u16, + ); + + let witness_count = self.ciphertexts[0].len(); + let mut compressed_cts: [Vec>; SUBSET_SIZE] = + [(); SUBSET_SIZE].map(|_| vec![HashedElgamalCiphertext::::default(); witness_count]); + + cfg_iter_mut!(compressed_cts) + .enumerate() + .for_each(|(i, ct)| { + let rep_index = subset[i] as usize; + // Get the revealed shares + let seed_tree = + SeedTree::::reconstruct_tree( + hidden_indices[rep_index], + &self.tree_openings[rep_index], + &self.salt, + rep_index, + ); + // Add the revealed shares to the ciphertext of the unrevealed share to get a ciphertext of the witness. 
+ cfg_iter_mut!(ct).enumerate().for_each(|(j, ct_j)| { + // Get sum of shares of the j'th witness + let share_sum = cfg_into_iter!(0..NUM_PARTIES) + .map(|k| { + if hidden_indices[rep_index] != k as u16 { + seed_tree.get_leaf_as_finite_field_element::( + k as u16, + &self.salt, + rep_index, + &j.to_le_bytes(), + ) + } else { + G::ScalarField::zero() + } + }) + .sum::(); + *ct_j = self.ciphertexts[rep_index][j].clone(); + ct_j.encrypted += share_sum; + // 0th share contains delta already + if hidden_indices[rep_index] != 0 { + ct_j.encrypted += self.deltas[rep_index][j]; + } + }) + }); + CompressedCiphertext(compressed_cts) + } + + // NOTE: Ideally the verifier will compress the ciphertext and since functions `verify` and `compress` share some code, it will be more efficient to + // have a function called `verify_and_compress` than calling `verify` and then `compress` +} + +impl CompressedCiphertext { + pub fn decrypt( + &self, + dec_key: &G::ScalarField, + commitment: &G, + comm_key: &[G], + ) -> Result, VerifiableEncryptionError> { + let witness_count = comm_key.len(); + for i in 0..SUBSET_SIZE { + let ct_i = &self.0[i]; + assert_eq!(ct_i.len(), witness_count); + let mut witnesses = vec![G::ScalarField::zero(); witness_count]; + cfg_iter_mut!(witnesses).enumerate().for_each(|(j, w)| { + *w = ct_i[j].decrypt::(dec_key); + }); + if *commitment == G::Group::msm_unchecked(comm_key, &witnesses).into_affine() { + return Ok(witnesses); + } + } + Err(VerifiableEncryptionError::DecryptionFailed) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_bls12_381::G1Affine; + use ark_ec::{CurveGroup, VariableBaseMSM}; + use ark_std::{ + rand::{prelude::StdRng, SeedableRng}, + UniformRand, + }; + use blake2::Blake2b512; + use dock_crypto_utils::elgamal::keygen; + use std::time::Instant; + + #[test] + fn prove_verify() { + fn check(count: usize) { + let mut rng = StdRng::seed_from_u64(0u64); + + let gen = G::rand(&mut rng); + let (sk, pk) = keygen::<_, G>(&mut rng, &gen); 
+ + let witnesses = (0..count) + .map(|_| G::ScalarField::rand(&mut rng)) + .collect::>(); + let comm_key = (0..count).map(|_| G::rand(&mut rng)).collect::>(); + let commitment = G::Group::msm_unchecked(&comm_key, &witnesses).into_affine(); + + const SEED_SIZE: usize = 16; + const SALT_SIZE: usize = 32; + + // const TEST_VECTOR: [(u16, u16, u16); 4] = + // [(64, 48, 15), (85, 20, 20), (16, 32, 30), (4, 64, 48)]; + + // for (N, tau, n) in TEST_VECTOR.iter() { + // let proof = DkgithProof::::new(&mut rng, witnesses.clone(), &commitment, &comm_key); + // } + + macro_rules! run_test { + ($parties: expr, $reps: expr, $depth: expr, $nodes: expr, $subset_size: expr) => {{ + println!( + "\n# witnesses = {}, # parties = {}, # repetitions = {}, subset size = {}", + count, $parties, $reps, $subset_size + ); + let start = Instant::now(); + let proof = DkgithProof::< + SEED_SIZE, + SALT_SIZE, + $parties, + $depth, + $nodes, + $reps, + _, + >::new::<_, Blake2b512>( + &mut rng, + witnesses.clone(), + &commitment, + &comm_key, + &pk.0, + &gen, + ); + println!("Proof generated in: {:?}", start.elapsed()); + + let start = Instant::now(); + proof + .verify::(&commitment, &comm_key, &pk.0, &gen) + .unwrap(); + println!("Proof verified in: {:?}", start.elapsed()); + println!("Proof size: {:?}", proof.compressed_size()); + + let start = Instant::now(); + let ct = proof.compress::<$subset_size, Blake2b512>(); + println!("Ciphertext compressed in: {:?}", start.elapsed()); + println!("Ciphertext size: {:?}", ct.compressed_size()); + + let start = Instant::now(); + let decrypted_witnesses = ct + .decrypt::(&sk.0, &commitment, &comm_key) + .unwrap(); + println!("Ciphertext decrypted in: {:?}", start.elapsed()); + assert_eq!(decrypted_witnesses, witnesses); + }}; + } + + run_test!(64, 48, 6, 127, 15); + run_test!(16, 32, 4, 31, 30); + run_test!(4, 64, 2, 7, 48); + } + + check::(1); + check::(2); + check::(3); + check::(4); + check::(8); + } +} diff --git 
a/verifiable_encryption/src/tz_21/dkgith_batched_elgamal.rs b/verifiable_encryption/src/tz_21/dkgith_batched_elgamal.rs new file mode 100644 index 00000000..72fef556 --- /dev/null +++ b/verifiable_encryption/src/tz_21/dkgith_batched_elgamal.rs @@ -0,0 +1,494 @@ +//! Verifiable Encryption from DKG-in-the-head following Protocol 4 from the paper but adapted for the relation +//! `Y = G_1 * x_1 + G_2 * x_2 + ... G_k * x_k` where `x_i` are encrypted, `Y` and `G_i` are public. +//! The only difference with implementation in the module `dkgith` is that a more efficient variant of Elgamal called batched +//! Elgamal is used where rather than generating a new shared secret for each witness's share, only 1 shared secret is generated +//! per party and then independent OTPs are derived for each witness share by "appending" counters to that shared secret. + +// TODO: This contains a lot of duplicate code from module `dkgith`. Abstract the encryption scheme to reduce duplication + +use crate::{ + error::VerifiableEncryptionError, + tz_21::{ + seed_tree::{SeedTree, TreeOpening}, + util::get_indices_to_hide, + }, +}; +use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; +use ark_ff::Zero; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, rand::RngCore, vec, vec::Vec}; +use digest::{Digest, DynDigest}; +use dock_crypto_utils::{ + aliases::FullDigest, elgamal::BatchedHashedElgamalCiphertext, msm::WindowTable, +}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct DkgithProof< + const SEED_SIZE: usize, + const SALT_SIZE: usize, + const NUM_PARTIES: usize, + const DEPTH: usize, + const NUM_TOTAL_NODES: usize, + const NUM_REPETITIONS: usize, + G: AffineRepr, +> { + pub challenge: Vec, + /// Ciphertext of the unopened shares of each witness in each iteration + pub ciphertexts: [BatchedHashedElgamalCiphertext; NUM_REPETITIONS], + 
/// Openings required to reconstruct tree in each iteration to reveal the shares except one + pub tree_openings: [TreeOpening; NUM_REPETITIONS], + pub deltas: [Vec; NUM_REPETITIONS], + /// Delta for each witness in each iteration + pub salt: [u8; SALT_SIZE], +} + +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct CompressedCiphertext( + [BatchedHashedElgamalCiphertext; SUBSET_SIZE], +); + +impl< + const SEED_SIZE: usize, + const SALT_SIZE: usize, + const NUM_PARTIES: usize, + const DEPTH: usize, + const NUM_TOTAL_NODES: usize, + const NUM_REPETITIONS: usize, + G: AffineRepr, + > DkgithProof +{ + const CHECK_SALT_SIZE: () = assert!((2 * SEED_SIZE) == SALT_SIZE); + + pub fn new( + rng: &mut R, + witnesses: Vec, + commitment: &G, + comm_key: &[G], + enc_key: &G, + enc_gen: &G, + ) -> Self { + let _ = Self::CHECK_SALT_SIZE; + let witness_count = witnesses.len(); + assert_eq!(comm_key.len(), witness_count); + let mut hasher = D::default(); + let mut to_hash = Vec::with_capacity(commitment.compressed_size()); + + hash_elem!(commitment, hasher, to_hash); + for c in comm_key { + hash_elem!(c, hasher, to_hash); + } + hash_elem!(enc_key, hasher, to_hash); + hash_elem!(enc_gen, hasher, to_hash); + + let mut salt = [0u8; SALT_SIZE]; + rng.fill_bytes(&mut salt); + DynDigest::update(&mut hasher, &salt); + + // Populate the trees for each repetition + let root_seeds = + [SeedTree::::random_seed(rng); + NUM_REPETITIONS]; + let mut seed_trees = [SeedTree::::default(); + NUM_REPETITIONS]; + cfg_iter_mut!(seed_trees) + .zip(cfg_into_iter!(root_seeds)) + .enumerate() + .for_each(|(rep_index, (tree, root_seed))| { + *tree = SeedTree::::create_given_root_node( + root_seed, &salt, rep_index, + ); + }); + + let zero_ff = G::ScalarField::zero(); + // let mut cts: [[BatchedHashedElgamalCiphertext; NUM_PARTIES]; NUM_REPETITIONS] = + // [(); NUM_REPETITIONS].map(|_| [(); NUM_PARTIES].map(|_| BatchedHashedElgamalCiphertext::::default())); + // let mut cts: 
[[BatchedHashedElgamalCiphertext; NUM_PARTIES]; NUM_REPETITIONS] = + // [(); NUM_REPETITIONS].map(|_| [BatchedHashedElgamalCiphertext::::default(); NUM_PARTIES]); + // let mut cts: [[BatchedHashedElgamalCiphertext; NUM_PARTIES]; NUM_REPETITIONS] = from_fn(|_| from_fn(|_| BatchedHashedElgamalCiphertext::::default())); + // let mut cts: [[BatchedHashedElgamalCiphertext; NUM_PARTIES]; NUM_REPETITIONS] = [(); NUM_REPETITIONS].map(|_| <[BatchedHashedElgamalCiphertext; NUM_PARTIES]>::repeat(BatchedHashedElgamalCiphertext::::default()).as_ref_array()); + // let mut cts: [[BatchedHashedElgamalCiphertext; NUM_PARTIES]; NUM_REPETITIONS] = [(); NUM_REPETITIONS].map(|_| arr![BatchedHashedElgamalCiphertext::::default(); NUM_PARTIES]); + // let mut cts: [[BatchedHashedElgamalCiphertext; NUM_PARTIES]; NUM_REPETITIONS] = [(); NUM_REPETITIONS].map(|_| array![BatchedHashedElgamalCiphertext::::default(); NUM_PARTIES]); + let mut cts: [[BatchedHashedElgamalCiphertext; NUM_PARTIES]; NUM_REPETITIONS] = (0 + ..NUM_REPETITIONS) + .map(|_| { + (0..NUM_PARTIES) + .map(|_| BatchedHashedElgamalCiphertext::::default()) + .collect::>() + .try_into() + .unwrap() + }) + .collect::>() + .try_into() + .unwrap(); + + let mut deltas: [Vec; NUM_REPETITIONS] = + [(); NUM_REPETITIONS].map(|_| vec![zero_ff; witness_count]); + let mut share_commitments = [[G::zero(); NUM_PARTIES]; NUM_REPETITIONS]; + + let enc_key_table = WindowTable::new( + NUM_REPETITIONS * NUM_PARTIES * witness_count, + enc_key.into_group(), + ); + let enc_gen_table = WindowTable::new( + NUM_REPETITIONS * NUM_PARTIES * witness_count, + enc_gen.into_group(), + ); + + cfg_iter_mut!(cts) + .zip(cfg_iter_mut!(share_commitments)) + .zip(cfg_iter_mut!(deltas)) + .enumerate() + .for_each(|(rep_index, ((ct, cm), d))| { + // For repetition index `rep_index` + let shares_rep = cfg_into_iter!(0..witness_count).zip(cfg_iter_mut!(d)).map(|(i, d_i)| { + // For i'th witness, generate additive shares of the witness by using randomness from the seed 
tree of this repetition + let mut shares_i = [zero_ff; NUM_PARTIES]; + cfg_iter_mut!(shares_i).enumerate().for_each(|(j, s_j)| { + // For j'th party + *s_j = seed_trees[rep_index].get_leaf_as_finite_field_element::(j as u16, &salt, rep_index, &i.to_le_bytes()); + }); + + let sum = cfg_iter!(shares_i).sum::(); + *d_i = witnesses[i] - sum; + shares_i[0] += d_i; + + shares_i + }).collect::>(); + + // Each party commits to its share of the witnesses + // Encrypt each party's share and use the tree to get the randomness for the encryption + let c = cfg_iter_mut!(ct).enumerate().map(|(j, ct_j)|{ + let shares_j = cfg_into_iter!(0..witness_count).map(|k| shares_rep[k][j]).collect::>(); + let r = seed_trees[rep_index].get_leaf_as_finite_field_element::(j as u16, &salt, rep_index, &(witness_count + j).to_le_bytes()); + *ct_j = BatchedHashedElgamalCiphertext::new_given_randomness_and_window_tables::(&shares_j, &r, &enc_key_table, &enc_gen_table); + G::Group::msm_unchecked(comm_key, &shares_j) + }).collect::>(); + *cm = G::Group::normalize_batch(&c).try_into().unwrap(); + }); + + for i in 0..NUM_REPETITIONS { + for j in 0..NUM_PARTIES { + hash_elem!(share_commitments[i][j], hasher, to_hash); + hash_elem!(cts[i][j], hasher, to_hash); + } + } + + let challenge = Box::new(hasher).finalize().to_vec(); + // Indices of parties whose share won't be shared with the verifier. Generated by a random oracle. + let indices_to_hide = + get_indices_to_hide::(&challenge, NUM_REPETITIONS as u16, NUM_PARTIES as u16); + // Ciphertexts for hidden shares + let mut ciphertexts: [BatchedHashedElgamalCiphertext; NUM_REPETITIONS] = + [(); NUM_REPETITIONS].map(|_| BatchedHashedElgamalCiphertext::::default()); + // Openings to let the verifier learn all shares except the one which prover wants to hide. 
+ let mut tree_openings: [TreeOpening; NUM_REPETITIONS] = + [[SeedTree::::zero_seed(); DEPTH]; + NUM_REPETITIONS]; + + for i in 0..NUM_REPETITIONS { + ciphertexts[i] = cts[i][indices_to_hide[i] as usize].clone(); + } + cfg_iter_mut!(tree_openings).enumerate().for_each(|(i, t)| { + *t = seed_trees[i].open_seeds(indices_to_hide[i]); + }); + + Self { + challenge, + ciphertexts, + tree_openings, + deltas, + salt, + } + } + + fn verify( + &self, + commitment: &G, + comm_key: &[G], + enc_key: &G, + enc_gen: &G, + ) -> Result<(), VerifiableEncryptionError> { + let _ = Self::CHECK_SALT_SIZE; + let witness_count = comm_key.len(); + for i in 0..NUM_REPETITIONS { + assert_eq!(self.ciphertexts[i].batch_size(), witness_count); + assert_eq!(self.deltas[i].len(), witness_count); + } + let hidden_indices = + get_indices_to_hide::(&self.challenge, NUM_REPETITIONS as u16, NUM_PARTIES as u16); + let mut hasher = D::default(); + let mut to_hash = Vec::with_capacity(commitment.compressed_size()); + + hash_elem!(commitment, hasher, to_hash); + for c in comm_key { + hash_elem!(c, hasher, to_hash); + } + hash_elem!(enc_key, hasher, to_hash); + hash_elem!(enc_gen, hasher, to_hash); + DynDigest::update(&mut hasher, &self.salt); + + let mut cts: [[BatchedHashedElgamalCiphertext; NUM_PARTIES]; NUM_REPETITIONS] = (0 + ..NUM_REPETITIONS) + .map(|_| { + (0..NUM_PARTIES) + .map(|_| BatchedHashedElgamalCiphertext::::default()) + .collect::>() + .try_into() + .unwrap() + }) + .collect::>() + .try_into() + .unwrap(); + let mut comms = [[G::zero(); NUM_PARTIES]; NUM_REPETITIONS]; + + let zero_ff = G::ScalarField::zero(); + + let enc_key_table = WindowTable::new( + NUM_REPETITIONS * NUM_PARTIES * witness_count, + enc_key.into_group(), + ); + let enc_gen_table = WindowTable::new( + NUM_REPETITIONS * NUM_PARTIES * witness_count, + enc_gen.into_group(), + ); + + cfg_iter_mut!(cts) + .zip(cfg_iter_mut!(comms)) + .enumerate() + .for_each(|(rep_index, (ct, cm))| { + // For repetition index `rep_index` + + 
// Reconstruct revealed shares + let seed_tree = SeedTree::::reconstruct_tree( + hidden_indices[rep_index], + &self.tree_openings[rep_index], + &self.salt, + rep_index, + ); + let hidden_party_index = hidden_indices[rep_index] as usize; + let shares_rep = cfg_into_iter!(0..witness_count).map(|i| { + // For i'th witness, create its shares + let mut shares_i = [zero_ff; NUM_PARTIES]; + cfg_iter_mut!(shares_i).enumerate().for_each(|(j, s_j)| { + // For j'th party + if hidden_party_index != j { + *s_j = seed_tree.get_leaf_as_finite_field_element::(j as u16, &self.salt, rep_index, &i.to_le_bytes()); + } + }); + shares_i[0] += self.deltas[rep_index][i]; + shares_i + }).collect::>(); + + // Create ciphertexts for revealed share + // Reconstruct commitments to the shares + let mut c = vec![G::Group::zero(); NUM_PARTIES]; + cfg_iter_mut!(c).zip(cfg_iter_mut!(ct)).enumerate().for_each(|(j, (c_j, ct_j))| { + if hidden_party_index != j { + let shares_j = cfg_into_iter!(0..witness_count).map(|k| shares_rep[k][j]).collect::>(); + let r = seed_tree.get_leaf_as_finite_field_element::(j as u16, &self.salt, rep_index, &(witness_count + j).to_le_bytes()); + *c_j = G::Group::msm_unchecked(comm_key, &shares_j); + *ct_j = BatchedHashedElgamalCiphertext::new_given_randomness_and_window_tables::(&shares_j, &r, &enc_key_table, &enc_gen_table); + } else { + *ct_j = self.ciphertexts[rep_index].clone(); + } + }); + // Since the sum of all shares is the witness, sum of all commitments to the shares will be the final commitment and + // thus the commitment to the unrevealed share is the difference of final commitment and sum of revealed shares' commitments + c[hidden_party_index] = commitment.into_group() - cfg_iter!(c).sum::(); + *cm = G::Group::normalize_batch(&c).try_into().unwrap(); + }); + + for i in 0..NUM_REPETITIONS { + for j in 0..NUM_PARTIES { + hash_elem!(comms[i][j], hasher, to_hash); + hash_elem!(cts[i][j], hasher, to_hash); + } + } + let challenge = 
Box::new(hasher).finalize().to_vec(); + if challenge != self.challenge { + return Err(VerifiableEncryptionError::InvalidProof); + } + Ok(()) + } + + pub fn compress( + &self, + ) -> CompressedCiphertext { + let _ = Self::CHECK_SALT_SIZE; + const { assert!(SUBSET_SIZE <= NUM_REPETITIONS) }; + let hidden_indices = + get_indices_to_hide::(&self.challenge, NUM_REPETITIONS as u16, NUM_PARTIES as u16); + // Choose a random subset of size `SUBSET_SIZE` from the indices of `hidden_indices` + // TODO: Check if this is secure. The objective is to avoid the use of random number generation on the verifier side + let subset = get_indices_to_hide::( + &D::digest(&self.challenge), + SUBSET_SIZE as u16, + NUM_REPETITIONS as u16, + ); + let witness_count = self.deltas[0].len(); + let mut compressed_cts: [BatchedHashedElgamalCiphertext; SUBSET_SIZE] = + [(); SUBSET_SIZE].map(|_| BatchedHashedElgamalCiphertext::::default()); + + cfg_iter_mut!(compressed_cts) + .enumerate() + .for_each(|(i, ct)| { + let rep_index = subset[i] as usize; + + // Get the revealed shares + let seed_tree = + SeedTree::::reconstruct_tree( + hidden_indices[rep_index], + &self.tree_openings[rep_index], + &self.salt, + rep_index, + ); + let shares_sum = cfg_into_iter!(0..witness_count).map(|j| { + // Get sum of shares of the j'th witness + let sum = cfg_into_iter!(0..NUM_PARTIES) + .map(|k| { + if hidden_indices[rep_index] != k as u16 { + seed_tree.get_leaf_as_finite_field_element::( + k as u16, + &self.salt, + rep_index, + &j.to_le_bytes(), + ) + } else { + G::ScalarField::zero() + } + }) + .sum::(); + // 0th share contains delta already + if hidden_indices[rep_index] != 0 { + sum + self.deltas[rep_index][j] + } else { + sum + } + }); + *ct = self.ciphertexts[rep_index].clone(); + // Add the revealed shares to the ciphertext of the unrevealed share to get a ciphertext of the witness. 
+ cfg_iter_mut!(ct.encrypted) + .zip(cfg_into_iter!(shares_sum)) + .for_each(|(e_j, s_j)| { + *e_j += s_j; + }) + }); + + CompressedCiphertext(compressed_cts) + } + + // NOTE: Ideally the verifier will compress the ciphertext and since functions `verify` and `compress` share some code, it will be more efficient to + // have a function called `verify_and_compress` than calling `verify` and then `compress` +} + +impl CompressedCiphertext { + pub fn decrypt( + &self, + dec_key: &G::ScalarField, + commitment: &G, + comm_key: &[G], + ) -> Result, VerifiableEncryptionError> { + let witness_count = comm_key.len(); + for i in 0..SUBSET_SIZE { + let ct_i = &self.0[i]; + assert_eq!(ct_i.batch_size(), witness_count); + let witnesses = ct_i.decrypt::(dec_key); + if *commitment == G::Group::msm_unchecked(comm_key, &witnesses).into_affine() { + return Ok(witnesses); + } + } + Err(VerifiableEncryptionError::DecryptionFailed) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_bls12_381::G1Affine; + use ark_ec::{CurveGroup, VariableBaseMSM}; + use ark_std::{ + rand::{prelude::StdRng, SeedableRng}, + UniformRand, + }; + use blake2::Blake2b512; + use dock_crypto_utils::elgamal::keygen; + use std::time::Instant; + + #[test] + fn prove_verify() { + fn check(count: usize) { + let mut rng = StdRng::seed_from_u64(0u64); + + let gen = G::rand(&mut rng); + let (sk, pk) = keygen::<_, G>(&mut rng, &gen); + + let witnesses = (0..count) + .map(|_| G::ScalarField::rand(&mut rng)) + .collect::>(); + let comm_key = (0..count).map(|_| G::rand(&mut rng)).collect::>(); + let commitment = G::Group::msm_unchecked(&comm_key, &witnesses).into_affine(); + + const SEED_SIZE: usize = 16; + const SALT_SIZE: usize = 32; + + macro_rules! 
run_test { + ($parties: expr, $reps: expr, $depth: expr, $nodes: expr, $subset_size: expr) => {{ + println!( + "\n# witnesses = {}, # parties = {}, # repetitions = {}, subset size = {}", + count, $parties, $reps, $subset_size + ); + let start = Instant::now(); + let proof = DkgithProof::< + SEED_SIZE, + SALT_SIZE, + $parties, + $depth, + $nodes, + $reps, + _, + >::new::<_, Blake2b512>( + &mut rng, + witnesses.clone(), + &commitment, + &comm_key, + &pk.0, + &gen, + ); + println!("Proof generated in: {:?}", start.elapsed()); + + let start = Instant::now(); + proof + .verify::(&commitment, &comm_key, &pk.0, &gen) + .unwrap(); + println!("Proof verified in: {:?}", start.elapsed()); + println!("Proof size: {:?}", proof.compressed_size()); + + let start = Instant::now(); + let ct = proof.compress::<$subset_size, Blake2b512>(); + println!("Ciphertext compressed in: {:?}", start.elapsed()); + println!("Ciphertext size: {:?}", ct.compressed_size()); + + let start = Instant::now(); + let decrypted_witnesses = ct + .decrypt::(&sk.0, &commitment, &comm_key) + .unwrap(); + println!("Ciphertext decrypted in: {:?}", start.elapsed()); + assert_eq!(decrypted_witnesses, witnesses); + }}; + } + + run_test!(64, 48, 6, 127, 15); + run_test!(16, 32, 4, 31, 30); + run_test!(4, 64, 2, 7, 48); + } + + check::(1); + check::(2); + check::(3); + check::(4); + check::(8); + } +} diff --git a/verifiable_encryption/src/tz_21/mod.rs b/verifiable_encryption/src/tz_21/mod.rs new file mode 100644 index 00000000..5b52f0b0 --- /dev/null +++ b/verifiable_encryption/src/tz_21/mod.rs @@ -0,0 +1,28 @@ +//! Verifiable encryption of discrete log(s) from the paper [Verifiable Encryption from MPC-in-the-Head](https://eprint.iacr.org/2021/1704.pdf) +//! Implements the following 2 schemes from the paper +//! +//! 1. DKG in the head, described in Protocol 4 +//! 2. Robust DKG in the head, described in Protocol 5 +//! +//! 
Started of from [this](https://github.com/akiratk0355/verenc-mpcith/tree/main/dkgith/src) reference implementation in the paper +//! +//! Both are generalized such that the encryption is of not just a single discrete log but multiple witnesses, +//! thus allowing to encrypt messages of a generalized Pedersen commitment. eg. given a generalized Pedersen +//! commitment `Y = G_1 * x_1 + G_2 * x_2 + ... G_n * x_n`, prover encrypts `x_1, x_2, ..., x_n` while +//! proving that those are opening of the commitment `Y`. +//! +//! For both schemes, a variation is included where multiple witnesses are encrypted using a more efficient +//! version of Elgamal encryption, called batched-Elgamal where a single shared secret is generated when +//! encrypting multiple messages and that shared secret is combined with a counter to generate a unique OTP +//! for each message. +//! +//! More docs in the corresponding modules. + +#[macro_use] +pub mod util; + +pub mod dkgith; +pub mod dkgith_batched_elgamal; +pub mod rdkgith; +pub mod rdkgith_batched_elgamal; +pub mod seed_tree; diff --git a/verifiable_encryption/src/tz_21/rdkgith.rs b/verifiable_encryption/src/tz_21/rdkgith.rs new file mode 100644 index 00000000..f6d9198e --- /dev/null +++ b/verifiable_encryption/src/tz_21/rdkgith.rs @@ -0,0 +1,556 @@ +//! Verifiable Encryption from DKG-in-the-head following Protocol 5 from the paper but adapted for the relation +//! `Y = G_1 * x_1 + G_2 * x_2 + ... G_k * x_k` where `x_i` are encrypted, `Y` and `G_i` are public. +//! Overview of the construction: +//! 1. For an encryption of `k` witnesses `x_1, x_2, ..., x_k`, prover secret shares each witness using Shamir secret sharing. +//! It samples `k` random polynomials `F_1, F_2, ... F_k` of degree `t` such that `t+1` evaluations of each polynomial are needed +//! to reconstruct the polynomial. `F_i(0) = x_i` and `F_i(j) = x_i + f_{i, 1}*j + f_{i, 1}*j^2 + ... f_{i, t}*j^t` and `f_{i, k}` is +//! the k-th coefficient of the polynomial `F_i`. 
+//! 2. Prover evaluates each polynomial at `N` points and each of these form a share of a party and encrypts each share. Each share's +//! ciphertext is of the form `(shared_secret, OTP_{i,j} + F_i(j))` where `OTP_{i,j}` is the one time pad derived from the shared secret +//! for the j-th share of witness `i`, i.e. `F_i(j)`. +//! 3. Prover commits to the coefficients of polynomials `t` commitments where commitment to j-th coefficient is `C_j = G_1 * f_{1, j} + G_2 * f_{2, j} + ... + G_k * f_{k, j}`. +//! 4. Prover sends `t` shares, commitment to the coefficients of the polynomials and `N-t` ciphertexts to the verifier which the +//! verifier cannot use to recover any witness since `t+1` shares are needed to reconstruct any witness. +//! 5. The verifier using the commitment to the coefficients of the polynomials and the revealed shares verifies the correctness of shares (same idea as +//! Feldman secret sharing) and integrity of prover's computation. +//! 6. To compress the `N-t` ciphertexts, verifier chooses a small number of ciphertexts and for each of those ciphertexts, multiplies it by +//! the appropriate Lagrange coefficient and adds it to sum of each revealed share with the corresponding Lagrange coefficient. The sum gives the +//! encryption of the witness. Eg, for the j-th share of i-th witness, ciphertext is `CT_j = OTP_{i,j} + F_i(j)`. Multiplying `CT_j` by its +//! Lagrange coefficient `l_j` and multiplying each of the `t` revealed shares of i-th witness with their Lagrange coefficient `l_k` as `F_i(k) * l_k` +//! and adding these gives `CT_j * l_j + \sum_{k!=j}{F_i(k) * l_k} = OTP_{i,j} * l_j + F_i(j) * l_j + \sum_{k!=j}{F_i(k) * l_k} = OTP_{i,j} * l_j + x_i`. +//! 7. 
Now decryptor can decrypt a ciphertext by computing Lagrange coefficient `l_j` and one time pad `OTP_{i,j}` to get witness `x_i` + +use crate::{error::VerifiableEncryptionError, tz_21::util::get_unique_indices_to_hide}; +use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; +use ark_ff::{Field, Zero}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, rand::RngCore, UniformRand}; +use digest::{Digest, DynDigest}; +use dock_crypto_utils::{ + aliases::FullDigest, + elgamal::HashedElgamalCiphertext, + ff::{powers, powers_starting_from}, + hashing_utils::hash_to_field, + msm::WindowTable, +}; +use secret_sharing_and_dkg::{ + common::{lagrange_basis_at_0, lagrange_basis_at_0_for_all}, + shamir_ss::deal_secret, +}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct RdkgithProof< + const NUM_PARTIES: usize, + const THRESHOLD: usize, + const NUM_PARTIES_MINUS_THRESHOLD: usize, + G: AffineRepr, +> { + pub challenge: Vec, + /// Commitment to the coefficients of polynomials + pub poly_commitments: [G; THRESHOLD], + /// Ciphertexts of the shares. The first element of the tuple is the party index + // Following could be made a map indexed with u16 to speed up computation (lookups) by trading off memory + pub ciphertexts: [(u16, Vec>); NUM_PARTIES_MINUS_THRESHOLD], + /// Revealed shares and randomness used for encryption. The first element of the tuple is the party index + pub shares_and_enc_rands: [(u16, Vec<(G::ScalarField, G::ScalarField)>); THRESHOLD], +} + +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct CompressedCiphertext( + [Vec>; SUBSET_SIZE], + /// This is helper data for making the decryptor more efficient. The decryptor could compute this + /// on its own from the proof. 
+ [G::ScalarField; SUBSET_SIZE], +); + +impl< + const NUM_PARTIES: usize, + const THRESHOLD: usize, + const NUM_PARTIES_MINUS_THRESHOLD: usize, + G: AffineRepr, + > RdkgithProof +{ + // assert_eq! does not compile in stable Rust + const CHECK_THRESHOLD: () = assert!(THRESHOLD + NUM_PARTIES_MINUS_THRESHOLD == NUM_PARTIES); + + pub fn new( + rng: &mut R, + witnesses: Vec, + commitment: &G, + comm_key: &[G], + enc_key: &G, + enc_gen: &G, + ) -> Self { + let () = Self::CHECK_THRESHOLD; + let witness_count = witnesses.len(); + assert_eq!(comm_key.len(), witness_count); + let mut hasher = D::default(); + let mut to_hash = Vec::with_capacity(commitment.compressed_size()); + + hash_elem!(commitment, hasher, to_hash); + for c in comm_key { + hash_elem!(c, hasher, to_hash); + } + hash_elem!(enc_key, hasher, to_hash); + hash_elem!(enc_gen, hasher, to_hash); + + let enc_key_table = WindowTable::new(NUM_PARTIES * witness_count, enc_key.into_group()); + let enc_gen_table = WindowTable::new(NUM_PARTIES * witness_count, enc_gen.into_group()); + + let mut commitments = [G::zero(); THRESHOLD]; + let mut polys = Vec::with_capacity(witness_count); + let mut shares = Vec::with_capacity(witness_count); + let mut enc_rands: Vec<[G::ScalarField; NUM_PARTIES]> = Vec::with_capacity(witness_count); + let mut cts: [Vec>; NUM_PARTIES] = + [(); NUM_PARTIES].map(|_| vec![HashedElgamalCiphertext::::default(); witness_count]); + + // Secret share each witness such that `THRESHOLD` + 1 shares are needed to reconstruct + for w in witnesses { + let (s, mut poly) = + deal_secret::(rng, w, THRESHOLD as u16 + 1, NUM_PARTIES as u16) + .unwrap(); + shares.push(s); + // 0th coefficient is the witness + poly.coeffs.remove(0); + polys.push(poly); + // Create randomness for encryption of each share + let mut r = [G::ScalarField::zero(); NUM_PARTIES]; + for i in 0..NUM_PARTIES { + r[i] = G::ScalarField::rand(rng); + } + enc_rands.push(r); + } + // Commit to coefficients of the polynomials + 
cfg_iter_mut!(commitments) + .enumerate() + .for_each(|(i, cm_i)| { + let coeffs = cfg_into_iter!(0..witness_count) + .map(|j| polys[j].coeffs[i]) + .collect::>(); + *cm_i = G::Group::msm_unchecked(comm_key, &coeffs).into_affine(); + }); + // Encrypt each share + cfg_iter_mut!(cts).enumerate().for_each(|(i, ct)| { + cfg_iter_mut!(ct).enumerate().for_each(|(j, ct_j)| { + *ct_j = HashedElgamalCiphertext::new_given_randomness_and_window_tables::( + &shares[j].0[i].share, + &enc_rands[j][i], + &enc_key_table, + &enc_gen_table, + ); + }); + }); + + for i in 0..THRESHOLD { + hash_elem!(commitments[i], hasher, to_hash); + } + for i in 0..NUM_PARTIES { + for j in 0..witness_count { + hash_elem!(cts[i][j], hasher, to_hash); + } + } + + let challenge = Box::new(hasher).finalize().to_vec(); + // Indices of the `NUM_PARTIES_MINUS_THRESHOLD` parties for which ciphertexts of the shares will be given to the verifier. + let indices_to_hide = get_unique_indices_to_hide::( + &challenge, + NUM_PARTIES_MINUS_THRESHOLD as u16, + NUM_PARTIES as u16, + ); + + let mut ciphertexts: [(u16, Vec>); NUM_PARTIES_MINUS_THRESHOLD] = + [(); NUM_PARTIES_MINUS_THRESHOLD].map(|_| (0, Vec::with_capacity(witness_count))); + let mut shares_and_enc_rands: [(u16, Vec<(G::ScalarField, G::ScalarField)>); THRESHOLD] = + [(); THRESHOLD].map(|_| (0, Vec::with_capacity(witness_count))); + + // Prepare `THRESHOLD` number of shares and encryption randomness and `NUM_PARTIES_MINUS_THRESHOLD` number of ciphertexts to share with the verifier + let mut ctx_idx = 0; + let mut s_idx = 0; + for i in 0..NUM_PARTIES { + if indices_to_hide.contains(&(i as u16)) { + ciphertexts[ctx_idx].0 = i as u16; + for j in 0..witness_count { + ciphertexts[ctx_idx].1.push(cts[i][j]); + } + ctx_idx += 1; + } else { + shares_and_enc_rands[s_idx].0 = i as u16; + for j in 0..witness_count { + shares_and_enc_rands[s_idx] + .1 + .push((shares[j].0[i].share, enc_rands[j][i])); + } + s_idx += 1; + } + } + + debug_assert_eq!(ctx_idx, 
NUM_PARTIES_MINUS_THRESHOLD); + debug_assert_eq!(s_idx, THRESHOLD); + + Self { + challenge, + poly_commitments: commitments, + ciphertexts, + shares_and_enc_rands, + } + } + + fn verify( + &self, + commitment: &G, + comm_key: &[G], + enc_key: &G, + enc_gen: &G, + ) -> Result<(), VerifiableEncryptionError> { + let () = Self::CHECK_THRESHOLD; + let witness_count = comm_key.len(); + for i in 0..NUM_PARTIES_MINUS_THRESHOLD { + assert_eq!(self.ciphertexts[i].1.len(), witness_count); + } + for i in 0..THRESHOLD { + assert_eq!(self.shares_and_enc_rands[i].1.len(), witness_count); + } + let hidden_indices = get_unique_indices_to_hide::( + &self.challenge, + NUM_PARTIES_MINUS_THRESHOLD as u16, + NUM_PARTIES as u16, + ); + for (i, _) in self.ciphertexts.iter() { + assert!(hidden_indices.contains(i)); + } + for (i, _) in self.shares_and_enc_rands.iter() { + assert!(!hidden_indices.contains(i)); + } + + let mut hasher = D::default(); + let mut to_hash = Vec::with_capacity(commitment.compressed_size()); + + hash_elem!(commitment, hasher, to_hash); + for c in comm_key { + hash_elem!(c, hasher, to_hash); + } + hash_elem!(enc_key, hasher, to_hash); + hash_elem!(enc_gen, hasher, to_hash); + + let enc_key_table = WindowTable::new(NUM_PARTIES * witness_count, enc_key.into_group()); + let enc_gen_table = WindowTable::new(NUM_PARTIES * witness_count, enc_gen.into_group()); + + let mut cts: [Vec>; NUM_PARTIES] = + [(); NUM_PARTIES].map(|_| vec![HashedElgamalCiphertext::::default(); witness_count]); + + cfg_iter_mut!(cts).enumerate().for_each(|(i, ct)| { + if hidden_indices.contains(&(i as u16)) { + // Ciphertexts given in the proof + for (k, c) in &self.ciphertexts { + if i as u16 == *k { + *ct = c.clone(); + break + } + } + } else { + for (k, sr) in &self.shares_and_enc_rands { + // Create ciphertexts for shares and randomness given in the proof + if i as u16 == *k { + *ct = cfg_into_iter!(0..witness_count).map(|j| 
HashedElgamalCiphertext::new_given_randomness_and_window_tables::(&sr[j].0, &sr[j].1, &enc_key_table, &enc_gen_table)).collect(); + break + } + } + } + }); + + for i in 0..THRESHOLD { + hash_elem!(self.poly_commitments[i], hasher, to_hash); + } + for i in 0..NUM_PARTIES { + for j in 0..witness_count { + hash_elem!(cts[i][j], hasher, to_hash); + } + } + + let challenge = Box::new(hasher).finalize().to_vec(); + if challenge != self.challenge { + return Err(VerifiableEncryptionError::InvalidProof); + } + + // This is slow and was just for testing + // for (i, sr) in &self.shares_and_enc_rands { + // let mut pows = powers::(&G::ScalarField::from(i+1), THRESHOLD as u32 + 1); + // pows.remove(0); + // let shares = sr.into_iter().map(|j| j.0.clone()).collect::>(); + // if G::Group::msm_unchecked(comm_key, &shares) != (G::Group::msm_unchecked(&self.commitments, &pows) + *commitment) { + // return Err(VerifiableEncryptionError::InvalidProof); + // } + // } + + // Need to check that the commitment to the coefficients of polynomials are consistent with the commitment to the shares. + // Eg. each party i, check if G_1 * F_1(i) + ... + G_k * F_k(i) == commitment + poly_commitments[0] * i + poly_commitments[1] * i^2 + ... + poly_commitments[t-1] * i^{t-1} + // Each check requires an MSM, which is expensive as we have to do `THRESHOLD` number of checks requiring `THRESHOLD`. So we combine these `THRESHOLD` + // checks into 1 by using a random linear combination. i.e. rather than doing `THRESHOLD` checks of the form `LHS_i == RHS_i`, verifier generates `THRESHOLD` + // number of random values `r_1, r_2, .., t_t` and checks if + // `LHS_1 * r_1 + LHS_2 * r_2 + ... LHS_t * r_t == RHS_1 * r_1 + RHS_2 * r_2 + ... RHS_t * r_t` --- (1) + // New LHS = `L` = `G_1 * ( F_1(1)*r_1 + F_1(2)*r_2 + ... + F_1(t)*r_t ) + G_2 * ( F_2(1)*r_1 + F_2(2)*r_2 + ... + F_2(t)*r_t ) + ... G_k * ( F_k(1)*r_1 + F_k(2)*r_2 + ... + F_k(t)*r_t )` --- (2) + // New RHS = `R` = `commitment * (r_1 + r_2 + ... 
r_t) + poly_commitments[0] * (r_1*1 + r_2*2 + ... r_t*t) + ... + poly_commitments[t-1]* (r_1*1^t + r_2*2^t + ... r_t*t^t)` + + let random = hash_to_field::(b"", &D::digest(&self.challenge)); + // Create many randoms from single random + // randoms = [1, random, random^2, ..., random^{t-1}], randoms[j] = random^j + let randoms = powers::(&random, THRESHOLD as u32); + + // For each witness, create sum of its share multiplied by a random value. + // For witness i, create \sum_{j=1 to THRESHOLD}{s_{i,j} * randoms[j]}. These sums for the scalars which when multiplied + // with commitment key give the new LHS `L` in above equation (2) + let evals = cfg_into_iter!(0..witness_count) + .map(|i| { + cfg_iter!(self.shares_and_enc_rands) + .enumerate() + .map(|(j, (_, sr))| sr[i].0 * randoms[j]) + .sum::() + }) + .collect::>(); + + // Powers of party indices, and each index's power multiplied by a random. s_i is the party index + // [ [1, s_1, {s_1}^2, ..., {s_1}^t], [random, random*s_2, random*{s_2}^2, ..., random*{s_2}^t], [random^2, random^2*s_3, random^2*{s_3}^2, ..., random^2*{s_3}^t], ... [random^{t-1}, random^{t-1}*s_t, ..., random^{t-1}*{s_t}^{t-1}] ] + let pows: Vec<_> = cfg_into_iter!(randoms) + .zip(cfg_iter!(self.shares_and_enc_rands)) + .map(|(r, (j, _))| { + powers_starting_from::( + r, + &G::ScalarField::from(j + 1), // +1 because party indices start from 1. + THRESHOLD as u32 + 1, + ) + }) + .collect::>(); + + // [1 + random + random^2 + .. + random^{t-1}, s_1 + random*s_2 + random^2*s_3 .. + random^{t-1}*s_t, .., {s_1}^t + random*{s_2}^t + .. 
random^{t-1}*{s_t}^t]
The objective is to avoid the use of random number generation on the verifier side + let subset = get_unique_indices_to_hide::( + &D::digest(&self.challenge), + SUBSET_SIZE as u16, + NUM_PARTIES_MINUS_THRESHOLD as u16, + ) + .into_iter() + .map(|i| hidden_indices[i as usize]) + .collect::>(); + + // Lagrange basis for each index in `opened_indices` + let lagrange_basis_for_opened_indices = + lagrange_basis_at_0_for_all::(opened_indices.clone()).unwrap(); + + // Lagrange basis for each index in `subset` + let mut lagrange_basis_for_hidden_indices = [G::ScalarField::zero(); SUBSET_SIZE]; + cfg_iter_mut!(lagrange_basis_for_hidden_indices) + .enumerate() + .for_each(|(i, l_i)| { + *l_i = + lagrange_basis_at_0::(&opened_indices, subset[i] + 1).unwrap() + }); + + cfg_iter_mut!(compressed_cts) + .enumerate() + .for_each(|(i, ct)| { + // +1 as polynomial can't be evaluated at 0 + let party_index = subset[i] + 1; + + let mut cphtx_idx = None; + for (j, (k, _)) in self.ciphertexts.iter().enumerate() { + if *k == subset[i] { + cphtx_idx = Some(j); + } + } + let cphtx_idx = cphtx_idx.unwrap(); + + let deltas = cfg_iter!(opened_indices) + .enumerate() + .map(|(j, o)| { + let p = G::ScalarField::from(party_index); + let o = G::ScalarField::from(*o); + (lagrange_basis_for_opened_indices[j] * p) * (p - o).inverse().unwrap() + }) + .collect::>(); + + cfg_iter_mut!(ct).enumerate().for_each(|(j, ct_j)| { + ct_j.eph_pk = self.ciphertexts[cphtx_idx].1[j].eph_pk; + ct_j.encrypted = self.ciphertexts[cphtx_idx].1[j].encrypted + * lagrange_basis_for_hidden_indices[i]; + ct_j.encrypted += cfg_iter!(deltas) + .zip(cfg_iter!(self.shares_and_enc_rands)) + .map(|(d, (_, sr))| *d * sr[j].0) + .sum::(); + }) + }); + + CompressedCiphertext(compressed_cts, lagrange_basis_for_hidden_indices) + } +} + +impl CompressedCiphertext { + pub fn decrypt( + &self, + dec_key: &G::ScalarField, + commitment: &G, + comm_key: &[G], + ) -> Result, VerifiableEncryptionError> { + let witness_count = comm_key.len(); 
+ for i in 0..SUBSET_SIZE { + assert_eq!(self.0[i].len(), witness_count); + let witnesses = cfg_into_iter!(0..witness_count) + .map(|j| { + let otp = self.1[i] + * HashedElgamalCiphertext::otp::( + (self.0[i][j].eph_pk * dec_key).into_affine(), + ); + self.0[i][j].encrypted - otp + }) + .collect::>(); + if *commitment == G::Group::msm_unchecked(comm_key, &witnesses).into_affine() { + return Ok(witnesses); + } + } + Err(VerifiableEncryptionError::DecryptionFailed) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_bls12_381::G1Affine; + use ark_ec::{CurveGroup, VariableBaseMSM}; + use ark_std::{ + rand::{prelude::StdRng, SeedableRng}, + UniformRand, + }; + use blake2::Blake2b512; + use dock_crypto_utils::elgamal::keygen; + use std::time::Instant; + + #[test] + fn prove_verify() { + fn check(count: usize) { + let mut rng = StdRng::seed_from_u64(0u64); + + let gen = G::rand(&mut rng); + let (sk, pk) = keygen::<_, G>(&mut rng, &gen); + + let witnesses = (0..count) + .map(|_| G::ScalarField::rand(&mut rng)) + .collect::>(); + let comm_key = (0..count).map(|_| G::rand(&mut rng)).collect::>(); + let commitment = G::Group::msm_unchecked(&comm_key, &witnesses).into_affine(); + + macro_rules! 
run_test { + ($parties: expr, $threshold: expr, $parties_minus_thresh: expr, $subset_size: expr) => {{ + println!( + "\n# witnesses = {}, # parties = {}, # threshold = {}, subset size = {}", + count, $parties, $threshold, $subset_size + ); + let start = Instant::now(); + let proof = RdkgithProof::< + $parties, + $threshold, + $parties_minus_thresh, + _, + >::new::<_, Blake2b512>( + &mut rng, + witnesses.clone(), + &commitment, + &comm_key, + &pk.0, + &gen, + ); + println!("Proof generated in: {:?}", start.elapsed()); + + let start = Instant::now(); + proof + .verify::(&commitment, &comm_key, &pk.0, &gen) + .unwrap(); + println!("Proof verified in: {:?}", start.elapsed()); + println!("Proof size: {:?}", proof.compressed_size()); + + let start = Instant::now(); + let ct = proof.compress::<$subset_size, Blake2b512>(); + println!("Ciphertext compressed in: {:?}", start.elapsed()); + println!("Ciphertext size: {:?}", ct.compressed_size()); + + let start = Instant::now(); + let decrypted_witnesses = ct + .decrypt::(&sk.0, &commitment, &comm_key) + .unwrap(); + println!("Ciphertext decrypted in: {:?}", start.elapsed()); + assert_eq!(decrypted_witnesses, witnesses); + }}; + } + + run_test!(132, 64, 68, 67); + run_test!(192, 36, 156, 145); + run_test!(512, 23, 489, 406); + run_test!(160, 80, 80, 55); + run_test!(256, 226, 30, 30); + run_test!(704, 684, 20, 20); + } + + check::(1); + check::(2); + check::(3); + check::(4); + check::(8); + } +} diff --git a/verifiable_encryption/src/tz_21/rdkgith_batched_elgamal.rs b/verifiable_encryption/src/tz_21/rdkgith_batched_elgamal.rs new file mode 100644 index 00000000..4014ca8a --- /dev/null +++ b/verifiable_encryption/src/tz_21/rdkgith_batched_elgamal.rs @@ -0,0 +1,504 @@ +//! Verifiable Encryption from DKG-in-the-head following Protocol 4 from the paper but adapted for the relation +//! `Y = G_1 * x_1 + G_2 * x_2 + ... G_k * x_k` where `x_i` are encrypted, `Y` and `G_i` are public. +//! 
The only difference with implementation in the module `rdkgith` is that a more efficient variant of Elgamal called batched +//! Elgamal is used where rather than generating a new shared secret for each witness's share, only 1 shared secret is generated +//! per party and then independent OTPs are derived for each witness share by "appending" counters to that shared secret. + +// TODO: This contains a lot of duplicate code from module `rdkgith`. Abstract the encryption scheme to reduce duplication + +use crate::{error::VerifiableEncryptionError, tz_21::util::get_unique_indices_to_hide}; +use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; +use ark_ff::{Field, Zero}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, rand::RngCore, UniformRand}; +use digest::{Digest, DynDigest}; +use dock_crypto_utils::{ + aliases::FullDigest, + elgamal::BatchedHashedElgamalCiphertext, + ff::{powers, powers_starting_from}, + hashing_utils::hash_to_field, + msm::WindowTable, +}; +use secret_sharing_and_dkg::{ + common::{lagrange_basis_at_0, lagrange_basis_at_0_for_all}, + shamir_ss::deal_secret, +}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct RdkgithProof< + const NUM_PARTIES: usize, + const THRESHOLD: usize, + const NUM_PARTIES_MINUS_THRESHOLD: usize, + G: AffineRepr, +> { + pub challenge: Vec, + /// Commitment to the coefficients of polynomials + pub poly_commitments: [G; THRESHOLD], + /// Ciphertexts of the shares. The first element of the tuple is the party index + pub ciphertexts: [(u16, BatchedHashedElgamalCiphertext); NUM_PARTIES_MINUS_THRESHOLD], + /// Revealed shares and randomness used for encryption. 
The first element of the tuple is the party index + pub shares_and_enc_rands: [(u16, Vec, G::ScalarField); THRESHOLD], +} + +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct CompressedCiphertext( + [BatchedHashedElgamalCiphertext; SUBSET_SIZE], + /// This is helper data for making the decryptor more efficient. The decryptor could compute this + /// on its own from the proof. + [G::ScalarField; SUBSET_SIZE], +); + +impl< + const NUM_PARTIES: usize, + const THRESHOLD: usize, + const NUM_PARTIES_MINUS_THRESHOLD: usize, + G: AffineRepr, + > RdkgithProof +{ + const CHECK_THRESHOLD: () = assert!(THRESHOLD + NUM_PARTIES_MINUS_THRESHOLD == NUM_PARTIES); + + pub fn new( + rng: &mut R, + witnesses: Vec, + commitment: &G, + comm_key: &[G], + enc_key: &G, + enc_gen: &G, + ) -> Self { + let () = Self::CHECK_THRESHOLD; + let witness_count = witnesses.len(); + assert_eq!(comm_key.len(), witness_count); + let mut hasher = D::default(); + let mut to_hash = Vec::with_capacity(commitment.compressed_size()); + + hash_elem!(commitment, hasher, to_hash); + for c in comm_key { + hash_elem!(c, hasher, to_hash); + } + hash_elem!(enc_key, hasher, to_hash); + hash_elem!(enc_gen, hasher, to_hash); + + let enc_key_table = WindowTable::new(NUM_PARTIES * witness_count, enc_key.into_group()); + let enc_gen_table = WindowTable::new(NUM_PARTIES * witness_count, enc_gen.into_group()); + + let mut commitments = [G::zero(); THRESHOLD]; + let mut polys = Vec::with_capacity(witness_count); + let mut shares = Vec::with_capacity(witness_count); + let enc_rands = [G::ScalarField::rand(rng); NUM_PARTIES]; + let mut cts: [BatchedHashedElgamalCiphertext; NUM_PARTIES] = + [(); NUM_PARTIES].map(|_| BatchedHashedElgamalCiphertext::::default()); + + // Secret share each witness such that `THRESHOLD` + 1 shares are needed to reconstruct + for w in witnesses { + let (s, mut poly) = + deal_secret::(rng, w, THRESHOLD as u16 + 1, NUM_PARTIES as u16) + .unwrap(); + shares.push(s); + // 0th 
coefficient is the witness + poly.coeffs.remove(0); + polys.push(poly); + } + // Commit to coefficients of the polynomials + cfg_iter_mut!(commitments) + .enumerate() + .for_each(|(i, cm_i)| { + let coeffs = cfg_into_iter!(0..witness_count) + .map(|j| polys[j].coeffs[i]) + .collect::>(); + *cm_i = G::Group::msm_unchecked(comm_key, &coeffs).into_affine(); + }); + // Encrypt each share + cfg_iter_mut!(cts).enumerate().for_each(|(i, ct)| { + let shares_i = cfg_iter!(shares).map(|s| s.0[i].share).collect::>(); + *ct = BatchedHashedElgamalCiphertext::new_given_randomness_and_window_tables::( + &shares_i, + &enc_rands[i], + &enc_key_table, + &enc_gen_table, + ); + }); + + for i in 0..THRESHOLD { + hash_elem!(commitments[i], hasher, to_hash); + hash_elem!(cts[i], hasher, to_hash); + } + + let challenge = Box::new(hasher).finalize().to_vec(); + // Indices of the `NUM_PARTIES_MINUS_THRESHOLD` parties for which ciphertexts of the shares will be given to the verifier. + let indices_to_hide = get_unique_indices_to_hide::( + &challenge, + NUM_PARTIES_MINUS_THRESHOLD as u16, + NUM_PARTIES as u16, + ); + + let mut ciphertexts: [(u16, BatchedHashedElgamalCiphertext); + NUM_PARTIES_MINUS_THRESHOLD] = [(); NUM_PARTIES_MINUS_THRESHOLD] + .map(|_| (0, BatchedHashedElgamalCiphertext::::default())); + let mut shares_and_enc_rands: [(u16, Vec, G::ScalarField); THRESHOLD] = + [(); THRESHOLD].map(|_| (0, Vec::with_capacity(witness_count), G::ScalarField::zero())); + + // Prepare `THRESHOLD` number of shares and encryption randomness and `NUM_PARTIES_MINUS_THRESHOLD` number of ciphertexts to share with the verifier + let mut ctx_idx = 0; + let mut s_idx = 0; + for i in 0..NUM_PARTIES { + if indices_to_hide.contains(&(i as u16)) { + ciphertexts[ctx_idx] = (i as u16, cts[i].clone()); + ctx_idx += 1; + } else { + shares_and_enc_rands[s_idx].0 = i as u16; + shares_and_enc_rands[s_idx].2 = enc_rands[i]; + for j in 0..witness_count { + shares_and_enc_rands[s_idx].1.push(shares[j].0[i].share); + } 
+ s_idx += 1; + } + } + + debug_assert_eq!(ctx_idx, NUM_PARTIES_MINUS_THRESHOLD); + debug_assert_eq!(s_idx, THRESHOLD); + + Self { + challenge, + poly_commitments: commitments, + ciphertexts, + shares_and_enc_rands, + } + } + + fn verify( + &self, + commitment: &G, + comm_key: &[G], + enc_key: &G, + enc_gen: &G, + ) -> Result<(), VerifiableEncryptionError> { + let () = Self::CHECK_THRESHOLD; + let witness_count = comm_key.len(); + for i in 0..NUM_PARTIES_MINUS_THRESHOLD { + assert_eq!(self.ciphertexts[i].1.batch_size(), witness_count); + } + for i in 0..THRESHOLD { + assert_eq!(self.shares_and_enc_rands[i].1.len(), witness_count); + } + + let hidden_indices = get_unique_indices_to_hide::( + &self.challenge, + NUM_PARTIES_MINUS_THRESHOLD as u16, + NUM_PARTIES as u16, + ); + for (i, _) in self.ciphertexts.iter() { + assert!(hidden_indices.contains(i)); + } + for (i, _, _) in self.shares_and_enc_rands.iter() { + assert!(!hidden_indices.contains(i)); + } + + let mut hasher = D::default(); + let mut to_hash = Vec::with_capacity(commitment.compressed_size()); + + hash_elem!(commitment, hasher, to_hash); + for c in comm_key { + hash_elem!(c, hasher, to_hash); + } + hash_elem!(enc_key, hasher, to_hash); + hash_elem!(enc_gen, hasher, to_hash); + + let enc_key_table = WindowTable::new(NUM_PARTIES * witness_count, enc_key.into_group()); + let enc_gen_table = WindowTable::new(NUM_PARTIES * witness_count, enc_gen.into_group()); + + let mut cts: [BatchedHashedElgamalCiphertext; NUM_PARTIES] = + [(); NUM_PARTIES].map(|_| BatchedHashedElgamalCiphertext::::default()); + + cfg_iter_mut!(cts).enumerate().for_each(|(i, ct)| { + if hidden_indices.contains(&(i as u16)) { + // Ciphertexts given in the proof + for (k, c) in &self.ciphertexts { + if i as u16 == *k { + *ct = c.clone(); + break + } + } + } else { + for (k, s, r) in &self.shares_and_enc_rands { + // Create ciphertexts for shares and randomness given in the proof + if i as u16 == *k { + *ct = 
BatchedHashedElgamalCiphertext::new_given_randomness_and_window_tables::(s, r, &enc_key_table, &enc_gen_table); + break + } + } + } + }); + + for i in 0..THRESHOLD { + hash_elem!(self.poly_commitments[i], hasher, to_hash); + hash_elem!(cts[i], hasher, to_hash); + } + + let challenge = Box::new(hasher).finalize().to_vec(); + if challenge != self.challenge { + return Err(VerifiableEncryptionError::InvalidProof); + } + + // Need to check that the commitment to the coefficients of polynomials are consistent with the commitment to the shares. + // Eg. each party i, check if G_1 * F_1(i) + ... + G_k * F_k(i) == commitment + poly_commitments[0] * i + poly_commitments[1] * i^2 + ... + poly_commitments[t-1] * i^{t-1} + // Each check requires an MSM, which is expensive as we have to do `THRESHOLD` number of checks requiring `THRESHOLD`. So we combine these `THRESHOLD` + // checks into 1 by using a random linear combination. i.e. rather than doing `THRESHOLD` checks of the form `LHS_i == RHS_i`, verifier generates `THRESHOLD` + // number of random values `r_1, r_2, .., t_t` and checks if + // `LHS_1 * r_1 + LHS_2 * r_2 + ... LHS_t * r_t == RHS_1 * r_1 + RHS_2 * r_2 + ... RHS_t * r_t` --- (1) + // New LHS = `L` = `G_1 * ( F_1(1)*r_1 + F_1(2)*r_2 + ... + F_1(t)*r_t ) + G_2 * ( F_2(1)*r_1 + F_2(2)*r_2 + ... + F_2(t)*r_t ) + ... G_k * ( F_k(1)*r_1 + F_k(2)*r_2 + ... + F_k(t)*r_t )` --- (2) + // New RHS = `R` = `commitment * (r_1 + r_2 + ... r_t) + poly_commitments[0] * (r_1*1 + r_2*2 + ... r_t*t) + ... + poly_commitments[t-1]* (r_1*1^t + r_2*2^t + ... r_t*t^t)` + + let random = hash_to_field::(b"", &D::digest(&self.challenge)); + // Create many randoms from single random + // randoms = [1, random, random^2, ..., random^{t-1}], randoms[j] = random^j + let randoms = powers::(&random, THRESHOLD as u32); + + // For each witness, create sum of its share multiplied by a random value. + // For witness i, create \sum_{j=1 to THRESHOLD}{s_{i,j} * randoms[j]}. 
These sums form the scalars which, when multiplied
random^{t-1}*{s_t}^t]
The objective is to avoid the use of random number generation on the verifier side + let subset = get_unique_indices_to_hide::( + &D::digest(&self.challenge), + SUBSET_SIZE as u16, + NUM_PARTIES_MINUS_THRESHOLD as u16, + ) + .into_iter() + .map(|i| hidden_indices[i as usize]) + .collect::>(); + let lagrange_basis_for_opened_indices = + lagrange_basis_at_0_for_all::(opened_indices.clone()).unwrap(); + let mut lagrange_basis_for_hidden_indices = [G::ScalarField::zero(); SUBSET_SIZE]; + cfg_iter_mut!(lagrange_basis_for_hidden_indices) + .enumerate() + .for_each(|(i, l_i)| { + *l_i = + lagrange_basis_at_0::(&opened_indices, subset[i] + 1).unwrap() + }); + cfg_iter_mut!(compressed_cts) + .enumerate() + .for_each(|(i, ct)| { + let party_index = subset[i] + 1; + let mut cphtx_idx = None; + for (j, (k, _)) in self.ciphertexts.iter().enumerate() { + if *k == subset[i] { + cphtx_idx = Some(j); + } + } + let cphtx_idx = cphtx_idx.unwrap(); + let deltas = cfg_iter!(opened_indices) + .enumerate() + .map(|(j, o)| { + let p = G::ScalarField::from(party_index); + let o = G::ScalarField::from(*o); + (lagrange_basis_for_opened_indices[j] * p) * (p - o).inverse().unwrap() + }) + .collect::>(); + ct.eph_pk = self.ciphertexts[cphtx_idx].1.eph_pk; + ct.encrypted = cfg_into_iter!(0..witness_count) + .map(|j| { + let mut e = self.ciphertexts[cphtx_idx].1.encrypted[j] + * lagrange_basis_for_hidden_indices[i]; + e += cfg_iter!(deltas) + .zip(cfg_iter!(self.shares_and_enc_rands)) + .map(|(d, (_, s, _))| *d * s[j]) + .sum::(); + e + }) + .collect::>(); + }); + + CompressedCiphertext(compressed_cts, lagrange_basis_for_hidden_indices) + } +} + +impl CompressedCiphertext { + pub fn decrypt( + &self, + dec_key: &G::ScalarField, + commitment: &G, + comm_key: &[G], + ) -> Result, VerifiableEncryptionError> { + let witness_count = comm_key.len(); + for i in 0..SUBSET_SIZE { + assert_eq!(self.0[i].batch_size(), witness_count); + let shared_secret = (self.0[i].eph_pk * dec_key).into_affine(); + + let 
witnesses = cfg_into_iter!(0..witness_count) + .map(|j| { + let otp = self.1[i] + * BatchedHashedElgamalCiphertext::otp::(&shared_secret, j as u32); + self.0[i].encrypted[j] - otp + }) + .collect::>(); + if *commitment == G::Group::msm_unchecked(comm_key, &witnesses).into_affine() { + return Ok(witnesses); + } + } + Err(VerifiableEncryptionError::DecryptionFailed) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_bls12_381::G1Affine; + use ark_ec::{CurveGroup, VariableBaseMSM}; + use ark_std::{ + rand::{prelude::StdRng, SeedableRng}, + UniformRand, + }; + use blake2::Blake2b512; + use dock_crypto_utils::elgamal::keygen; + use std::time::Instant; + + #[test] + fn prove_verify() { + fn check(count: usize) { + let mut rng = StdRng::seed_from_u64(0u64); + + let gen = G::rand(&mut rng); + let (sk, pk) = keygen::<_, G>(&mut rng, &gen); + + let witnesses = (0..count) + .map(|_| G::ScalarField::rand(&mut rng)) + .collect::>(); + let comm_key = (0..count).map(|_| G::rand(&mut rng)).collect::>(); + let commitment = G::Group::msm_unchecked(&comm_key, &witnesses).into_affine(); + + macro_rules! 
run_test { + ($parties: expr, $threshold: expr, $parties_minus_thresh: expr, $subset_size: expr) => {{ + println!( + "\n# witnesses = {}, # parties = {}, # threshold = {}, subset size = {}", + count, $parties, $threshold, $subset_size + ); + let start = Instant::now(); + let proof = RdkgithProof::< + $parties, + $threshold, + $parties_minus_thresh, + _, + >::new::<_, Blake2b512>( + &mut rng, + witnesses.clone(), + &commitment, + &comm_key, + &pk.0, + &gen, + ); + println!("Proof generated in: {:?}", start.elapsed()); + + let start = Instant::now(); + proof + .verify::(&commitment, &comm_key, &pk.0, &gen) + .unwrap(); + println!("Proof verified in: {:?}", start.elapsed()); + println!("Proof size: {:?}", proof.compressed_size()); + + let start = Instant::now(); + let ct = proof.compress::<$subset_size, Blake2b512>(); + println!("Ciphertext compressed in: {:?}", start.elapsed()); + println!("Ciphertext size: {:?}", ct.compressed_size()); + + let start = Instant::now(); + let decrypted_witnesses = ct + .decrypt::(&sk.0, &commitment, &comm_key) + .unwrap(); + println!("Ciphertext decrypted in: {:?}", start.elapsed()); + assert_eq!(decrypted_witnesses, witnesses); + }}; + } + + run_test!(132, 64, 68, 67); + run_test!(192, 36, 156, 145); + run_test!(512, 23, 489, 406); + run_test!(160, 80, 80, 55); + run_test!(256, 226, 30, 30); + run_test!(704, 684, 20, 20); + } + + check::(1); + check::(2); + check::(3); + check::(4); + check::(8); + } +} diff --git a/verifiable_encryption/src/tz_21/seed_tree.rs b/verifiable_encryption/src/tz_21/seed_tree.rs new file mode 100644 index 00000000..43099ccf --- /dev/null +++ b/verifiable_encryption/src/tz_21/seed_tree.rs @@ -0,0 +1,319 @@ +//! Binary tree to create a large number of random values (deterministically) from a single random seed. +//! Taken largely from [here](https://github.com/akiratk0355/verenc-mpcith/blob/main/dkgith/src/seed_tree.rs). 
+ +use ark_ff::PrimeField; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::rand::RngCore; +use digest::{ExtendableOutput, Update, XofReader}; +use dock_crypto_utils::{aliases::FullDigest, hashing_utils::hash_to_field}; +use sha3::Shake256; +use zeroize::Zeroize; + +/// Type of a node of the tree. +pub type Seed = [u8; SEED_SIZE]; +/// A path of the tree from top to leaf (excluding root node) that lets you create the whole tree except a particular leaf. +pub type TreeOpening = [Seed; DEPTH]; + +/// A binary tree of `DEPTH` depth and `NUM_LEAVES` number of leaves and `NUM_TOTAL_NODES` denotes the number of +/// leaf and non-leaf nodes. Compile time checks ensure the relation between these constants. +/// This is creating by selecting a random root node seed, then hashing it to create 2 children, each of which is +/// hashed again to create 2 children and so on until the tree has `NUM_LEAVES` leaves. +/// The tree is represented as an array of nodes where the 0th index of array is the root node, +/// next `NUM_LEAVES` indices are for internal nodes and last `NUM_LEAVES` indices are for leaf nodes. +#[derive(Clone, Copy, Debug, CanonicalSerialize, CanonicalDeserialize, Zeroize)] +pub struct SeedTree< + const NUM_LEAVES: usize, + const DEPTH: usize, + const NUM_TOTAL_NODES: usize, + const SEED_SIZE: usize, +>(pub [Seed; NUM_TOTAL_NODES]); + +impl< + const NUM_LEAVES: usize, + const DEPTH: usize, + const NUM_TOTAL_NODES: usize, + const SEED_SIZE: usize, + > Default for SeedTree +{ + fn default() -> Self { + let nodes = [[0; SEED_SIZE]; NUM_TOTAL_NODES]; + Self(nodes) + } +} + +impl< + const NUM_LEAVES: usize, + const DEPTH: usize, + const NUM_TOTAL_NODES: usize, + const SEED_SIZE: usize, + > SeedTree +{ + const CHECK_LEAF_COUNT: () = assert!(NUM_LEAVES.is_power_of_two()); + const CHECK_DEPTH: () = assert!((1 << DEPTH) == NUM_LEAVES); + const CHECK_NODE_COUNT: () = assert!((2 * NUM_LEAVES - 1) == NUM_TOTAL_NODES); + + /// Create a new tree. 
+ pub fn create(rng: &mut R, salt: &[u8], rep_index: usize) -> Self { + let root_seed = Self::random_seed(rng); + Self::create_given_root_node(root_seed, salt, rep_index) + } + + /// Given a root node, generate rest of nodes deterministically. + // pub fn create_given_root_node(root_seed: &Seed, salt: &[u8], rep_index: usize) -> Self { + pub fn create_given_root_node( + root_seed: Seed, + salt: &[u8], + rep_index: usize, + ) -> Self { + let _ = Self::CHECK_LEAF_COUNT; + let _ = Self::CHECK_DEPTH; + let _ = Self::CHECK_NODE_COUNT; + + let mut nodes = [Self::zero_seed(); NUM_TOTAL_NODES]; + nodes[0] = root_seed; + let rep_index = rep_index as u16; + + for i in 0..NUM_LEAVES - 1 { + // Create 2 children of node at index [i] and set the left and right child nodes to them + // let (left, right) = Self::expand::(&nodes[i as usize], salt, rep_index, i); + let (left, right) = Self::expand(&nodes[i], salt, rep_index, i as u16); + nodes[Self::left_child_index(i as u16) as usize] = left; + nodes[Self::right_child_index(i as u16) as usize] = right; + } + + SeedTree(nodes) + } + + pub fn get_leaf(&self, leaf_index: u16) -> Seed { + assert!( + (leaf_index as usize) < NUM_LEAVES, + "get_leaf: leaf index too large" + ); + // First NUM_LEAVES - 1 of nodes are the root and internal nodes + self.0[NUM_LEAVES as usize - 1 + leaf_index as usize] + } + + /// Return the leaf of the tree but as a finite field element. 
+ pub fn get_leaf_as_finite_field_element( + &self, + leaf_index: u16, + salt: &[u8], + rep_index: usize, + domain_separator: &[u8], + ) -> F { + let leaf = self.get_leaf(leaf_index); + let mut bytes = vec![]; + salt.serialize_compressed(&mut bytes).unwrap(); + leaf.serialize_compressed(&mut bytes).unwrap(); + leaf_index.serialize_compressed(&mut bytes).unwrap(); + rep_index.serialize_compressed(&mut bytes).unwrap(); + hash_to_field::(domain_separator, &bytes) + } + + /// Return nodes on a path from leaf level till root level - 1 (excluding root node as root node can create the whole tree) + /// that allow reconstructing all leaves at indices except `unopened_leaf_index` + pub fn open_seeds(&self, unopened_leaf_index: u16) -> TreeOpening { + assert!((unopened_leaf_index as usize) < NUM_LEAVES); + let mut current = unopened_leaf_index + Self::num_non_leaf_nodes(); + let mut out = [Self::zero_seed(); DEPTH]; + let mut to_reveal = 0; + // Go from bottom to top of the tree but don't add root node in the path. 
+ // At each level, grab the sibling of the current node + while to_reveal < DEPTH { + let sibling = Self::sibling_index(current); + debug_assert_ne!(sibling, 0); + out[to_reveal] = self.0[sibling as usize]; + current = Self::parent_index(current); + to_reveal += 1; + } + + out + } + + /// Given a `TreeOpening`, create all the nodes of the tree except the leaf at `unopened_leaf_index` + pub fn reconstruct_tree( + unopened_leaf_index: u16, + tree_opening: &TreeOpening, + salt: &[u8], + rep_index: usize, + ) -> Self { + let _ = Self::CHECK_LEAF_COUNT; + let _ = Self::CHECK_DEPTH; + let _ = Self::CHECK_NODE_COUNT; + let mut unopened_node_index = unopened_leaf_index + NUM_LEAVES as u16 - 1; + let mut nodes = [Self::zero_seed(); NUM_TOTAL_NODES]; + + // Fill the tree from top to bottom, setting sibling nodes on the path of the unopened leaf + let mut next_insert = 0; + while next_insert < DEPTH { + nodes[Self::sibling_index(unopened_node_index) as usize] = tree_opening[next_insert]; + unopened_node_index = Self::parent_index(unopened_node_index); + next_insert += 1; + } + debug_assert_eq!(nodes[0], Self::zero_seed()); + + let zero_seed = nodes[0]; // we'll never have the root + // Iterate over all the non-leaf nodes except root node on the path to the leaf at `unopened_leaf_index` + // to eventually set the leaves except the leaf at `unopened_leaf_index` + for i in 1..NUM_LEAVES - 1 { + if nodes[i] != zero_seed { + let (left, right) = Self::expand(&nodes[i], salt, rep_index as u16, i as u16); + nodes[Self::left_child_index(i as u16) as usize] = left; + nodes[Self::right_child_index(i as u16) as usize] = right; + } + } + debug_assert_eq!(nodes[0], Self::zero_seed()); + + Self(nodes) + } + + /// Given a parent node, create its 2 children nodes + // fn expand(node: &Seed, salt: &[u8], rep_index: u16, node_index: u16) -> (Seed, Seed) { + fn expand( + node: &Seed, + salt: &[u8], + rep_index: u16, + node_index: u16, + ) -> (Seed, Seed) { + // let mut hasher = D::new(); + // 
Digest::update(&mut hasher, &salt); + let mut hasher = Shake256::default(); + hasher.update(&salt); + hasher.update(&rep_index.to_le_bytes()); + hasher.update(&node_index.to_le_bytes()); + hasher.update(node); + let mut reader = hasher.finalize_xof(); + let mut left = [0u8; SEED_SIZE]; + let mut right = [0u8; SEED_SIZE]; + reader.read(&mut left); + reader.read(&mut right); + (left, right) + } + + const fn num_non_leaf_nodes() -> u16 { + NUM_LEAVES as u16 - 1 + } + + pub fn depth() -> u16 { + let n = NUM_LEAVES as f32; + n.log2().ceil() as u16 + } + + fn left_child_index(node_index: u16) -> u16 { + 2 * node_index + 1 + } + + fn right_child_index(node_index: u16) -> u16 { + 2 * node_index + 2 + } + + fn parent_index(node_index: u16) -> u16 { + (node_index - 1) / 2 + } + + fn sibling_index(node_index: u16) -> u16 { + if node_index % 2 == 1 { + node_index + 1 + } else { + node_index - 1 + } + } + + pub const fn zero_seed() -> Seed { + [0; SEED_SIZE] + } + + pub fn random_seed(rng: &mut R) -> Seed { + let mut seed = [0u8; SEED_SIZE]; + rng.fill_bytes(&mut seed); + seed + } +} + +pub fn get_num_total_nodes(num_leaves: u16) -> u16 { + 2 * num_leaves - 1 +} + +pub fn get_num_leaves(depth: u16) -> u16 { + 1 << depth +} + +pub fn get_depth(num_leaves: u16) -> u16 { + let n = num_leaves as f32; + n.log2().ceil() as u16 +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_std::rand::{prelude::StdRng, SeedableRng}; + + fn random_vec(len: usize) -> Vec { + let mut rng = StdRng::from_entropy(); + let mut random_vector = vec![0u8; len]; + rng.fill_bytes(&mut random_vector); + random_vector + } + + #[test] + fn seed_tree_create() { + let mut rng = StdRng::from_entropy(); + const NUM_LEAVES: usize = 8; + const DEPTH: usize = 3; + const NUM_NODES: usize = 15; + const SEED_SIZE: usize = 16; + let salt = random_vec(32); + let rep_index = 5; + + let tree = SeedTree::::create( + &mut rng, + salt.as_slice(), + rep_index, + ); + for i in 0..NUM_LEAVES { + let leaf_seed_i = 
tree.get_leaf(i as u16); + assert_ne!( + leaf_seed_i, + SeedTree::::zero_seed() + ); + } + } + + #[test] + fn seed_tree_openings() { + let mut rng = StdRng::from_entropy(); + const NUM_LEAVES: usize = 8; + const DEPTH: usize = 3; + const NUM_NODES: usize = 15; + const SEED_SIZE: usize = 16; + let salt = random_vec(32); + let rep_index = 5; + + let tree = SeedTree::::create( + &mut rng, + salt.as_slice(), + rep_index, + ); + + for unopened_party in 0..NUM_LEAVES - 1 { + let opening_data = tree.open_seeds(unopened_party as u16); + let tree2 = SeedTree::::reconstruct_tree( + unopened_party as u16, + &opening_data, + &salt, + rep_index, + ); + + for i in 0..NUM_LEAVES { + if i != unopened_party { + assert_eq!(tree.get_leaf(i as u16), tree2.get_leaf(i as u16)); + } else { + assert_eq!( + tree2.get_leaf(i as u16), + SeedTree::::zero_seed() + ); + } + } + } + } +} diff --git a/verifiable_encryption/src/tz_21/util.rs b/verifiable_encryption/src/tz_21/util.rs new file mode 100644 index 00000000..a27e4d85 --- /dev/null +++ b/verifiable_encryption/src/tz_21/util.rs @@ -0,0 +1,71 @@ +use digest::Digest; +use std::collections::BTreeSet; + +#[macro_export] +macro_rules! hash_elem { + ($elem: expr, $hasher: ident, $buffer: ident) => {{ + $elem.serialize_compressed(&mut $buffer).unwrap(); + DynDigest::update(&mut $hasher, &$buffer); + $buffer.clear(); + }}; +} + +/// Use the given challenge to get a set of indices of size `num_indices` where each index is < `num_parties`. +/// Will hash the challenge repeatedly unless the set of required size is created. +pub fn get_unique_indices_to_hide( + challenge: &[u8], + num_indices: u16, + num_parties: u16, +) -> BTreeSet { + // Computes the index of the unopened party. 
Using set to avoid duplicate indices + let mut output = BTreeSet::::new(); + let mut c = challenge.to_vec(); + while (output.len() as u16) < num_indices { + // Divide the bytearray into 2-byte chunks and each chunk is used to create a u16 + for c_i in c.chunks(2) { + if c_i.len() == 2 { + output.insert((((c_i[0] as u16) << 8) | (c_i[1] as u16)) % num_parties); + } else { + output.insert(c_i[0] as u16); + } + if output.len() as u16 == num_indices { + break; + } + } + if output.len() as u16 != num_indices { + c = D::digest(c.as_slice()).to_vec(); + } + } + + output +} + +/// Use the given challenge to get a list of indices of size `num_indices` where each index is < `num_parties`. +/// Will hash the challenge repeatedly unless the list of required size is created. +pub fn get_indices_to_hide( + challenge: &[u8], + num_indices: u16, + num_parties: u16, +) -> Vec { + // Computes the index of the unopened party in each of the repetitions + let mut output = Vec::with_capacity(num_indices as usize); + let mut c = challenge.to_vec(); + while (output.len() as u16) < num_indices { + // Divide the bytearray into 2-byte chunks and each chunk is used to create a u16 + for c_i in c.chunks(2) { + if c_i.len() == 2 { + output.push((((c_i[0] as u16) << 8) | (c_i[1] as u16)) % num_parties); + } else { + output.push(c_i[0] as u16); + } + if output.len() as u16 == num_indices { + break; + } + } + if output.len() as u16 != num_indices { + c = D::digest(c.as_slice()).to_vec(); + } + } + + output +}