diff --git a/bbs_plus/Cargo.toml b/bbs_plus/Cargo.toml index 1840618c..66021901 100644 --- a/bbs_plus/Cargo.toml +++ b/bbs_plus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bbs_plus" -version = "0.20.0" +version = "0.21.0" edition.workspace = true authors.workspace = true license.workspace = true @@ -23,10 +23,10 @@ sha3 = { version = "0.10.6", default-features = false } serde.workspace = true serde_with.workspace = true zeroize.workspace = true -schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" } -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } -oblivious_transfer_protocols = { version = "0.7.0", default-features = false, path = "../oblivious_transfer" } -secret_sharing_and_dkg = { version = "0.11.0", default-features = false, path = "../secret_sharing_and_dkg" } +schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } +oblivious_transfer_protocols = { version = "0.8.0", default-features = false, path = "../oblivious_transfer" } +secret_sharing_and_dkg = { version = "0.12.0", default-features = false, path = "../secret_sharing_and_dkg" } [dev-dependencies] blake2.workspace = true diff --git a/bbs_plus/src/setup.rs b/bbs_plus/src/setup.rs index 9b671edd..5181ea03 100644 --- a/bbs_plus/src/setup.rs +++ b/bbs_plus/src/setup.rs @@ -72,7 +72,8 @@ use dock_crypto_utils::{ use itertools::process_results; #[cfg(feature = "parallel")] -use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; +use rayon::prelude::*; + use serde::{Deserialize, Serialize}; use serde_with::serde_as; diff --git a/bulletproofs_plus_plus/Cargo.toml b/bulletproofs_plus_plus/Cargo.toml index 1625f22a..a0761f10 100644 --- a/bulletproofs_plus_plus/Cargo.toml +++ b/bulletproofs_plus_plus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bulletproofs_plus_plus" -version = "0.4.0" +version = "0.5.0" edition.workspace = true authors.workspace = true license.workspace = true @@ -18,7 +18,7 @@ serde.workspace = true serde_with.workspace = true zeroize.workspace = true rayon = { workspace = true, optional = true } -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } [dev-dependencies] blake2.workspace = true diff --git a/coconut/Cargo.toml b/coconut/Cargo.toml index 35bfa7db..f6dc410c 100644 --- a/coconut/Cargo.toml +++ b/coconut/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "coconut-crypto" -version = "0.9.0" +version = "0.10.0" edition.workspace = true authors.workspace = true license.workspace = true @@ -22,9 +22,9 @@ itertools.workspace = true zeroize.workspace = true serde_with.workspace = true rayon = { workspace = true, optional = true } -utils = { package = "dock_crypto_utils", version = "0.18.0", default-features = false, path = "../utils" } -schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" } -secret_sharing_and_dkg = { version = "0.11.0", default-features = false, path = "../secret_sharing_and_dkg" } +utils = { package = "dock_crypto_utils", version = "0.19.0", default-features = false, path = "../utils" } +schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" } +secret_sharing_and_dkg = { version = "0.12.0", default-features = false, path = "../secret_sharing_and_dkg" } [dev-dependencies] blake2.workspace = true diff --git 
a/compressed_sigma/Cargo.toml b/compressed_sigma/Cargo.toml index 8f4f6ea2..3863046a 100644 --- a/compressed_sigma/Cargo.toml +++ b/compressed_sigma/Cargo.toml @@ -15,7 +15,7 @@ ark-std.workspace = true ark-poly.workspace = true rayon = {workspace = true, optional = true} digest.workspace = true -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } [dev-dependencies] blake2.workspace = true diff --git a/delegatable_credentials/Cargo.toml b/delegatable_credentials/Cargo.toml index d733c9b9..ab4b34c2 100644 --- a/delegatable_credentials/Cargo.toml +++ b/delegatable_credentials/Cargo.toml @@ -20,8 +20,8 @@ serde.workspace = true serde_with.workspace = true zeroize.workspace = true num-bigint = { version = "0.4.0", default-features = false } -schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" } -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } +schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } [dependencies.num-integer] version = "0.1.42" diff --git a/kvac/Cargo.toml b/kvac/Cargo.toml index d4398ee2..a5a0b27b 100644 --- a/kvac/Cargo.toml +++ b/kvac/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvac" -version = "0.3.0" +version = "0.4.0" edition.workspace = true authors.workspace = true license.workspace = true @@ -18,8 +18,8 @@ rayon = {workspace = true, optional = true} serde.workspace = true serde_with.workspace = true itertools.workspace = true -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } -schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } +schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" } [dev-dependencies] blake2.workspace = true diff --git a/kvac/src/bddt_2016/mac.rs b/kvac/src/bddt_2016/mac.rs index 893bdcab..3e8ba6ee 100644 --- a/kvac/src/bddt_2016/mac.rs +++ b/kvac/src/bddt_2016/mac.rs @@ -63,11 +63,12 @@ impl MAC { rng: &mut R, messages: &[G::ScalarField], secret_key: &SecretKey, - params: &MACParams, + params: impl AsRef>, ) -> Result { if messages.is_empty() { return Err(KVACError::NoMessageGiven); } + let params = params.as_ref(); expect_equality!( messages.len(), params.supported_message_count(), @@ -96,11 +97,12 @@ impl MAC { commitment: &G, uncommitted_messages: BTreeMap, sk: &SecretKey, - params: &MACParams, + params: impl AsRef>, ) -> Result { if uncommitted_messages.is_empty() { return Err(KVACError::NoMessageGiven); } + let params = params.as_ref(); // `>` as commitment will have 0 or more messages. 
In practice, commitment should have // at least 1 message if uncommitted_messages.len() > params.supported_message_count() { @@ -135,19 +137,22 @@ impl MAC { pub fn verify( &self, messages: &[G::ScalarField], - sk: &SecretKey, - params: &MACParams, + sk: impl AsRef, + params: impl AsRef>, ) -> Result<(), KVACError> { if messages.is_empty() { return Err(KVACError::NoMessageGiven); } + let params = params.as_ref(); expect_equality!( messages.len(), params.supported_message_count(), KVACError::MessageCountIncompatibleWithMACParams ); let b = params.b(messages.iter().enumerate(), &self.s)?; - let e_plus_x_inv = (self.e + sk.0).inverse().ok_or(KVACError::CannotInvert0)?; + let e_plus_x_inv = (self.e + sk.as_ref()) + .inverse() + .ok_or(KVACError::CannotInvert0)?; if (b * e_plus_x_inv).into_affine() != self.A { return Err(KVACError::InvalidMAC); } @@ -171,11 +176,12 @@ impl ProofOfValidityOfMAC { mac: &MAC, secret_key: &SecretKey, public_key: &PublicKey, - params: &MACParams, + params: impl AsRef>, ) -> Self { let witness = secret_key.0; let blinding = G::ScalarField::rand(rng); let B = (mac.A * witness).into_affine(); + let params = params.as_ref(); let mut challenge_bytes = vec![]; // As witness has to be proven same in both protocols. let p1 = PokDiscreteLogProtocol::init(witness, blinding, &mac.A); @@ -196,11 +202,12 @@ impl ProofOfValidityOfMAC { mac: &MAC, messages: &[G::ScalarField], public_key: &PublicKey, - params: &MACParams, + params: impl AsRef>, ) -> Result<(), KVACError> { if self.sc_B.response != self.sc_pk.response { return Err(KVACError::InvalidMACProof); } + let params = params.as_ref(); // B = h + g * s + g_1 * m_1 + g_2 * m_2 + ... g_n * m_n let B = (params.b(messages.iter().enumerate(), &mac.s)? + mac.A * mac.e.neg()).into_affine(); @@ -246,7 +253,7 @@ mod tests { let proof = ProofOfValidityOfMAC::new::<_, Blake2b512>(&mut rng, &mac, &sk, &pk, ¶ms); proof - .verify::(&mac, &messages, &pk, ¶ms) + .verify::(&mac, &messages, &pk, params) .unwrap(); } @@ -297,6 +304,6 @@ mod tests { assert!(blinded_mac.verify(&messages, &sk, ¶ms).is_err()); let mac = blinded_mac.unblind(&blinding); - mac.verify(&messages, &sk, ¶ms).unwrap(); + mac.verify(&messages, sk, params).unwrap(); } } diff --git a/kvac/src/bddt_2016/setup.rs b/kvac/src/bddt_2016/setup.rs index 4ff4258c..b7ddd91e 100644 --- a/kvac/src/bddt_2016/setup.rs +++ b/kvac/src/bddt_2016/setup.rs @@ -178,3 +178,9 @@ impl AsRef for PublicKey { &self.0 } } + +impl AsRef> for MACParams { + fn as_ref(&self) -> &MACParams { + &self + } +} diff --git a/legogroth16/Cargo.toml b/legogroth16/Cargo.toml index bd25e97d..b0262e20 100644 --- a/legogroth16/Cargo.toml +++ b/legogroth16/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "legogroth16" -version = "0.13.0" +version = "0.14.0" authors = [ "arkworks contributors", "Dock.io" ] description = "An implementation of the LegoGroth16, the Legosnark variant of Groth16 zkSNARK proof system" repository.workspace = true @@ -29,7 +29,7 @@ num-bigint = { version = "0.4", default-features = false, optional = true } log = "0.4" ark-groth16 = { workspace = true, optional = true } ark-snark = { version = "^0.4.0", default-features = false, optional = true } -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } [dev-dependencies] csv = { version = "1" } diff --git a/oblivious_transfer/Cargo.toml b/oblivious_transfer/Cargo.toml index 952f2bc4..7590f5ba 100644 --- 
a/oblivious_transfer/Cargo.toml +++ b/oblivious_transfer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "oblivious_transfer_protocols" -version = "0.7.0" +version = "0.8.0" edition.workspace = true authors.workspace = true license.workspace = true @@ -23,8 +23,8 @@ sha3 = { version = "0.10.6", default-features = false } aes = { version = "0.8.2", default-features = false } itertools.workspace = true byteorder = { version = "1.4", default-features = false } -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } -schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } +schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" } [dev-dependencies] blake2.workspace = true diff --git a/oblivious_transfer/src/ot_based_multiplication/base_ot_multi_party_pairwise.rs b/oblivious_transfer/src/ot_based_multiplication/base_ot_multi_party_pairwise.rs index 500f7c5d..935ed659 100644 --- a/oblivious_transfer/src/ot_based_multiplication/base_ot_multi_party_pairwise.rs +++ b/oblivious_transfer/src/ot_based_multiplication/base_ot_multi_party_pairwise.rs @@ -55,7 +55,7 @@ pub struct BaseOTOutput { Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize, )] #[serde(bound = "")] -pub struct SenderPubKeyAndProof(SenderPubKey, PokDiscreteLog); +pub struct SenderPubKeyAndProof(pub SenderPubKey, PokDiscreteLog); impl Participant { pub fn init( diff --git a/oblivious_transfer/src/ot_based_multiplication/batch_mul_multi_party.rs b/oblivious_transfer/src/ot_based_multiplication/batch_mul_multi_party.rs index a91bac0e..8de81c4d 100644 --- a/oblivious_transfer/src/ot_based_multiplication/batch_mul_multi_party.rs +++ b/oblivious_transfer/src/ot_based_multiplication/batch_mul_multi_party.rs @@ -54,7 +54,7 @@ pub struct Message1( /// Message sent from Party1 to Party2 of multiplication protocol. 
This message is created after Party1 processes `Message1` #[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Message2(CorrelationTag, RLC, MaskedInputs); +pub struct Message2(pub CorrelationTag, RLC, MaskedInputs); /// A participant's output on completion of the multiplication protocol #[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] diff --git a/proof_system/Cargo.toml b/proof_system/Cargo.toml index 61f9b0d7..14dcaf58 100644 --- a/proof_system/Cargo.toml +++ b/proof_system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "proof_system" -version = "0.29.0" +version = "0.30.0" edition.workspace = true authors.workspace = true license.workspace = true @@ -27,18 +27,18 @@ zeroize.workspace = true itertools.workspace = true aead = {version = "0.5.2", default-features = false, features = [ "alloc" ]} chacha20poly1305 = {version = "0.10.1", default-features = false} -bbs_plus = { version = "0.20.0", default-features = false, path = "../bbs_plus" } -schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" } -vb_accumulator = { version = "0.24.0", default-features = false, path = "../vb_accumulator" } -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } -saver = { version = "0.16.0", default-features = false, path = "../saver" } -coconut-crypto = { version = "0.9.0", default-features = false, path = "../coconut" } +bbs_plus = { version = "0.21.0", default-features = false, path = "../bbs_plus" } +schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" } +vb_accumulator = { version = "0.25.0", default-features = false, path = "../vb_accumulator" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } +saver = { version = "0.17.0", default-features = false, path = "../saver" } +coconut-crypto = { version = "0.10.0", default-features = false, path = "../coconut" } merlin = { package = "dock_merlin", version = "3.0.0", default-features = false, path = "../merlin" } -legogroth16 = { version = "0.13.0", default-features = false, features = ["circom", "aggregation"], path = "../legogroth16" } -bulletproofs_plus_plus = { version = "0.4.0", default-features = false, path = "../bulletproofs_plus_plus" } -smc_range_proof = { version = "0.4.0", default-features = false, path = "../smc_range_proof" } -short_group_sig = { version = "0.2.0", default-features = false, path = "../short_group_sig" } -kvac = { version = "0.3.0", default-features = false, path = "../kvac" } +legogroth16 = { version = "0.14.0", default-features = false, features = ["circom", "aggregation"], path = "../legogroth16" } +bulletproofs_plus_plus = { version = "0.5.0", default-features = false, path = "../bulletproofs_plus_plus" } +smc_range_proof = { version = "0.5.0", default-features = false, path = "../smc_range_proof" } +short_group_sig = { version = "0.3.0", default-features = false, path = "../short_group_sig" } +kvac = { version = "0.4.0", default-features = false, path = "../kvac" } [dev-dependencies] ark-bls12-381.workspace = true diff --git a/saver/Cargo.toml b/saver/Cargo.toml index 00f034a4..395295ca 100644 --- a/saver/Cargo.toml +++ b/saver/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "saver" -version = "0.16.0" +version = "0.17.0" edition.workspace = true authors.workspace = true license.workspace = true @@ -20,8 +20,8 @@ rayon = {workspace = true, optional = true} serde.workspace = true serde_with.workspace = true zeroize.workspace = true 
-dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } -legogroth16 = { version = "0.13.0", default-features = false, features = ["aggregation"], path = "../legogroth16" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } +legogroth16 = { version = "0.14.0", default-features = false, features = ["aggregation"], path = "../legogroth16" } merlin = { package = "dock_merlin", version = "3.0.0", default-features = false, path = "../merlin" } [dev-dependencies] diff --git a/schnorr_pok/Cargo.toml b/schnorr_pok/Cargo.toml index 97680972..8ffe4321 100644 --- a/schnorr_pok/Cargo.toml +++ b/schnorr_pok/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "schnorr_pok" -version = "0.18.0" +version = "0.19.0" edition.workspace = true authors.workspace = true license.workspace = true @@ -19,7 +19,7 @@ ark-ec.workspace = true ark-std.workspace = true rayon = {workspace = true, optional = true} digest.workspace = true -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } serde.workspace = true serde_with.workspace = true zeroize.workspace = true diff --git a/secret_sharing_and_dkg/Cargo.toml b/secret_sharing_and_dkg/Cargo.toml index 9f843f19..c35ac3dd 100644 --- a/secret_sharing_and_dkg/Cargo.toml +++ b/secret_sharing_and_dkg/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "secret_sharing_and_dkg" -version = "0.11.0" +version = "0.12.0" edition.workspace = true authors.workspace = true license.workspace = true repository.workspace = true -description = "Secret sharing schemes like Shamir's, Feldman's, Pedersen's and Publicly Verifiable Secret Sharing scheme and DKGs like FROST" +description = "Secret sharing schemes like Shamir's, Feldman's, Pedersen's and Publicly Verifiable Secret Sharing scheme and DKGs like Gennaro's and FROST's" keywords = ["secret-sharing", "VSS", "PVSS", "DKG", "Shamir"] [dependencies] @@ -19,8 +19,8 @@ rayon = { workspace = true, optional = true } serde.workspace = true serde_with.workspace = true zeroize.workspace = true -dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" } -schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" } +dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" } +schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" } [dev-dependencies] blake2.workspace = true diff --git a/secret_sharing_and_dkg/README.md b/secret_sharing_and_dkg/README.md index 9d347f4b..5a7968d7 100644 --- a/secret_sharing_and_dkg/README.md +++ b/secret_sharing_and_dkg/README.md @@ -9,7 +9,7 @@ Key Generation (DKG) and Publicly Verifiable Secret Sharing (PVSS) algorithms. D 1. [Pedersen Distributed Verifiable Secret Sharing](./src/pedersen_dvss.rs) 1. [Feldman Verifiable Secret Sharing](./src/feldman_vss.rs) 1. [Feldman Distributed Verifiable Secret Sharing](./src/feldman_dvss_dkg.rs) -1. [Secure Distributed Key Generation for Discrete-Log Based Cryptosystems](./src/gennaro_dkg.rs) +1. [Gennaro DKG from the paper Secure Distributed Key Generation for Discrete-Log Based Cryptosystems](./src/gennaro_dkg.rs) 1. [Distributed Key Generation from FROST](./src/frost_dkg.rs) 1. [Distributed discrete log (DLOG) check](./src/distributed_dlog_check) 1. 
[Publicly Verifiable Secret Sharing](./src/baghery_pvss) \ No newline at end of file diff --git a/secret_sharing_and_dkg/src/abcp_dkg.rs b/secret_sharing_and_dkg/src/abcp_dkg.rs new file mode 100644 index 00000000..57e646ff --- /dev/null +++ b/secret_sharing_and_dkg/src/abcp_dkg.rs @@ -0,0 +1,538 @@ +//! Distributed Key Generation protocol as described in Fig. 4 of the paper [VSS from Distributed ZK Proofs and Applications](https://eprint.iacr.org/2023/992.pdf) + +#![allow(non_snake_case)] + +use crate::{ + common::{ParticipantId, ShareId}, + error::SSError, + shamir_ss, +}; +use ark_ec::{AffineRepr, CurveGroup}; +use ark_ff::PrimeField; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{cfg_into_iter, collections::BTreeMap, rand::RngCore, vec, vec::Vec, UniformRand}; +use digest::Digest; +use dock_crypto_utils::{ + commitment::PedersenCommitmentKey, expect_equality, serde_utils::ArkObjectBytes, +}; +use schnorr_pok::compute_random_oracle_challenge; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; +use zeroize::{Zeroize, ZeroizeOnDrop}; + +/// Share of the secret generated by a party +#[serde_as] +#[derive( + Default, + Clone, + Debug, + PartialEq, + Eq, + Zeroize, + ZeroizeOnDrop, + CanonicalSerialize, + CanonicalDeserialize, + Serialize, + Deserialize, +)] +pub struct VerifiableShare { + #[zeroize(skip)] + pub id: ShareId, + #[zeroize(skip)] + pub threshold: ShareId, + #[serde_as(as = "ArkObjectBytes")] + pub share: F, + pub blinding: F, + pub blinding_prime: F, +} + +/// State of a party in Round 1. /// CMG is the group where commitments reside and PKG is the group of the public key +#[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] +pub struct Round1> { + pub id: ParticipantId, + pub threshold: ShareId, + pub secret: PKG::ScalarField, + pub h: PKG, + pub shares: Vec>, + pub y_0: PKG::ScalarField, + pub y_0_prime: PKG::ScalarField, + /// Stores broadcast messages received from other parties in this round + pub received_msgs: BTreeMap>, +} + +/// Message broadcasted by a party in Round 1 +#[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] +pub struct Round1Msg> { + pub sender_id: ParticipantId, + pub C: Vec, + pub C_prime: Vec, + pub C_0: PKG, + pub C_0_prime: PKG, + pub resp: DensePolynomial, +} + +/// State of a party in Round 2. 
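+/// A rough end-to-end flow, mirroring the test at the bottom of this file (an editorial sketch, assuming `Blake2b512` as the challenge digest):
+/// ```ignore
+/// let (mut r1, msg1) = Round1::start::<_, Blake2b512>(rng, my_id, threshold, total, &comm_key, &pk_gen)?;
+/// // broadcast `msg1`, then absorb every other party's message:
+/// // r1.add_received_message(their_msg1)?;
+/// let (mut r2, msg2) = r1.finish()?;
+/// // broadcast `msg2`, then absorb the others' round-2 messages and shares:
+/// // r2.add_received_message(their_msg2)?;
+/// // r2.add_received_share::<Blake2b512>(sender_id, their_share, &comm_key, &pk_gen)?;
+/// let (own_sk, own_pk, threshold_pk) = r2.finish()?;
+/// ```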
+#[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] +pub struct Round2> { + pub round1_state: Round1, + /// Stores broadcast messages received from other parties in this round + pub received_msgs: BTreeMap>, + /// Stores shares received from other parties in this round + pub received_shares: BTreeMap>, +} + +/// Message broadcasted by a party in Round 2 +#[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] +pub struct Round2Msg { + pub sender_id: ParticipantId, + pub h: PKG, + pub y_0: PKG::ScalarField, + pub y_0_prime: PKG::ScalarField, +} + +impl> Round1 { + pub fn start<'a, R: RngCore, D: Digest>( + rng: &mut R, + participant_id: ParticipantId, + threshold: ShareId, + total: ShareId, + comm_key: &PedersenCommitmentKey, + pk_gen: impl Into<&'a PKG> + Clone, + ) -> Result<(Self, Round1Msg), SSError> { + if participant_id == 0 || participant_id > total { + return Err(SSError::InvalidParticipantId(participant_id)); + } + let secret = PKG::ScalarField::rand(rng); + let (shares, f) = shamir_ss::deal_secret(rng, secret, threshold, total)?; + let b = as DenseUVPolynomial>::rand( + threshold as usize - 1, + rng, + ); + debug_assert_eq!(f.degree(), b.degree()); + let b_evals = cfg_into_iter!(1..=total) + .map(|i| b.evaluate(&PKG::ScalarField::from(i))) + .collect::>(); + let b_0 = b.coeffs[0]; + let y = (0..total) + .map(|_| PKG::ScalarField::rand(rng)) + .collect::>(); + let y_prime = (0..total) + .map(|_| PKG::ScalarField::rand(rng)) + .collect::>(); + let y_0 = PKG::ScalarField::rand(rng); + let y_0_prime = PKG::ScalarField::rand(rng); + let pk_gen = pk_gen.into().into_group(); + let h = pk_gen * secret; + let C_0 = (pk_gen * (b_0 * y_0)).into_affine(); + let C_0_prime = ((pk_gen + h) * y_0_prime).into_affine(); + let C = CMG::Group::normalize_batch( + &cfg_into_iter!(0..total as usize) + .map(|i| comm_key.commit_as_projective(&b_evals[i], &y[i])) + .collect::>(), + ); + let C_prime = CMG::Group::normalize_batch( + &cfg_into_iter!(0..total as usize) + .map(|i| comm_key.commit_as_projective(&shares.0[i].share, &y_prime[i])) + .collect::>(), + ); + + let mut chal_bytes = vec![]; + comm_key.g.serialize_compressed(&mut chal_bytes)?; + comm_key.h.serialize_compressed(&mut chal_bytes)?; + C_0.serialize_compressed(&mut chal_bytes)?; + C_0_prime.serialize_compressed(&mut chal_bytes)?; + for i in 0..C.len() { + C[i].serialize_compressed(&mut chal_bytes)?; + C_prime[i].serialize_compressed(&mut chal_bytes)?; + } + let d = compute_random_oracle_challenge::(&chal_bytes); + let r = &b - &(&f * d); + let msg = Round1Msg { + sender_id: participant_id, + C, + C_prime, + C_0, + C_0_prime, + resp: r, + }; + let shares = cfg_into_iter!(shares.0) + .zip(cfg_into_iter!(y)) + .zip(cfg_into_iter!(y_prime)) + .map(|((s, y_i), y_i_prime)| VerifiableShare { + id: s.id, + threshold, + share: s.share, + blinding: y_i, + blinding_prime: y_i_prime, + }) + .collect::>(); + let state = Round1 { + id: participant_id, + threshold, + secret, + h: h.into_affine(), + shares, + y_0, + y_0_prime, + received_msgs: BTreeMap::new(), + }; + Ok((state, msg)) + } + + pub fn add_received_message(&mut self, msg: Round1Msg) -> Result<(), SSError> { + if msg.sender_id == self.id { + return Err(SSError::SenderIdSameAsReceiver(msg.sender_id, self.id)); + } + if self.received_msgs.contains_key(&msg.sender_id) { + return Err(SSError::AlreadyProcessedFromSender(msg.sender_id)); + } + if msg.resp.degree() != self.threshold as usize - 1 { + return 
Err(SSError::DoesNotSupportThreshold(self.threshold)); + } + expect_equality!( + msg.C.len(), + msg.C_prime.len(), + SSError::InvalidNoOfCommitments + ); + expect_equality!( + msg.C.len(), + self.shares.len(), + SSError::InvalidNoOfCommitments + ); + self.received_msgs.insert(msg.sender_id, msg); + Ok(()) + } + + /// This should be called after "sufficient" messages have been received. + /// "sufficient" might be just the threshold or greater depending on the number of faults to be + /// tolerated. + pub fn finish(self) -> Result<(Round2, Round2Msg), SSError> { + // +1 because `self.received_msgs` does not contain message from itself + if self.threshold > (self.received_msgs.len() as ParticipantId + 1) { + return Err(SSError::BelowThreshold( + self.threshold, + self.received_msgs.len() as ParticipantId, + )); + } + let round1_state = self.clone(); + let msg = Round2Msg { + sender_id: self.id, + h: self.h, + y_0: self.y_0, + y_0_prime: self.y_0_prime, + }; + let round2 = Round2 { + round1_state, + received_msgs: BTreeMap::new(), + received_shares: BTreeMap::new(), + }; + Ok((round2, msg)) + } +} + +impl> Round2 { + pub fn add_received_message(&mut self, msg: Round2Msg) -> Result<(), SSError> { + if self.round1_state.id == msg.sender_id { + return Err(SSError::SenderIdSameAsReceiver( + self.round1_state.id, + msg.sender_id, + )); + } + if self.received_msgs.contains_key(&msg.sender_id) { + return Err(SSError::AlreadyProcessedFromSender(msg.sender_id)); + } + if !self.round1_state.received_msgs.contains_key(&msg.sender_id) { + return Err(SSError::ParticipantNotAllowedInPhase2(msg.sender_id)); + } + self.received_msgs.insert(msg.sender_id, msg); + Ok(()) + } + + pub fn add_received_share<'a, D: Digest>( + &mut self, + sender_id: ParticipantId, + share: VerifiableShare, + comm_key: &PedersenCommitmentKey, + pk_gen: impl Into<&'a PKG> + Clone, + ) -> Result<(), SSError> { + if self.round1_state.id == sender_id { + return Err(SSError::SenderIdSameAsReceiver( + self.round1_state.id, + sender_id, + )); + } + if self.round1_state.id != share.id { + return Err(SSError::UnequalParticipantAndShareId( + self.round1_state.id, + share.id, + )); + } + if self.received_shares.contains_key(&sender_id) { + return Err(SSError::AlreadyProcessedFromSender(sender_id)); + } + self.verify_share::(sender_id, &share, comm_key, pk_gen)?; + self.received_shares.insert(sender_id, share); + Ok(()) + } + + pub fn finish(self) -> Result<(PKG::ScalarField, PKG, PKG), SSError> { + // +1 because `self.received_msgs` does not contain message from itself + if self.round1_state.threshold > (self.received_msgs.len() as ParticipantId + 1) { + return Err(SSError::BelowThreshold( + self.round1_state.threshold, + self.received_msgs.len() as ParticipantId, + )); + } + if self.received_shares.len() != self.round1_state.received_msgs.len() { + return Err(SSError::MissingSomeParticipants( + (self.received_shares.len() - self.received_msgs.len()) as ParticipantId, + )); + } + let tpk = + self.received_msgs.values().map(|m| m.h).sum::() + self.round1_state.h; + Ok(( + self.round1_state.secret, + self.round1_state.h, + tpk.into_affine(), + )) + } + + /// Verify a received share. 
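+/// The check recomputes the Fiat-Shamir challenge and verifies the share against the sender's Round 1 commitments (`C`, `C_prime`, `C_0`, `C_0_prime`) and its Round 2 message.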
Used during normal operation or in processing complaints + pub fn verify_share<'a, D: Digest>( + &self, + sender_id: ParticipantId, + share: &VerifiableShare, + comm_key: &PedersenCommitmentKey, + pk_gen: impl Into<&'a PKG> + Clone, + ) -> Result<(), SSError> { + let round1_msg = self + .round1_state + .received_msgs + .get(&sender_id) + .ok_or(SSError::ParticipantNotAllowedInPhase2(sender_id))?; + let round2_msg = self + .received_msgs + .get(&sender_id) + .ok_or(SSError::MissingRound2MessageFrom(sender_id))?; + let self_idx = self.round1_state.id as usize - 1; + if comm_key.commit_as_projective(&share.share, &share.blinding_prime) + != round1_msg.C_prime[self_idx].into_group() + { + return Err(SSError::InvalidShare); + } + let pk_gen = *pk_gen.into(); + if (pk_gen + round2_msg.h) * round2_msg.y_0_prime != round1_msg.C_0_prime.into_group() { + return Err(SSError::InvalidShare); + } + let mut chal_bytes = vec![]; + comm_key.g.serialize_compressed(&mut chal_bytes)?; + comm_key.h.serialize_compressed(&mut chal_bytes)?; + round1_msg.C_0.serialize_compressed(&mut chal_bytes)?; + round1_msg.C_0_prime.serialize_compressed(&mut chal_bytes)?; + for i in 0..round1_msg.C.len() { + round1_msg.C[i].serialize_compressed(&mut chal_bytes)?; + round1_msg.C_prime[i].serialize_compressed(&mut chal_bytes)?; + } + let d = compute_random_oracle_challenge::(&chal_bytes); + let h_prime = pk_gen * round1_msg.resp.coeffs[0] + round2_msg.h * d; + if round1_msg.C_0.into_group() != h_prime * round2_msg.y_0 { + return Err(SSError::InvalidShare); + } + if comm_key.commit_as_projective( + &(round1_msg + .resp + .evaluate(&CMG::ScalarField::from(self.round1_state.id)) + + share.share * d), + &share.blinding, + ) != round1_msg.C[self_idx].into_group() + { + return Err(SSError::InvalidShare); + } + Ok(()) + } + + /// Called when got >= `threshold` complaints for `participant_id` and disqualifying a participant + pub fn remove_participant(&mut self, participant_id: ParticipantId) -> Result<(), SSError> { + if self.round1_state.id == participant_id { + return Err(SSError::CannotRemoveSelf(participant_id)); + } + self.received_shares.remove(&participant_id); + self.round1_state.received_msgs.remove(&participant_id); + Ok(()) + } + + /// Get share given by party with id `id`. 
+ pub fn get_share_of_party( + &self, + id: ParticipantId, + ) -> Option<&VerifiableShare> { + self.received_shares.get(&id) + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use ark_bls12_381::{G1Affine, G2Affine}; + use ark_ec::CurveGroup; + use ark_ff::PrimeField; + use ark_std::rand::{rngs::StdRng, SeedableRng}; + use blake2::Blake2b512; + use std::time::{Duration, Instant}; + + #[test] + fn distributed_key_generation() { + let mut rng = StdRng::seed_from_u64(0u64); + let ped_comm_key = PedersenCommitmentKey::::new::(b"test"); + let pk_gen_g1 = G1Affine::rand(&mut rng); + let pk_gen_g2 = G2Affine::rand(&mut rng); + + fn check>( + rng: &mut StdRng, + ped_comm_key: &PedersenCommitmentKey, + pk_gen: &PKG, + ) { + for (threshold, total) in vec![ + (2, 2), + (2, 3), + (2, 4), + (2, 5), + (3, 3), + (3, 4), + (3, 5), + (4, 5), + (4, 8), + (4, 9), + (4, 12), + (5, 5), + (5, 7), + (5, 10), + (5, 13), + (7, 10), + (7, 15), + ] { + let mut all_round1s = vec![]; + let mut all_round2s = vec![]; + let mut all_secrets = vec![]; + let mut all_round1_msgs = vec![]; + let mut all_round2_msgs = vec![]; + + println!("For {}-of-{}", threshold, total); + let mut round1_time = Duration::default(); + let mut round2_time = Duration::default(); + + // Each participant starts Round1 + for i in 1..=total { + let start = Instant::now(); + let (round1, msgs) = Round1::start::<_, Blake2b512>( + rng, + i as ParticipantId, + threshold as ShareId, + total as ShareId, + ped_comm_key, + pk_gen, + ) + .unwrap(); + round1_time += start.elapsed(); + + all_secrets.push(round1.secret.clone()); + all_round1s.push(round1); + all_round1_msgs.push(msgs); + } + + let start = Instant::now(); + // Each participant receives messages during Round1 + for i in 0..total { + for j in 0..total { + if i != j { + all_round1s[i] + .add_received_message(all_round1_msgs[j].clone()) + .unwrap(); + } + } + } + + // Each participant ends round1 and begins Round 2 + for i in 0..total { + let (round2, msgs) = all_round1s[i].clone().finish().unwrap(); + all_round2s.push(round2); + all_round2_msgs.push(msgs); + } + round1_time += start.elapsed(); + + let start = Instant::now(); + // Each participant receives messages during Round2 + for i in 0..total { + for j in 0..total { + if i != j { + all_round2s[i] + .add_received_message(all_round2_msgs[j].clone()) + .unwrap(); + } + } + } + round2_time += start.elapsed(); + + let start = Instant::now(); + // Each participant receives shares during Round2 + for i in 0..total { + for j in 0..total { + if i != j { + let share = all_round2s[j].round1_state.shares[i].clone(); + all_round2s[i] + .add_received_share::( + (j + 1) as ParticipantId, + share, + ped_comm_key, + pk_gen, + ) + .unwrap(); + } + } + } + round2_time += start.elapsed(); + + for i in 0..total { + assert_eq!(all_round2s[i].received_msgs.len(), total - 1); + assert_eq!(all_round2s[i].received_shares.len(), total - 1); + } + + // Each participant ends Round2 and ends up with his own keys and the threshold public key + let mut tk = None; + for i in 0..total { + let start = Instant::now(); + let (own_sk, own_pk, threshold_pk) = all_round2s[i].clone().finish().unwrap(); + round2_time += start.elapsed(); + assert_eq!(own_sk, all_secrets[i]); + assert_eq!( + own_pk, + pk_gen.mul_bigint(own_sk.into_bigint()).into_affine() + ); + if tk.is_none() { + tk = Some(threshold_pk); + } else { + // All generate the same threshold key + assert_eq!(tk, Some(threshold_pk)) + } + } + + assert_eq!( + tk.unwrap(), + (*pk_gen * 
all_secrets.into_iter().sum::()).into_affine() + ); + + println!("Time taken for round 1 {:?}", round1_time); + println!("Time taken for round 2 {:?}", round2_time); + } + } + + check(&mut rng, &ped_comm_key, &pk_gen_g1); + check(&mut rng, &ped_comm_key, &pk_gen_g2); + } +} diff --git a/secret_sharing_and_dkg/src/baghery_feldman_vss.rs b/secret_sharing_and_dkg/src/baghery_feldman_vss.rs new file mode 100644 index 00000000..87bebb7f --- /dev/null +++ b/secret_sharing_and_dkg/src/baghery_feldman_vss.rs @@ -0,0 +1,219 @@ +//! Feldman's Verifiable Secret Sharing Scheme, with faster verification but slower sharing, by K. Baghery. +//! As described in Fig 3 of the paper [A Unified Framework for Verifiable Secret Sharing](https://eprint.iacr.org/2023/1669) + +use crate::{ + common::{Share, ShareId, Shares}, + error::SSError, + shamir_ss, +}; +use ark_ec::{AffineRepr, CurveGroup}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{cfg_into_iter, rand::RngCore, vec, vec::Vec, UniformRand}; +use digest::Digest; +use dock_crypto_utils::{commitment::PedersenCommitmentKey, serde_utils::ArkObjectBytes}; +use schnorr_pok::compute_random_oracle_challenge; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Proof that the dealer shared the secret correctly. +#[serde_as] +#[derive( + Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize, +)] +#[serde(bound = "")] +pub struct Proof { + #[serde_as(as = "Vec")] + pub commitments: Vec, + #[serde_as(as = "ArkObjectBytes")] + pub resp: DensePolynomial, + #[serde_as(as = "ArkObjectBytes")] + pub challenge: G::ScalarField, +} + +/// Generate a random secret with its shares according to Shamir's secret sharing. +/// At least `threshold` number of shares are needed to reconstruct the secret. 
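+/// The dealer's proof follows Fig. 3 of the paper: Pedersen commitments to the shares (blinded by evaluations of a random polynomial `r`) together with a response polynomial `z = r + d*f` for a Fiat-Shamir challenge `d`; each party checks its own share against these in `Proof::verify`.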
+/// Returns the secret, shares, the polynomial and proof to verify the correct sharing +pub fn deal_random_secret( + rng: &mut R, + threshold: ShareId, + total: ShareId, + comm_key: &PedersenCommitmentKey, +) -> Result< + ( + G::ScalarField, + Shares, + DensePolynomial, + Proof, + ), + SSError, +> { + let secret = G::ScalarField::rand(rng); + let (shares, sharing_poly, proof) = + deal_secret::<_, _, D>(rng, secret, threshold, total, comm_key)?; + Ok((secret, shares, sharing_poly, proof)) +} + +/// Same as `deal_random_secret` above but accepts the secret to share +pub fn deal_secret( + rng: &mut R, + secret: G::ScalarField, + threshold: ShareId, + total: ShareId, + comm_key: &PedersenCommitmentKey, +) -> Result< + ( + Shares, + DensePolynomial, + Proof, + ), + SSError, +> { + let (shares, f) = shamir_ss::deal_secret(rng, secret, threshold, total)?; + let r = as DenseUVPolynomial>::rand( + threshold as usize - 1, + rng, + ); + debug_assert_eq!(f.degree(), r.degree()); + let r_evals = cfg_into_iter!(1..=total) + .map(|i| r.evaluate(&G::ScalarField::from(i))) + .collect::>(); + let commitments = G::Group::normalize_batch( + &cfg_into_iter!(0..total as usize) + .map(|i| comm_key.commit_as_projective(&shares.0[i].share, &r_evals[i])) + .collect::>(), + ); + let mut chal_bytes = vec![]; + comm_key.g.serialize_compressed(&mut chal_bytes)?; + comm_key.h.serialize_compressed(&mut chal_bytes)?; + for c in &commitments { + c.serialize_compressed(&mut chal_bytes)?; + } + let d = compute_random_oracle_challenge::(&chal_bytes); + let z = r + (&f * d); + Ok(( + shares, + f, + Proof { + commitments, + resp: z, + challenge: d, + }, + )) +} + +impl Proof { + pub fn verify( + &self, + share: &Share, + comm_key: &PedersenCommitmentKey, + ) -> Result<(), SSError> { + if self.resp.degree() != share.threshold as usize - 1 { + return Err(SSError::DoesNotSupportThreshold(share.threshold)); + } + let mut chal_bytes = vec![]; + comm_key.g.serialize_compressed(&mut chal_bytes)?; + comm_key.h.serialize_compressed(&mut chal_bytes)?; + for c in &self.commitments { + c.serialize_compressed(&mut chal_bytes)?; + } + let d = compute_random_oracle_challenge::(&chal_bytes); + let r = self.resp.evaluate(&G::ScalarField::from(share.id)) - d * share.share; + if self.commitments[share.id as usize - 1] != comm_key.commit(&share.share, &r) { + return Err(SSError::InvalidShare); + } + Ok(()) + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use ark_bls12_381::{G1Affine, G2Affine}; + use ark_ff::One; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress}; + use ark_std::rand::{rngs::StdRng, SeedableRng}; + use blake2::Blake2b512; + use std::time::Instant; + use test_utils::test_serialization; + + #[test] + fn baghery_verifiable_secret_sharing() { + let mut rng = StdRng::seed_from_u64(0u64); + let comm_key1 = PedersenCommitmentKey::::new::(b"test"); + let comm_key2 = PedersenCommitmentKey::::new::(b"test"); + + fn check(rng: &mut StdRng, comm_key: &PedersenCommitmentKey) { + let mut checked_serialization = false; + for (threshold, total) in vec![ + (2, 2), + (2, 3), + (2, 4), + (2, 5), + (3, 3), + (3, 4), + (3, 5), + (4, 5), + (4, 8), + (4, 9), + (4, 12), + (5, 5), + (5, 7), + (5, 10), + (5, 13), + (7, 10), + (7, 15), + ] { + println!("For {}-of-{} sharing", threshold, total); + let start = Instant::now(); + let (secret, shares, _, proof) = deal_random_secret::<_, G, Blake2b512>( + rng, + threshold as ShareId, + total as ShareId, + &comm_key, + ) + .unwrap(); + println!("Time to create shares and proof 
{:?}", start.elapsed()); + println!( + "Proof size is {} bytes", + proof.serialized_size(Compress::Yes) + ); + + let mut noted_time = false; + + for share in &shares.0 { + // Wrong share fails to verify + let mut wrong_share = share.clone(); + wrong_share.share += G::ScalarField::one(); + assert!(proof.verify::(&wrong_share, &comm_key).is_err()); + + // Correct share verifies + let start = Instant::now(); + proof.verify::(&share, &comm_key).unwrap(); + if !noted_time { + println!("Time to verify share is {:?}", start.elapsed()); + noted_time = true; + } + } + + // Its assumed that reconstructor verifies each share before calling `reconstruct_secret` + let s = shares.reconstruct_secret().unwrap(); + assert_eq!(s, secret); + + // Test serialization + if !checked_serialization { + test_serialization!(Shares, shares); + test_serialization!(Share, shares.0[0]); + test_serialization!(Proof, proof); + checked_serialization = true; + } + } + } + + check(&mut rng, &comm_key1); + check(&mut rng, &comm_key2); + } +} diff --git a/secret_sharing_and_dkg/src/baghery_pvss/different_base.rs b/secret_sharing_and_dkg/src/baghery_pvss/different_base.rs index d18e20cd..0bca3fa4 100644 --- a/secret_sharing_and_dkg/src/baghery_pvss/different_base.rs +++ b/secret_sharing_and_dkg/src/baghery_pvss/different_base.rs @@ -1,14 +1,9 @@ -use crate::{ - baghery_pvss::{validate_threshold, Share}, - common::ShareId, - error::SSError, - shamir_ss, -}; +use crate::{baghery_pvss::Share, common::ShareId, error::SSError, shamir_ss}; use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{Field, PrimeField}; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ark_std::{rand::RngCore, vec, vec::Vec, UniformRand}; +use ark_std::{cfg_into_iter, rand::RngCore, vec, vec::Vec, UniformRand}; use digest::Digest; use dock_crypto_utils::{expect_equality, msm::WindowTable, serde_utils::ArkObjectBytes}; use schnorr_pok::compute_random_oracle_challenge; @@ -16,6 +11,9 @@ use serde::{Deserialize, Serialize}; use serde_with::serde_as; use zeroize::{Zeroize, ZeroizeOnDrop}; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + /// Share encrypted for the party #[serde_as] #[derive( @@ -68,6 +66,8 @@ pub struct Proof { /// commitments to the shares with one encryption for each public key. Assumes the public keys are given /// in the increasing order of their ids in the context of secret sharing and number of public keys equals `total`. /// At least `threshold` number of share-commitments are needed to reconstruct the commitment to the secret. +/// If additional faults need to be handled, then the threshold should be increased, eg. 
if `f` number of faults +/// need to be handled and `threshold` number of parties are required to reconstruct the secret, `total >= threshold + f` /// `pk_base` is the base of the public keys (`g`) and `target_base` is the base for the secret share commitment (`j`) pub fn deal_random_secret<'a, R: RngCore, G: AffineRepr, D: Digest>( rng: &mut R, @@ -115,13 +115,15 @@ pub fn deal_secret<'a, R: RngCore, G: AffineRepr, D: Digest>( ), SSError, > { - validate_threshold(threshold, total)?; let (shares, f) = shamir_ss::deal_secret(rng, secret, threshold, total)?; let r = as DenseUVPolynomial>::rand( threshold as usize - 1, rng, ); debug_assert_eq!(f.degree(), r.degree()); + let r_evals = cfg_into_iter!(1..=total) + .map(|i| r.evaluate(&G::ScalarField::from(i))) + .collect::>(); let mut chal_bytes = vec![]; let mut enc_shares = vec![]; let mask_base = WindowTable::new(total as usize, *target_base + pk_base); @@ -131,7 +133,7 @@ pub fn deal_secret<'a, R: RngCore, G: AffineRepr, D: Digest>( let share_i = &shares.0[i]; debug_assert_eq!(share_i.id as usize, i + 1); // Use same blinding for both relations - let blinding = r.evaluate(&G::ScalarField::from(share_i.id)); + let blinding = r_evals[i]; let t_mask = pk * blinding; // `h_i * k_i` let mask = (pk * share_i.share).into_affine(); @@ -175,7 +177,6 @@ impl Proof { pk_base: &G, target_base: &G, ) -> Result<(), SSError> { - validate_threshold(threshold, total)?; expect_equality!( enc_shares.len(), public_keys.len(), diff --git a/secret_sharing_and_dkg/src/baghery_pvss/mod.rs b/secret_sharing_and_dkg/src/baghery_pvss/mod.rs index 24106488..7523a6c5 100644 --- a/secret_sharing_and_dkg/src/baghery_pvss/mod.rs +++ b/secret_sharing_and_dkg/src/baghery_pvss/mod.rs @@ -21,7 +21,7 @@ //! `j * k_i` is the message. This is implemented in [different_base](./different_base.rs). Note that both `j` and `g` must be in the same group. //! 
-use crate::{common::ShareId, error::SSError}; +use crate::common::ShareId; use ark_ec::AffineRepr; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::vec::Vec; @@ -57,11 +57,3 @@ pub struct Share { #[serde_as(as = "ArkObjectBytes")] pub share: G, } - -pub(crate) fn validate_threshold(threshold: ShareId, total: ShareId) -> Result<(), SSError> { - // This looks different from the paper since paper assume `t+1` parties can reconstruct but the code assumes `t` parties can reconstruct - if total < 2 * threshold { - return Err(SSError::InvalidThresholdOrTotal(threshold, total)); - } - Ok(()) -} diff --git a/secret_sharing_and_dkg/src/baghery_pvss/same_base.rs b/secret_sharing_and_dkg/src/baghery_pvss/same_base.rs index 85383554..2576b026 100644 --- a/secret_sharing_and_dkg/src/baghery_pvss/same_base.rs +++ b/secret_sharing_and_dkg/src/baghery_pvss/same_base.rs @@ -1,14 +1,9 @@ -use crate::{ - baghery_pvss::{validate_threshold, Share}, - common::ShareId, - error::SSError, - shamir_ss, -}; +use crate::{baghery_pvss::Share, common::ShareId, error::SSError, shamir_ss}; use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{Field, PrimeField}; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ark_std::{rand::RngCore, vec, vec::Vec, UniformRand}; +use ark_std::{cfg_into_iter, rand::RngCore, vec, vec::Vec, UniformRand}; use digest::Digest; use dock_crypto_utils::{expect_equality, serde_utils::ArkObjectBytes}; use schnorr_pok::compute_random_oracle_challenge; @@ -16,6 +11,9 @@ use serde::{Deserialize, Serialize}; use serde_with::serde_as; use zeroize::{Zeroize, ZeroizeOnDrop}; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + /// Share encrypted for the party #[serde_as] #[derive( @@ -66,6 +64,8 @@ pub struct Proof { /// commitments to the shares with one encryption for each public key. Assumes the public keys are given /// in the increasing order of their ids in the context of secret sharing and number of public keys equals `total`. /// At least `threshold` number of share-commitments are needed to reconstruct the commitment to the secret. +/// If additional faults need to be handled, then the threshold should be increased, eg. 
if `f` number of faults +/// need to be handled and `threshold` number of parties are required to reconstruct the secret, `total >= threshold + f` pub fn deal_random_secret<'a, R: RngCore, G: AffineRepr, D: Digest>( rng: &mut R, threshold: ShareId, @@ -101,20 +101,22 @@ pub fn deal_secret<'a, R: RngCore, G: AffineRepr, D: Digest>( ), SSError, > { - validate_threshold(threshold, total)?; let (shares, f) = shamir_ss::deal_secret(rng, secret, threshold, total)?; let r = as DenseUVPolynomial>::rand( threshold as usize - 1, rng, ); debug_assert_eq!(f.degree(), r.degree()); + let r_evals = cfg_into_iter!(1..=total) + .map(|i| r.evaluate(&G::ScalarField::from(i))) + .collect::>(); let mut chal_bytes = vec![]; let mut enc_shares = vec![]; // NOTE: The following can be done in parallel for (i, pk) in public_keys.into_iter().enumerate() { let share_i = &shares.0[i]; debug_assert_eq!(share_i.id as usize, i + 1); - let t = pk * r.evaluate(&G::ScalarField::from(share_i.id)); + let t = pk * r_evals[i]; let enc_share_i = (pk * share_i.share).into_affine(); pk.serialize_compressed(&mut chal_bytes)?; t.serialize_compressed(&mut chal_bytes)?; @@ -147,7 +149,6 @@ impl Proof { public_keys: Vec, enc_shares: &[EncryptedShare], ) -> Result<(), SSError> { - validate_threshold(threshold, total)?; expect_equality!( enc_shares.len(), public_keys.len(), diff --git a/secret_sharing_and_dkg/src/distributed_dlog_check/maliciously_secure.rs b/secret_sharing_and_dkg/src/distributed_dlog_check/maliciously_secure.rs index e83d8070..31cca4e1 100644 --- a/secret_sharing_and_dkg/src/distributed_dlog_check/maliciously_secure.rs +++ b/secret_sharing_and_dkg/src/distributed_dlog_check/maliciously_secure.rs @@ -459,7 +459,7 @@ pub mod tests { macro_rules! check { ($secret_share: ident, $secret_share_comm: ident, $comp_share: ident, $comp_share_proof: ident, $deal_func: ident, $secret_group: ident, $other_group: ident, $pairing: tt, $ck_secret: expr, $ck_poly: expr) => { let base = $other_group::rand(&mut rng); - let mut checked_serialization = true; + let mut checked_serialization = false; for (threshold, total) in vec![ (2, 2), (2, 3), diff --git a/secret_sharing_and_dkg/src/distributed_dlog_check/semi_honest.rs b/secret_sharing_and_dkg/src/distributed_dlog_check/semi_honest.rs index 7c250f04..69cd23b8 100644 --- a/secret_sharing_and_dkg/src/distributed_dlog_check/semi_honest.rs +++ b/secret_sharing_and_dkg/src/distributed_dlog_check/semi_honest.rs @@ -242,7 +242,7 @@ pub mod tests { fn check(rng: &mut StdRng, ck: &G) { let base = G::rand(rng); let share_comm_ck = G::rand(rng); - let mut checked_serialization = true; + let mut checked_serialization = false; for (threshold, total) in vec![ (2, 2), (2, 3), diff --git a/secret_sharing_and_dkg/src/error.rs b/secret_sharing_and_dkg/src/error.rs index 8727594a..ff0adbae 100644 --- a/secret_sharing_and_dkg/src/error.rs +++ b/secret_sharing_and_dkg/src/error.rs @@ -31,6 +31,8 @@ pub enum SSError { Serialization(SerializationError), UnequalNoOfSharesAndPublicKeys(usize, usize), UnexpectedNumberOfResponses(usize, usize), + MissingRound2MessageFrom(ParticipantId), + InvalidNoOfCommitments(usize, usize), } impl From for SSError { diff --git a/secret_sharing_and_dkg/src/feldman_dvss_dkg.rs b/secret_sharing_and_dkg/src/feldman_dvss_dkg.rs index e5b33f31..30729d06 100644 --- a/secret_sharing_and_dkg/src/feldman_dvss_dkg.rs +++ b/secret_sharing_and_dkg/src/feldman_dvss_dkg.rs @@ -201,7 +201,7 @@ pub mod tests { let g2 = G2::rand(&mut rng); fn check(rng: &mut StdRng, g: &G) { - let mut 
checked_serialization = true; + let mut checked_serialization = false; for (threshold, total) in vec![ (2, 2), (2, 3), diff --git a/secret_sharing_and_dkg/src/feldman_vss.rs b/secret_sharing_and_dkg/src/feldman_vss.rs index 726c379e..70bb16c5 100644 --- a/secret_sharing_and_dkg/src/feldman_vss.rs +++ b/secret_sharing_and_dkg/src/feldman_vss.rs @@ -1,4 +1,9 @@ //! Feldman Verifiable Secret Sharing Scheme. Based on the paper [A practical scheme for non-interactive verifiable secret sharing](https://www.cs.umd.edu/~gasarch/TOPICS/secretsharing/feldmanVSS.pdf) +//! The scheme works as follows for threshold `t` and total `n`: +//! 1. Dealer samples a random `t-1` degree polynomial `f = a_0 + a_1*x + a_2*x^2 + ... + a_{t-1}*x^{t-1}` such that `f(0) = a_0 = s` where `s` is the secret. +//! 2. Dealer commits to coefficients of `f` as `C = [c_0, c_1, ..., c_{t-1}] = [g*a_0, g*a_1, ..., g*a_{t-1}]` and broadcasts `C` +//! 3. Dealer creates the `n` shares as `[f(1), f(2), ..., f(n)]` and gives `f(i)` to party `P_i`. +//! 4. Each party `P_i` verifies its share as `g*f(i) == c_0 + c_1*i + c_2*i^2 + ... + c_{t-1} * i^{t-1}` (a standalone sketch of this check appears at the end of this diff) use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; use ark_ff::PrimeField; @@ -70,6 +75,8 @@ pub(crate) fn commit_to_poly( impl Share { /// Executed by each participant to verify its share received from the dealer. + /// Also, should be called by the "reconstructor" to verify that each of the shares being used in + /// reconstruction is a valid share. pub fn verify<'a, G: AffineRepr>( &self, commitment_coeffs: &CommitmentToCoefficients, @@ -80,9 +87,9 @@ impl Share { return Err(SSError::BelowThreshold(self.threshold, len)); } let powers = powers(&G::ScalarField::from(self.id as u64), self.threshold as u32); - if G::Group::msm_unchecked(&commitment_coeffs.0, &powers) - != ck.into().mul_bigint(self.share.into_bigint()) - { + let l = G::Group::msm_unchecked(&commitment_coeffs.0, &powers); + let r = *ck.into() * self.share; + if l != r { return Err(SSError::InvalidShare); } Ok(()) @@ -92,22 +99,22 @@ impl Share { #[cfg(test)] pub mod tests { use super::*; - use ark_bls12_381::Bls12_381; - use ark_ec::pairing::Pairing; + use ark_bls12_381::{G1Affine, G2Affine}; use ark_ff::One; - use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress}; use ark_std::rand::{rngs::StdRng, SeedableRng}; + use std::time::Instant; use test_utils::test_serialization; #[test] fn feldman_verifiable_secret_sharing() { let mut rng = StdRng::seed_from_u64(0u64); - let g1 = ::G1Affine::rand(&mut rng); - let g2 = ::G2Affine::rand(&mut rng); + let g1 = G1Affine::rand(&mut rng); + let g2 = G2Affine::rand(&mut rng); fn check(rng: &mut StdRng, g: &G) { - let mut checked_serialization = true; + let mut checked_serialization = false; for (threshold, total) in vec![ (2, 2), (2, 3), @@ -127,9 +134,21 @@ pub mod tests { (7, 10), (7, 15), ] { + println!("For {}-of-{} sharing", threshold, total); + let start = Instant::now(); let (secret, shares, commitments, _) = deal_random_secret::<_, G>(rng, threshold as ShareId, total as ShareId, g) .unwrap(); + println!( + "Time to create shares and commitments {:?}", + start.elapsed() + ); + println!( + "Commitment size is {} bytes", + commitments.serialized_size(Compress::Yes) + ); + + let mut noted_time = false; for share in &shares.0 { // Wrong share fails to verify @@ -138,9 +157,15 @@ pub mod tests { assert!(wrong_share.verify(&commitments, g).is_err()); // Correct share verifies + let start = 
Instant::now(); share.verify(&commitments, g).unwrap(); + if !noted_time { + println!("Time to verify share is {:?}", start.elapsed()); + noted_time = true; + } } + // Its assumed that reconstructor verifies each share before calling `reconstruct_secret` assert_eq!(shares.reconstruct_secret().unwrap(), secret); if !checked_serialization { diff --git a/secret_sharing_and_dkg/src/frost_dkg.rs b/secret_sharing_and_dkg/src/frost_dkg.rs index 83bffe09..269014c8 100644 --- a/secret_sharing_and_dkg/src/frost_dkg.rs +++ b/secret_sharing_and_dkg/src/frost_dkg.rs @@ -149,6 +149,10 @@ impl Round1State { if !msg.comm_coeffs.supports_threshold(self.threshold) { return Err(SSError::DoesNotSupportThreshold(self.threshold)); } + if self.coeff_comms.contains_key(&msg.sender_id) { + return Err(SSError::AlreadyProcessedFromSender(msg.sender_id)); + } + let pk_gen = pk_gen.into(); // Verify Schnorr proof let mut challenge_bytes = vec![]; @@ -257,16 +261,17 @@ pub mod tests { UniformRand, }; use blake2::Blake2b512; + use std::time::{Duration, Instant}; use test_utils::{test_serialization, G1, G2}; #[test] - fn frost_distributed_key_generation() { + fn distributed_key_generation() { let mut rng = StdRng::seed_from_u64(0u64); let g1 = G1::rand(&mut rng); let g2 = G2::rand(&mut rng); fn check(rng: &mut StdRng, pub_key_base: &G) { - let mut checked_serialization = true; + let mut checked_serialization = false; for (threshold, total) in vec![ (2, 2), (2, 3), @@ -293,8 +298,13 @@ pub mod tests { let mut secrets = vec![]; let schnorr_ctx = b"test-ctx"; + println!("For {}-of-{}", threshold, total); + let mut round1_time = Duration::default(); + let mut round2_time = Duration::default(); + // Each participant starts Round 1 for i in 1..=total { + let start = Instant::now(); let (round1_state, round1_msg) = Round1State::start_with_random_secret::( rng, @@ -305,6 +315,8 @@ pub mod tests { pub_key_base, ) .unwrap(); + round1_time += start.elapsed(); + secrets.push(round1_state.secret.clone()); all_round1_states.push(round1_state); all_round1_msgs.push(round1_msg); @@ -348,6 +360,7 @@ pub mod tests { ) .is_err()); + let start = Instant::now(); // Process valid message all_round1_states[i] .add_received_message::( @@ -356,6 +369,7 @@ pub mod tests { pub_key_base, ) .unwrap(); + round1_time += start.elapsed(); } } @@ -367,7 +381,9 @@ pub mod tests { // Each participant ends Round 1 and begins Round 2 for i in 0..total { assert_eq!(all_round1_states[i].total_participants(), total); + let start = Instant::now(); let (round2, shares) = all_round1_states[i].clone().finish().unwrap(); + round1_time += start.elapsed(); all_round2_states.push(round2); all_shares.push(shares); } @@ -422,6 +438,7 @@ pub mod tests { ) .is_err()); + let start = Instant::now(); all_round2_states[i] .add_received_share( (j + 1) as ParticipantId, @@ -429,6 +446,7 @@ pub mod tests { pub_key_base, ) .unwrap(); + round2_time += start.elapsed(); // Adding duplicate share not allowed assert!(all_round2_states[i] @@ -456,8 +474,10 @@ pub mod tests { let mut all_pk = vec![]; let mut final_shares = vec![]; for i in 0..total { + let start = Instant::now(); let (share, pk, t_pk) = all_round2_states[i].clone().finish(pub_key_base).unwrap(); + round2_time += start.elapsed(); assert_eq!( pub_key_base .mul_bigint(share.share.into_bigint()) @@ -492,6 +512,9 @@ pub mod tests { ) ); checked_serialization = true; + + println!("Time taken for round 1 {:?}", round1_time); + println!("Time taken for round 2 {:?}", round2_time); } } diff --git 
diff --git a/secret_sharing_and_dkg/src/gennaro_dkg.rs b/secret_sharing_and_dkg/src/gennaro_dkg.rs
index f7e279b7..aef7302d 100644
--- a/secret_sharing_and_dkg/src/gennaro_dkg.rs
+++ b/secret_sharing_and_dkg/src/gennaro_dkg.rs
@@ -3,10 +3,11 @@
 //! secret and share it using Pedersen VSS and in Phase 2 participants distribute commitments as per
 //! Feldman VSS and generate the public key at the end. The public key is assumed to be of the form
 //! `G*x` where `x` is the secret key and `G` is the group generator.
+//!
 
 use ark_ec::{AffineRepr, CurveGroup};
 use ark_ff::Zero;
-use ark_poly::univariate::DensePolynomial;
+use ark_poly::{univariate::DensePolynomial, Polynomial};
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
 use ark_std::{collections::BTreeMap, rand::RngCore, vec::Vec, UniformRand};
 use dock_crypto_utils::commitment::PedersenCommitmentKey;
@@ -26,42 +27,44 @@
 pub struct Phase1<G: AffineRepr> {
     pub secret: G::ScalarField,
     pub accumulator: pedersen_dvss::SharesAccumulator<G>,
     pub poly: DensePolynomial<G::ScalarField>,
+    /// This is kept to respond to a maliciously complaining party, i.e. for step 1.c of the protocol
+    pub blinding_poly: DensePolynomial<G::ScalarField>,
 }
 
 /// In Phase 2, each participant runs Feldman VSS (only partly) over the same secret and polynomial
 /// used in Phase 1, where it distributes the commitments to other participants.
 /// The commitments created during Phase 1 and Phase 2 could be in different groups for efficiency, like when
 /// the public key is supposed to be in group G2, but the commitments in Phase 1 can still be in group G1.
-/// Thus GP1 is the commitment group from Phase 1 and GP2 is in Phase 2.
+/// Thus CMG is the commitment group from Phase 1 and PKG is the public key group used in Phase 2.
 #[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)]
-pub struct Phase2<GP2: AffineRepr, GP1: AffineRepr<ScalarField = GP2::ScalarField>> {
+pub struct Phase2<PKG: AffineRepr, CMG: AffineRepr<ScalarField = PKG::ScalarField>> {
     pub id: ParticipantId,
-    pub secret: GP2::ScalarField,
+    pub secret: PKG::ScalarField,
     /// Shares from Phase 1. Only participants which submitted shares in Phase 1 will be allowed in
     /// Phase 2. This is the set "QUAL" from the paper.
-    pub shares_phase_1: BTreeMap<ParticipantId, VerifiableShare<GP1::ScalarField>>,
-    pub final_share: VerifiableShare<GP1::ScalarField>,
+    pub shares_phase_1: BTreeMap<ParticipantId, VerifiableShare<CMG::ScalarField>>,
+    pub final_share: VerifiableShare<CMG::ScalarField>,
     /// Commitment to coefficients of the polynomial created during Phase 1.
-    pub coeff_comms: BTreeMap<ParticipantId, CommitmentToCoefficients<GP1>>,
+    pub coeff_comms: BTreeMap<ParticipantId, CommitmentToCoefficients<CMG>>,
 }
 
-impl<GP1: AffineRepr> Phase1<GP1> {
+impl<CMG: AffineRepr> Phase1<CMG> {
     /// Start Phase 1 with a randomly generated secret.
    pub fn start_with_random_secret<R: RngCore>(
         rng: &mut R,
         participant_id: ParticipantId,
         threshold: ShareId,
         total: ShareId,
-        comm_key: &PedersenCommitmentKey<GP1>,
+        comm_key: &PedersenCommitmentKey<CMG>,
     ) -> Result<
         (
             Self,
-            VerifiableShares<GP1::ScalarField>,
-            CommitmentToCoefficients<GP1>,
+            VerifiableShares<CMG::ScalarField>,
+            CommitmentToCoefficients<CMG>,
         ),
         SSError,
     > {
-        let secret = GP1::ScalarField::rand(rng);
+        let secret = CMG::ScalarField::rand(rng);
         Self::start_with_given_secret(rng, participant_id, secret, threshold, total, comm_key)
     }
@@ -69,20 +72,20 @@
     pub fn start_with_given_secret<R: RngCore>(
         rng: &mut R,
         participant_id: ParticipantId,
-        secret: GP1::ScalarField,
+        secret: CMG::ScalarField,
         threshold: ShareId,
         total: ShareId,
-        comm_key: &PedersenCommitmentKey<GP1>,
+        comm_key: &PedersenCommitmentKey<CMG>,
     ) -> Result<
         (
             Self,
-            VerifiableShares<GP1::ScalarField>,
-            CommitmentToCoefficients<GP1>,
+            VerifiableShares<CMG::ScalarField>,
+            CommitmentToCoefficients<CMG>,
         ),
         SSError,
     > {
-        let (_, shares, commitments, poly, _) =
-            pedersen_vss::deal_secret::<_, GP1>(rng, secret, threshold, total, comm_key)?;
+        let (_, shares, commitments, poly, blinding_poly) =
+            pedersen_vss::deal_secret::<_, CMG>(rng, secret, threshold, total, comm_key)?;
         let mut accumulator = pedersen_dvss::SharesAccumulator::new(participant_id, threshold);
         accumulator.add_self_share(
             shares.0[(participant_id as usize) - 1].clone(),
@@ -93,6 +96,7 @@
                 secret,
                 accumulator,
                 poly,
+                blinding_poly,
             },
             shares,
             commitments,
@@ -103,16 +107,16 @@
     pub fn add_received_share(
         &mut self,
         sender_id: ParticipantId,
-        share: VerifiableShare<GP1::ScalarField>,
-        commitment_coeffs: CommitmentToCoefficients<GP1>,
-        comm_key: &PedersenCommitmentKey<GP1>,
+        share: VerifiableShare<CMG::ScalarField>,
+        commitment_coeffs: CommitmentToCoefficients<CMG>,
+        comm_key: &PedersenCommitmentKey<CMG>,
     ) -> Result<(), SSError> {
         self.accumulator
             .add_received_share(sender_id, share, commitment_coeffs, comm_key)?;
         Ok(())
     }
 
-    /// Called when got >= `threshold` complaints for `participant_id`
+    /// Called when >= `threshold` complaints have been received for `participant_id`, to disqualify that participant
     pub fn remove_participant(&mut self, participant_id: ParticipantId) -> Result<(), SSError> {
         if self.self_id() == participant_id {
             return Err(SSError::CannotRemoveSelf(participant_id));
@@ -122,19 +126,21 @@
         Ok(())
     }
 
-    /// Mark Phase 1 as over and initialize Phase 2.
-    pub fn finish<GP2: AffineRepr<ScalarField = GP1::ScalarField>>(
+    /// Mark Phase 1 as over and initialize Phase 2. Call this only when confident that no more complaints
+    /// will be received or need to be processed.
+    /// It's assumed that all participants in `self.accumulator` are honest by now.
+    pub fn finish<PKG: AffineRepr<ScalarField = CMG::ScalarField>>(
         self,
-        ped_comm_key: &PedersenCommitmentKey<GP1>,
-        fel_comm_key: &GP2,
-    ) -> Result<(Phase2<GP2, GP1>, CommitmentToCoefficients<GP2>), SSError> {
+        ped_comm_key: &PedersenCommitmentKey<CMG>,
+        fel_comm_key: &PKG,
+    ) -> Result<(Phase2<PKG, CMG>, CommitmentToCoefficients<PKG>), SSError> {
         let id = self.self_id();
         let shares_phase_1 = self.accumulator.shares.clone();
         let final_share = self.accumulator.finalize(ped_comm_key)?;
-        // If `GP1` and `GP2`, An optimization to avoid computing `commitments` could be to not do an MSM in `Phase1::start..` and
+        // If `CMG` and `PKG` are the same, an optimization to avoid computing `commitments` could be to not do an MSM in `Phase1::start..` and
         // preserve the computation `g*a_i` where `a_i` are the coefficients of the polynomial
-        let commitments: CommitmentToCoefficients<GP2> =
+        let commitments: CommitmentToCoefficients<PKG> =
             feldman_vss::commit_to_poly(&self.poly, fel_comm_key).into();
         let mut coeff_comms = BTreeMap::new();
         coeff_comms.insert(id, commitments.clone());
@@ -150,18 +156,42 @@
         ))
     }
 
+    /// Called by the participant to respond to a complaint by the participant with id `participant_id`
+    pub fn generate_share_for_participant(
+        &self,
+        participant_id: ParticipantId,
+    ) -> Result<VerifiableShare<CMG::ScalarField>, SSError> {
+        if self.self_id() == participant_id {
+            return Err(SSError::SenderIdSameAsReceiver(
+                self.self_id(),
+                participant_id,
+            ));
+        }
+        if participant_id == 0 {
+            return Err(SSError::InvalidParticipantId(0));
+        }
+        let id = CMG::ScalarField::from(participant_id);
+        let share = VerifiableShare {
+            id: participant_id,
+            threshold: self.poly.degree() as u16 + 1,
+            secret_share: self.poly.evaluate(&id),
+            blinding_share: self.blinding_poly.evaluate(&id),
+        };
+        Ok(share)
+    }
+
     pub fn self_id(&self) -> ParticipantId {
         self.accumulator.participant_id
     }
 }
 
-impl<GP2: AffineRepr, GP1: AffineRepr<ScalarField = GP2::ScalarField>> Phase2<GP2, GP1> {
+impl<PKG: AffineRepr, CMG: AffineRepr<ScalarField = PKG::ScalarField>> Phase2<PKG, CMG> {
     /// Called by a participant when it receives commitments from others.
    pub fn add_received_commitments(
         &mut self,
         sender_id: ParticipantId,
-        commitment_coeffs: CommitmentToCoefficients<GP2>,
-        ck: &GP2,
+        commitment_coeffs: CommitmentToCoefficients<PKG>,
+        ck: &PKG,
     ) -> Result<(), SSError> {
         if self.id == sender_id {
             return Err(SSError::SenderIdSameAsReceiver(sender_id, self.id));
@@ -169,6 +199,9 @@ impl<PKG: AffineRepr, CMG: AffineRepr<ScalarField = PKG::ScalarField>> Phase2<PKG, CMG>
-    pub fn finish(self) -> Result<(GP2::ScalarField, GP2, GP2), SSError> {
+    pub fn finish(self) -> Result<(PKG::ScalarField, PKG, PKG), SSError> {
         if self.coeff_comms.len() != self.shares_phase_1.len() {
             return Err(SSError::MissingSomeParticipants(
                 (self.shares_phase_1.len() - self.coeff_comms.len()) as ParticipantId,
@@ -196,34 +229,43 @@
+    /// Get the Phase 1 share this party holds for participant `id`
+    pub fn get_phase_1_share_of_party(&self, id: ParticipantId) -> Option<&VerifiableShare<CMG::ScalarField>> {
+        self.shares_phase_1.get(&id)
+    }
 }
 
 #[cfg(test)]
 pub mod tests {
     use super::*;
-    use ark_bls12_381::Bls12_381;
+    use ark_bls12_381::{Bls12_381, Fr};
     use ark_ec::{pairing::Pairing, CurveGroup};
     use ark_ff::PrimeField;
     use ark_std::rand::{rngs::StdRng, SeedableRng};
     use blake2::Blake2b512;
+    use std::time::{Duration, Instant};
 
     type G1 = <Bls12_381 as Pairing>::G1Affine;
 
     #[test]
-    fn gennaro_distributed_key_generation() {
+    fn distributed_key_generation() {
         let mut rng = StdRng::seed_from_u64(0u64);
         let ped_comm_key = PedersenCommitmentKey::<G1>::new::<Blake2b512>(b"test");
-        let fed_comm_key = <Bls12_381 as Pairing>::G1Affine::rand(&mut rng);
-        let fed_comm_key_g2 = <Bls12_381 as Pairing>::G2Affine::rand(&mut rng);
+        let feld_comm_key = <Bls12_381 as Pairing>::G1Affine::rand(&mut rng);
+        let feld_comm_key_g2 = <Bls12_381 as Pairing>::G2Affine::rand(&mut rng);
 
-        fn check<GP2: AffineRepr<ScalarField = Fr>>(
+        fn check<PKG: AffineRepr<ScalarField = Fr>>(
             rng: &mut StdRng,
-            ped_comm_key: &PedersenCommitmentKey<G1>,
-            fed_comm_key: &GP2,
+            ped_comm_key: &PedersenCommitmentKey<G1>,
+            feld_comm_key: &PKG,
         ) {
             for (threshold, total) in vec![
                 (2, 2),
@@ -251,8 +293,13 @@
                 let mut all_comms1 = vec![];
                 let mut all_comms2 = vec![];
 
+                println!("For {}-of-{}", threshold, total);
+                let mut phase1_time = Duration::default();
+                let mut phase2_time = Duration::default();
+
                 // Each participant starts Phase1
                 for i in 1..=total {
+                    let start = Instant::now();
                     let (phase1, shares, comms) = Phase1::start_with_random_secret(
                         rng,
                         i as ParticipantId,
                         threshold as ShareId,
                         total as ShareId,
                         ped_comm_key,
                     )
                     .unwrap();
+                    phase1_time += start.elapsed();
+
+                    for s in &shares.0 {
+                        if i as ShareId != s.id {
+                            assert_eq!(*s, phase1.generate_share_for_participant(s.id).unwrap());
+                        } else {
+                            assert!(phase1.generate_share_for_participant(s.id).is_err());
+                        }
+                    }
+                    assert!(phase1.generate_share_for_participant(0).is_err());
+
                     all_secrets.push(phase1.secret.clone());
                     all_phase1s.push(phase1);
                     all_shares.push(shares);
                     all_comms1.push(comms);
                 }
 
+                let start = Instant::now();
                 // Each participant receives shares and commitments during Phase1
                 for i in 0..total {
                     for j in 0..total {
                         if i != j {
                             all_phase1s[i]
                                 .add_received_share(
                                     (j + 1) as ParticipantId,
                                     all_shares[j].0[i].clone(),
                                     all_comms1[j].clone(),
                                     ped_comm_key,
                                 )
                                 .unwrap();
                         }
                     }
                 }
@@ -287,12 +346,14 @@
                 for i in 0..total {
                     let (phase2, comms) = all_phase1s[i]
                         .clone()
-                        .finish(ped_comm_key, fed_comm_key)
+                        .finish(ped_comm_key, feld_comm_key)
                         .unwrap();
                     all_phase2s.push(phase2);
                     all_comms2.push(comms);
                 }
+                phase1_time += start.elapsed();
 
+                let start = Instant::now();
                 // Each participant receives shares and commitments during Phase2
                 for i in 0..total {
                     for j in 0..total {
@@ -301,35 +362,175 @@
                         if i != j {
                             all_phase2s[i]
                                 .add_received_commitments(
                                     (j + 1) as ParticipantId,
                                     all_comms2[j].clone(),
-                                    fed_comm_key,
+                                    feld_comm_key,
                                 )
                                 .unwrap();
                         }
                     }
                 }
+                phase2_time += start.elapsed();
 
                 // Each participant ends Phase2 and ends up with its own keys and the threshold public key
                 let mut tk = None;
                 for i in 0..total {
+                    let start = Instant::now();
                     let (own_sk, own_pk, threshold_pk) =
                        all_phase2s[i].clone().finish().unwrap();
+                    phase2_time += start.elapsed();
                     assert_eq!(own_sk, all_secrets[i]);
                     assert_eq!(
                         own_pk,
-                        fed_comm_key.mul_bigint(own_sk.into_bigint()).into_affine()
+                        feld_comm_key.mul_bigint(own_sk.into_bigint()).into_affine()
                     );
-                    if i == 0 {
+                    if tk.is_none() {
                         tk = Some(threshold_pk);
                     } else {
                         // All generate the same threshold key
                         assert_eq!(tk, Some(threshold_pk))
                     }
                 }
+
+                assert_eq!(
+                    tk.unwrap(),
+                    (*feld_comm_key * all_secrets.into_iter().sum::<Fr>())
+                        .into_affine()
+                );
+
+                println!("Time taken for phase 1 {:?}", phase1_time);
+                println!("Time taken for phase 2 {:?}", phase2_time);
             }
         }
 
         // When both Pedersen VSS and Feldman VSS have commitments in group G1
-        check(&mut rng, &ped_comm_key, &fed_comm_key);
+        check(&mut rng, &ped_comm_key, &feld_comm_key);
         // When Pedersen VSS has commitments in group G1 and Feldman VSS in G2
-        check(&mut rng, &ped_comm_key, &fed_comm_key_g2);
+        check(&mut rng, &ped_comm_key, &feld_comm_key_g2);
+    }
+
+    #[test]
+    fn distributed_key_generation_with_failures() {
+        let mut rng = StdRng::seed_from_u64(0u64);
+        let ped_comm_key = PedersenCommitmentKey::<G1>::new::<Blake2b512>(b"test");
+        let feld_comm_key = <Bls12_381 as Pairing>::G1Affine::rand(&mut rng);
+
+        let threshold = 5;
+        let total = 10;
+
+        let faulty_phase_1_id: ParticipantId = 2;
+        let faulty_phase_2_id: ParticipantId = 3;
+
+        let mut all_phase1s = vec![];
+        let mut all_phase2s = BTreeMap::new();
+        let mut all_secrets = vec![];
+        let mut all_shares = vec![];
+        let mut all_comms1 = vec![];
+        let mut all_comms2 = BTreeMap::new();
+
+        // Each participant starts Phase1
+        for i in 1..=total {
+            let (phase1, shares, comms) = Phase1::start_with_random_secret(
+                &mut rng,
+                i as ParticipantId,
+                threshold as ShareId,
+                total as ShareId,
+                &ped_comm_key,
+            )
+            .unwrap();
+
+            all_secrets.push(phase1.secret.clone());
+            all_phase1s.push(phase1);
+            all_shares.push(shares);
+            all_comms1.push(comms);
+        }
+
+        // Each participant receives shares and commitments during Phase1
+        for i in 0..total {
+            for j in 0..total {
+                if i != j {
+                    all_phase1s[i]
+                        .add_received_share(
+                            (j + 1) as ParticipantId,
+                            all_shares[j].0[i].clone(),
+                            all_comms1[j].clone(),
+                            &ped_comm_key,
+                        )
+                        .unwrap();
+                }
+            }
+        }
+
+        // `threshold` complaints were received against one party, so all others remove it; this is step 1.c of the protocol
+        for i in 1..=total {
+            if i != faulty_phase_1_id as usize {
+                all_phase1s[i - 1]
+                    .remove_participant(faulty_phase_1_id)
+                    .unwrap();
+                let (phase2, comms) = all_phase1s[i - 1]
+                    .clone()
+                    .finish(&ped_comm_key, &feld_comm_key)
+                    .unwrap();
+                all_phase2s.insert(i as ParticipantId, phase2);
+                all_comms2.insert(i as ParticipantId, comms);
+            }
+        }
+
+        // Each participant receives shares and commitments during Phase2
+        for i in 1..=total {
+            if i == faulty_phase_1_id as usize {
+                continue;
+            }
+            for j in 1..=total {
+                if j == faulty_phase_1_id as usize {
+                    continue;
+                }
+                if i != j {
+                    let p_2 = all_phase2s.get_mut(&(i as ParticipantId)).unwrap();
+                    let c_2 = all_comms2.get(&(j as ParticipantId)).unwrap();
+                    p_2.add_received_commitments(j as ParticipantId, c_2.clone(), &feld_comm_key)
+                        .unwrap();
+                }
+            }
+        }
+
+        // Say a party misbehaves in Phase 2, i.e. the check in 4.b fails; then the others should be able to recover
+        // its secret
+        let mut faulty_party_shares = vec![];
+        let faulty_party = &all_phase1s[faulty_phase_2_id as usize - 1];
+        for i in 1..=total {
+            if i == faulty_phase_1_id as usize || i == faulty_phase_2_id as usize {
+                continue;
+            }
+            let p_2 = all_phase2s.get(&(i as ParticipantId)).unwrap();
+            let share = p_2.get_phase_1_share_of_party(faulty_phase_2_id).unwrap();
+            faulty_party_shares.push(share.clone());
+        }
+        let malicious_party_shares = VerifiableShares(faulty_party_shares);
+        assert_eq!(
+            malicious_party_shares.reconstruct_secret().unwrap(),
+            (
+                faulty_party.poly.evaluate(&Fr::zero()),
+                faulty_party.blinding_poly.evaluate(&Fr::zero())
+            )
+        );
+
+        // Each participant ends Phase2 and ends up with its own keys and the threshold public key
+        let mut tk = None;
+        for i in 1..=total {
+            if i == faulty_phase_1_id as usize || i == faulty_phase_2_id as usize {
+                continue;
+            }
+            let p_2 = all_phase2s.get_mut(&(i as ParticipantId)).unwrap();
+            let (own_sk, own_pk, threshold_pk) = p_2.clone().finish().unwrap();
+            assert_eq!(own_sk, all_secrets[i - 1]);
+            assert_eq!(
+                own_pk,
+                feld_comm_key.mul_bigint(own_sk.into_bigint()).into_affine()
+            );
+            if tk.is_none() {
+                tk = Some(threshold_pk);
+            } else {
+                // All generate the same threshold key
+                assert_eq!(tk, Some(threshold_pk))
+            }
+        }
+    }
 }
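The recovery step exercised in the failure test above is ordinary Lagrange interpolation: once `threshold` honest parties pool the Phase 1 shares they hold for the misbehaving dealer, the dealer's `f(0)` (and likewise its blinding polynomial at 0) is determined. A minimal sketch of that interpolation, assuming plain arkworks and not this crate's `reconstruct_secret`:

```rust
use ark_bls12_381::Fr;
use ark_ff::{Field, One, UniformRand, Zero};
use ark_std::rand::{rngs::StdRng, SeedableRng};

// Interpolate f(0) from shares (i, f(i)); the Lagrange basis at 0 is
// l_i(0) = prod_{j != i} x_j / (x_j - x_i)
fn interpolate_at_zero(shares: &[(u64, Fr)]) -> Fr {
    let mut acc = Fr::zero();
    for (i, (xi, yi)) in shares.iter().enumerate() {
        let xi = Fr::from(*xi);
        let mut num = Fr::one();
        let mut den = Fr::one();
        for (j, (xj, _)) in shares.iter().enumerate() {
            if i != j {
                let xj = Fr::from(*xj);
                num *= xj;
                den *= xj - xi;
            }
        }
        acc += *yi * num * den.inverse().unwrap();
    }
    acc
}

fn main() {
    let mut rng = StdRng::seed_from_u64(0u64);
    // 3-of-n sharing: degree-2 polynomial, so any 3 shares suffice
    let coeffs: Vec<Fr> = (0..3).map(|_| Fr::rand(&mut rng)).collect();
    let eval = |x: Fr| coeffs.iter().rev().fold(Fr::zero(), |acc, c| acc * x + *c);
    let shares: Vec<(u64, Fr)> = (1..=3).map(|i| (i, eval(Fr::from(i)))).collect();
    assert_eq!(interpolate_at_zero(&shares), coeffs[0]);
}
```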
diff --git a/secret_sharing_and_dkg/src/lib.rs b/secret_sharing_and_dkg/src/lib.rs
index c60dd431..08e2d622 100644
--- a/secret_sharing_and_dkg/src/lib.rs
+++ b/secret_sharing_and_dkg/src/lib.rs
@@ -1,5 +1,25 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
+//! # Secret sharing and distributed key generation
+//!
+//! Implements Secret Sharing (SS), Verifiable Secret Sharing (VSS), Distributed Verifiable Secret Sharing (DVSS), Distributed
+//! Key Generation (DKG) and Publicly Verifiable Secret Sharing (PVSS) algorithms. DVSS and DKG do not require a trusted dealer.
+//! Also implements a distributed discrete log check.
+//!
+//!
+//! 1. [Shamir secret sharing (requires a trusted dealer)](./src/shamir_ss.rs)
+//! 1. [Pedersen Verifiable Secret Sharing](./src/pedersen_vss.rs)
+//! 1. [Pedersen Distributed Verifiable Secret Sharing](./src/pedersen_dvss.rs)
+//! 1. [Feldman Verifiable Secret Sharing](./src/feldman_vss.rs)
+//! 1. [Feldman Distributed Verifiable Secret Sharing](./src/feldman_dvss_dkg.rs)
+//! 1. [Gennaro DKG from the paper Secure Distributed Key Generation for Discrete-Log Based Cryptosystems](./src/gennaro_dkg.rs)
+//! 1. [Distributed Key Generation from FROST](./src/frost_dkg.rs)
+//! 1. [Distributed discrete log (DLOG) check](./src/distributed_dlog_check)
+//! 1. [Publicly Verifiable Secret Sharing](./src/baghery_pvss)
+//!
+
+pub mod abcp_dkg;
+pub mod baghery_feldman_vss;
 pub mod baghery_pvss;
 pub mod common;
 pub mod distributed_dlog_check;
diff --git a/secret_sharing_and_dkg/src/pedersen_vss.rs b/secret_sharing_and_dkg/src/pedersen_vss.rs
index 0cc82e98..3e26e32c 100644
--- a/secret_sharing_and_dkg/src/pedersen_vss.rs
+++ b/secret_sharing_and_dkg/src/pedersen_vss.rs
@@ -8,7 +8,7 @@
 //! - Dealer sends `(F(i), G(i))` to participant `i`
 //! - Each participant verifies `C(F(i), G(i)) = C_0 * C_1*i * C_2*{i^2} * ... * C_{k-1}*{i^{k-1}}`
 
-use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM};
+use ark_ec::{AffineRepr, VariableBaseMSM};
 use ark_ff::PrimeField;
 use ark_poly::univariate::DensePolynomial;
@@ -72,11 +72,12 @@ pub fn deal_secret<R: RngCore, G: AffineRepr>(
     // Create a random blinding and shares of that
     let (t, t_shares, t_poly) = shamir_ss::deal_random_secret(rng, threshold, total)?;
     // Create Pedersen commitments where each commitment commits to a coefficient of the polynomial `s_poly` and with blinding as coefficient of the polynomial `t_poly`
-    let coeff_comms = G::Group::normalize_batch(
-        &cfg_into_iter!(0..threshold as usize)
-            .map(|i| comm_key.commit_as_projective(&s_poly.coeffs[i], &t_poly.coeffs[i]))
-            .collect::<Vec<_>>(),
-    );
+    // let coeff_comms = G::Group::normalize_batch(
+    //     &cfg_into_iter!(0..threshold as usize)
+    //         .map(|i| comm_key.commit_as_projective(&s_poly.coeffs[i], &t_poly.coeffs[i]))
+    //         .collect::<Vec<_>>(),
+    // );
+    let coeff_comms = comm_key.commit_to_a_batch(&s_poly.coeffs, &t_poly.coeffs);
 
     Ok((
         t,
@@ -99,6 +100,8 @@
 impl<F: PrimeField> VerifiableShare<F> {
     /// Executed by each participant to verify its share received from the dealer.
+    /// Also, this should be called by the "reconstructor" to verify that each share used in
+    /// reconstruction is valid.
     pub fn verify<G: AffineRepr<ScalarField = F>>(
         &self,
         commitment_coeffs: &CommitmentToCoefficients<G>,
@@ -156,6 +159,7 @@
     use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
     use ark_std::rand::{rngs::StdRng, SeedableRng};
     use blake2::Blake2b512;
+    use std::time::Instant;
     use test_utils::{test_serialization, G1, G2};
 
     #[test]
@@ -185,6 +189,8 @@
             (7, 10),
             (7, 15),
         ] {
+            println!("For {}-of-{} sharing", threshold, total);
+            let start = Instant::now();
             let (secret, blinding, shares, commitments, _, _) = deal_random_secret::<_, G>(
                 rng,
                 threshold as ShareId,
                 total as ShareId,
                 &comm_key,
             )
             .unwrap();
+            println!(
+                "Time to create shares and commitments {:?}",
+                start.elapsed()
+            );
 
+            let mut noted_time = false;
             for share in &shares.0 {
                 // Wrong share fails to verify
                 let mut wrong_share = share.clone();
@@ -204,10 +215,18 @@
                 assert!(wrong_share.verify(&commitments, &comm_key).is_err());
 
                 // Correct share verifies
+                let start = Instant::now();
                 share.verify(&commitments, &comm_key).unwrap();
+                if !noted_time {
+                    println!("Time to verify commitments is {:?}", start.elapsed());
+                    noted_time = true;
+                }
             }
 
+            // It's assumed that the reconstructor verifies each share before calling `reconstruct_secret`
+            let start = Instant::now();
             let (s, t) = shares.reconstruct_secret().unwrap();
+            println!("Time to reconstruct secret {:?}", start.elapsed());
             assert_eq!(s, secret);
             assert_eq!(t, blinding);
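The check in `VerifiableShare::verify` above is the one from the module doc: with coefficient commitments `C_j = g*a_j + h*b_j`, party `i` confirms `g*F(i) + h*G(i) == sum_j C_j * i^j`. A self-contained sketch under the same assumptions as the earlier examples (plain arkworks, not this crate's `VerifiableShare` API):

```rust
use ark_bls12_381::{Fr, G1Projective};
use ark_ff::{One, UniformRand, Zero};
use ark_std::rand::{rngs::StdRng, SeedableRng};

fn main() {
    let mut rng = StdRng::seed_from_u64(0u64);
    let (g, h) = (G1Projective::rand(&mut rng), G1Projective::rand(&mut rng));
    let t = 3;
    // f hides the secret (f(0) = s), r is the blinding polynomial
    let f: Vec<Fr> = (0..t).map(|_| Fr::rand(&mut rng)).collect();
    let r: Vec<Fr> = (0..t).map(|_| Fr::rand(&mut rng)).collect();
    // Pedersen commitment to each coefficient pair: C_j = g*a_j + h*b_j
    let comms: Vec<G1Projective> = f.iter().zip(&r).map(|(a, b)| g * *a + h * *b).collect();
    let eval = |p: &[Fr], x: Fr| p.iter().rev().fold(Fr::zero(), |acc, c| acc * x + *c);
    // Party i verifies g*f(i) + h*r(i) == sum_j C_j * i^j
    for i in 1u64..=5 {
        let x = Fr::from(i);
        let lhs = g * eval(&f, x) + h * eval(&r, x);
        let mut pow = Fr::one();
        let mut rhs = G1Projective::zero();
        for c in &comms {
            rhs += *c * pow;
            pow *= x;
        }
        assert_eq!(lhs, rhs);
    }
}
```

Unlike Feldman VSS, nothing here reveals `g*s`, which is why the Gennaro DKG uses Pedersen commitments in Phase 1 and only moves to Feldman-style commitments in Phase 2.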
diff --git a/short_group_sig/Cargo.toml b/short_group_sig/Cargo.toml
index c3dc20ef..08ee6014 100644
--- a/short_group_sig/Cargo.toml
+++ b/short_group_sig/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "short_group_sig"
-version = "0.2.0"
+version = "0.3.0"
 edition.workspace = true
 authors.workspace = true
 license.workspace = true
@@ -19,8 +19,8 @@ rayon = {workspace = true, optional = true}
 serde.workspace = true
 serde_with.workspace = true
 zeroize.workspace = true
-dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" }
-schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" }
+dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" }
+schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" }
 
 [dev-dependencies]
 blake2.workspace = true
diff --git a/smc_range_proof/Cargo.toml b/smc_range_proof/Cargo.toml
index 446095d6..3efa18e9 100644
--- a/smc_range_proof/Cargo.toml
+++ b/smc_range_proof/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "smc_range_proof"
-version = "0.4.0"
+version = "0.5.0"
 edition.workspace = true
 authors.workspace = true
 license.workspace = true
@@ -16,9 +16,9 @@ ark-serialize.workspace = true
 digest.workspace = true
 zeroize.workspace = true
 rayon = {workspace = true, optional = true}
-dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" }
-short_group_sig = { version = "0.2.0", default-features = false, path = "../short_group_sig" }
-schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" }
+dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" }
+short_group_sig = { version = "0.3.0", default-features = false, path = "../short_group_sig" }
+schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" }
 
 [dev-dependencies]
 blake2.workspace = true
diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml
index 5591775e..a8dd8c84 100644
--- a/test_utils/Cargo.toml
+++ b/test_utils/Cargo.toml
@@ -7,7 +7,6 @@ license.workspace = true
 
 [dependencies]
 bbs_plus = { default-features = false, path = "../bbs_plus" }
-schnorr_pok = { default-features = false, path = "../schnorr_pok" }
 vb_accumulator = { default-features = false, path = "../vb_accumulator" }
 ark-ff.workspace = true
 ark-ec.workspace = true
@@ -15,12 +14,10 @@ ark-std.workspace = true
 ark-bls12-381.workspace = true
 ark-serialize.workspace = true
 blake2.workspace = true
-proof_system = { default-features = false, path = "../proof_system"}
 kvac = { default-features = false, path = "../kvac"}
 oblivious_transfer_protocols = { default-features = false, path = "../oblivious_transfer"}
 
 [features]
 default = ["parallel"]
-parallel = ["proof_system/parallel", "kvac/parallel", "oblivious_transfer_protocols/parallel"]
-wasmer-js = ["proof_system/wasmer-js"]
-wasmer-sys = ["proof_system/wasmer-sys"]
\ No newline at end of file
+std = ["ark-ff/std", "ark-ec/std", "ark-std/std", "ark-serialize/std", "bbs_plus/std", "vb_accumulator/std", "kvac/std", "oblivious_transfer_protocols/std"]
+parallel = ["bbs_plus/parallel", "vb_accumulator/parallel", "kvac/parallel", "oblivious_transfer_protocols/parallel"]
diff --git a/utils/Cargo.toml b/utils/Cargo.toml
index 713863a6..fa2f89f7 100644
--- a/utils/Cargo.toml
+++ b/utils/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "dock_crypto_utils"
-version = "0.18.0"
+version = "0.19.0"
 edition.workspace = true
 authors.workspace = true
 license.workspace = true
diff --git a/utils/src/commitment.rs b/utils/src/commitment.rs
index ec993440..a8ba4882 100644
--- a/utils/src/commitment.rs
+++ b/utils/src/commitment.rs
@@ -1,13 +1,17 @@
 use crate::{
-    concat_slices, hashing_utils::affine_group_elem_from_try_and_incr, serde_utils::ArkObjectBytes,
+    concat_slices, hashing_utils::affine_group_elem_from_try_and_incr, msm::WindowTable,
+    serde_utils::ArkObjectBytes,
 };
-use ark_ec::AffineRepr;
+use ark_ec::{AffineRepr, CurveGroup};
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
-use ark_std::vec::Vec;
+use ark_std::{cfg_into_iter, vec::Vec};
 use digest::Digest;
 use serde::{Deserialize, Serialize};
 use serde_with::serde_as;
 
+#[cfg(feature = "parallel")]
+use rayon::prelude::*;
+
 /// A Pedersen commitment key `(g, h)`. The Pedersen commitment will be `g * m + h * r` with opening `(m, r)`
 #[serde_as]
 #[derive(
@@ -30,7 +34,24 @@ impl<G: AffineRepr> PedersenCommitmentKey<G> {
 
     /// Commit to a message
     pub fn commit(&self, message: &G::ScalarField, randomness: &G::ScalarField) -> G {
-        (self.g * message + self.h * randomness).into()
+        self.commit_as_projective(message, randomness).into()
+    }
+
+    /// Commit to a batch of messages and output a commitment corresponding to each message.
+    pub fn commit_to_a_batch(
+        &self,
+        messages: &[G::ScalarField],
+        randomness: &[G::ScalarField],
+    ) -> Vec<G> {
+        assert_eq!(messages.len(), randomness.len());
+        let g_table = WindowTable::new(messages.len(), self.g.into_group());
+        let h_table = WindowTable::new(randomness.len(), self.h.into_group());
+        G::Group::normalize_batch(
+            &cfg_into_iter!(messages)
+                .zip(cfg_into_iter!(randomness))
+                .map(|(m_i, r_i)| g_table.multiply(m_i) + h_table.multiply(r_i))
+                .collect::<Vec<_>>(),
+        )
+    }
 
     pub fn commit_as_projective(
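`commit_to_a_batch` above amortizes the two fixed bases with window tables (the `WindowTable` type comes from `dock_crypto_utils::msm`) and converts all projective results to affine in one batch. A rough equivalent of what it computes, minus the windowing optimization, in plain arkworks:

```rust
use ark_bls12_381::{Fr, G1Projective};
use ark_ec::CurveGroup;
use ark_ff::UniformRand;
use ark_std::rand::{rngs::StdRng, SeedableRng};

fn main() {
    let mut rng = StdRng::seed_from_u64(0u64);
    let (g, h) = (G1Projective::rand(&mut rng), G1Projective::rand(&mut rng));
    let messages: Vec<Fr> = (0..8).map(|_| Fr::rand(&mut rng)).collect();
    let randomness: Vec<Fr> = (0..8).map(|_| Fr::rand(&mut rng)).collect();
    assert_eq!(messages.len(), randomness.len());
    // One commitment per (message, randomness) pair: C_i = g * m_i + h * r_i
    let comms: Vec<G1Projective> = messages
        .iter()
        .zip(randomness.iter())
        .map(|(m, r)| g * *m + h * *r)
        .collect();
    // Single batched conversion to affine (one field inversion for the whole batch)
    let affine = G1Projective::normalize_batch(&comms);
    assert_eq!(affine.len(), messages.len());
}
```

Because both bases are reused for every element of the batch, the window tables pay for themselves once the batch is more than a few elements long; `normalize_batch` then replaces per-point inversions with a single Montgomery batch inversion.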
diff --git a/vb_accumulator/Cargo.toml b/vb_accumulator/Cargo.toml
index beacc55a..a4086684 100644
--- a/vb_accumulator/Cargo.toml
+++ b/vb_accumulator/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "vb_accumulator"
-version = "0.24.0"
+version = "0.25.0"
 edition.workspace = true
 authors.workspace = true
 license.workspace = true
@@ -22,12 +22,12 @@ rayon = {workspace = true, optional = true}
 serde.workspace = true
 serde_with.workspace = true
 zeroize.workspace = true
-schnorr_pok = { version = "0.18.0", default-features = false, path = "../schnorr_pok" }
-dock_crypto_utils = { version = "0.18.0", default-features = false, path = "../utils" }
-short_group_sig = { version = "0.2.0", default-features = false, path = "../short_group_sig" }
-kvac = { version = "0.3.0", default-features = false, path = "../kvac" }
-oblivious_transfer_protocols = { version = "0.7.0", default-features = false, path = "../oblivious_transfer" }
-secret_sharing_and_dkg = { version = "0.11.0", default-features = false, path = "../secret_sharing_and_dkg" }
+schnorr_pok = { version = "0.19.0", default-features = false, path = "../schnorr_pok" }
+dock_crypto_utils = { version = "0.19.0", default-features = false, path = "../utils" }
+short_group_sig = { version = "0.3.0", default-features = false, path = "../short_group_sig" }
+kvac = { version = "0.4.0", default-features = false, path = "../kvac" }
+oblivious_transfer_protocols = { version = "0.8.0", default-features = false, path = "../oblivious_transfer" }
+secret_sharing_and_dkg = { version = "0.12.0", default-features = false, path = "../secret_sharing_and_dkg" }
 
 [dev-dependencies]
 blake2.workspace = true