VID share needs old epoch payload commitment
lukaszrzasik committed Dec 19, 2024
1 parent 3110ee5 commit 3ff768c
Showing 4 changed files with 55 additions and 27 deletions.
13 changes: 9 additions & 4 deletions crates/task-impls/src/quorum_vote/mod.rs
@@ -161,7 +161,13 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES> + 'static, V: Versions> Handl
 }
 }
 HotShotEvent::VidShareValidated(share) => {
-    let vid_payload_commitment = &share.data.payload_commitment;
+    let vid_payload_commitment = if let Some(ref data_epoch_payload_commitment) =
+        share.data.data_epoch_payload_commitment
+    {
+        data_epoch_payload_commitment
+    } else {
+        &share.data.payload_commitment
+    };
     vid_share = Some(share.clone());
     if let Some(ref comm) = payload_commitment {
         if vid_payload_commitment != comm {
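The new branch prefers the data-epoch commitment when one is present and only falls back to the target-epoch commitment otherwise. A minimal sketch of that selection rule, assuming a toy Share type with u64 standing in for VidCommitment (the real share data here is VidDisperseShare2<TYPES>):

/// Toy stand-in for the validated VID share (illustrative only).
struct Share {
    payload_commitment: u64, // u64 models VidCommitment here
    data_epoch_payload_commitment: Option<u64>,
}

/// Pick the commitment the vote logic should compare against: the
/// data-epoch commitment during an epoch transition, else the
/// target-epoch commitment.
fn effective_commitment(share: &Share) -> &u64 {
    share
        .data_epoch_payload_commitment
        .as_ref()
        .unwrap_or(&share.payload_commitment)
}

fn main() {
    let steady = Share { payload_commitment: 7, data_epoch_payload_commitment: None };
    let transition = Share { payload_commitment: 7, data_epoch_payload_commitment: Some(9) };
    assert_eq!(*effective_commitment(&steady), 7);
    assert_eq!(*effective_commitment(&transition), 9);
}

Option::as_ref().unwrap_or(...) expresses the same fallback as the if let/else chain in the diff.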
@@ -546,15 +552,14 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> QuorumVoteTaskS

     // Validate the VID share.
     let payload_commitment = &disperse.data.payload_commitment;
-    let vid_epoch = disperse.data.epoch;
-    let target_epoch = disperse.data.target_node_epoch;
-
     // Check that the signature is valid
     ensure!(
         sender.validate(&disperse.signature, payload_commitment.as_ref()),
         "VID share signature is invalid"
     );
 
+    let vid_epoch = disperse.data.epoch;
+    let target_epoch = disperse.data.target_epoch;
     // ensure that the VID share was sent by a DA member OR the view leader
     ensure!(
         self.membership
1 change: 1 addition & 0 deletions crates/testing/src/helpers.rs
@@ -333,6 +333,7 @@ pub fn build_vid_proposal<TYPES: NodeType>(
         quorum_membership,
         epoch_number,
         epoch_number,
+        None,
     );
 
     let signature =
1 change: 1 addition & 0 deletions crates/testing/tests/tests_1/vid_task.rs
@@ -93,6 +93,7 @@ async fn test_vid_task() {
         &membership,
         EpochNumber::new(0),
         EpochNumber::new(0),
+        None,
     );
 
     let vid_proposal = Proposal {
67 changes: 44 additions & 23 deletions crates/types/src/data.rs
@@ -214,8 +214,10 @@ pub struct VidDisperse<TYPES: NodeType> {
     pub epoch: TYPES::Epoch,
     /// Epoch to which the recipients of this VID belong to
     pub target_epoch: TYPES::Epoch,
-    /// Block payload commitment
+    /// VidCommitment calculated based on the number of nodes in `target_epoch`.
     pub payload_commitment: VidCommitment,
+    /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition.
+    pub data_epoch_payload_commitment: Option<VidCommitment>,
     /// A storage node's key and its corresponding VID share
     pub shares: BTreeMap<TYPES::SignatureKey, VidShare>,
     /// VID common data sent to all storage nodes
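Per the two doc comments, the extra commitment travels only while the payload's epoch and the recipients' epoch differ. A toy model of that invariant, with u64 standing in for both VidCommitment and the epoch type (assumptions for illustration):

/// Illustrative stand-in for VidDisperse's epoch-related fields.
struct Disperse {
    epoch: u64,        // epoch the payload belongs to
    target_epoch: u64, // epoch of the share recipients
    data_epoch_payload_commitment: Option<u64>,
}

impl Disperse {
    /// The second commitment should be present exactly when the
    /// disperse crosses an epoch boundary.
    fn epochs_consistent(&self) -> bool {
        (self.epoch == self.target_epoch)
            == self.data_epoch_payload_commitment.is_none()
    }
}

fn main() {
    let steady = Disperse { epoch: 10, target_epoch: 10, data_epoch_payload_commitment: None };
    let transition = Disperse { epoch: 10, target_epoch: 11, data_epoch_payload_commitment: Some(42) };
    assert!(steady.epochs_consistent() && transition.epochs_consistent());
}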
@@ -231,7 +233,8 @@ impl<TYPES: NodeType> VidDisperse<TYPES> {
         mut vid_disperse: JfVidDisperse<VidSchemeType>,
         membership: &TYPES::Membership,
         target_epoch: TYPES::Epoch,
-        sender_epoch: TYPES::Epoch,
+        data_epoch: TYPES::Epoch,
+        data_epoch_payload_commitment: Option<VidCommitment>,
     ) -> Self {
         let shares = membership
             .committee_members(view_number, target_epoch)
@@ -244,7 +247,8 @@ impl<TYPES: NodeType> VidDisperse<TYPES> {
             shares,
             common: vid_disperse.common,
             payload_commitment: vid_disperse.commit,
-            epoch: sender_epoch,
+            data_epoch_payload_commitment,
+            epoch: data_epoch,
             target_epoch,
         }
     }
@@ -261,28 +265,41 @@ impl<TYPES: NodeType> VidDisperse<TYPES> {
         membership: &Arc<TYPES::Membership>,
         view: TYPES::View,
         target_epoch: TYPES::Epoch,
-        sender_epoch: TYPES::Epoch,
+        data_epoch: TYPES::Epoch,
         precompute_data: Option<VidPrecomputeData>,
     ) -> Self {
         let num_nodes = membership.total_nodes(target_epoch);
 
+        let txns_clone = Arc::clone(&txns);
         let vid_disperse = spawn_blocking(move || {
             precompute_data
                 .map_or_else(
-                    || vid_scheme(num_nodes).disperse(Arc::clone(&txns)),
-                    |data| vid_scheme(num_nodes).disperse_precompute(Arc::clone(&txns), &data)
+                    || vid_scheme(num_nodes).disperse(&txns_clone),
+                    |data| vid_scheme(num_nodes).disperse_precompute(&txns_clone, &data)
                 )
-                .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len()))
+                .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns_clone.len()))
         }).await;
+        let data_epoch_payload_commitment = if target_epoch == data_epoch {
+            None
+        } else {
+            let data_epoch_num_nodes = membership.total_nodes(data_epoch);
+            Some(spawn_blocking(move || {
+                vid_scheme(data_epoch_num_nodes).commit_only(&txns)
+                    .unwrap_or_else(|err| panic!("VID commit_only failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len()))
+            }).await)
+        };
         // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic.
         let vid_disperse = vid_disperse.unwrap();
+        let data_epoch_payload_commitment =
+            data_epoch_payload_commitment.map(|result| result.unwrap());
 
         Self::from_membership(
             view,
             vid_disperse,
             membership.as_ref(),
             target_epoch,
-            sender_epoch,
+            data_epoch,
+            data_epoch_payload_commitment,
         )
     }
 }
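The new txns_clone exists because each spawn_blocking closure must own a handle to the payload: the disperse task takes the clone, leaving the original txns free for the later commit_only task. A minimal sketch of that pattern under tokio, with a made-up checksum function standing in for the VID computations:

use std::sync::Arc;
use tokio::task::spawn_blocking;

// Stand-in for a CPU-bound computation over the payload bytes.
fn checksum(bytes: &[u8]) -> u64 {
    bytes.iter().map(|b| u64::from(*b)).sum()
}

#[tokio::main]
async fn main() {
    let txns: Arc<Vec<u8>> = Arc::new(vec![1, 2, 3]);

    // The first blocking task consumes a cloned handle...
    let txns_clone = Arc::clone(&txns);
    let a = spawn_blocking(move || checksum(&txns_clone)).await.unwrap();

    // ...so the original handle is still available for a second task.
    let b = spawn_blocking(move || checksum(&txns)).await.unwrap();

    assert_eq!(a, b);
}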
@@ -373,6 +390,7 @@ impl<TYPES: NodeType> VidDisperseShare<TYPES> {
             epoch: TYPES::Epoch::new(0),
             target_epoch: TYPES::Epoch::new(0),
             payload_commitment: first_vid_disperse_share.payload_commitment,
+            data_epoch_payload_commitment: None,
             common: first_vid_disperse_share.common,
             shares: share_map,
         };
@@ -416,9 +434,11 @@ pub struct VidDisperseShare2<TYPES: NodeType> {
     /// The epoch number for which this VID data belongs to
     pub epoch: TYPES::Epoch,
     /// The epoch number to which the recipient of this VID belongs to
-    pub target_node_epoch: TYPES::Epoch,
+    pub target_epoch: TYPES::Epoch,
     /// Block payload commitment
     pub payload_commitment: VidCommitment,
+    /// VidCommitment calculated based on the number of nodes in `epoch`. Needed during epoch transition.
+    pub data_epoch_payload_commitment: Option<VidCommitment>,
     /// A storage node's key and its corresponding VID share
     pub share: VidShare,
     /// VID common data sent to all storage nodes
@@ -432,8 +452,9 @@ impl<TYPES: NodeType> From<VidDisperseShare2<TYPES>> for VidDisperseShare<TYPES>
         let VidDisperseShare2 {
             view_number,
             epoch: _,
-            target_node_epoch: _,
+            target_epoch: _,
             payload_commitment,
+            data_epoch_payload_commitment: _,
             share,
             common,
             recipient_key,
@@ -462,8 +483,9 @@ impl<TYPES: NodeType> From<VidDisperseShare<TYPES>> for VidDisperseShare2<TYPES>
         Self {
             view_number,
             epoch: TYPES::Epoch::new(0),
-            target_node_epoch: TYPES::Epoch::new(0),
+            target_epoch: TYPES::Epoch::new(0),
             payload_commitment,
+            data_epoch_payload_commitment: None,
             share,
             common,
             recipient_key,
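Taken together, the two From impls make the version bridge lossy in one direction only: upgrading a legacy share fills neutral defaults (epoch 0, no data-epoch commitment), while downgrading drops the epoch-aware fields. A compressed sketch of that shape with toy types, u64 again standing in for VidCommitment:

struct ShareV1 { payload_commitment: u64 }

struct ShareV2 {
    epoch: u64,
    target_epoch: u64,
    payload_commitment: u64,
    data_epoch_payload_commitment: Option<u64>,
}

impl From<ShareV1> for ShareV2 {
    fn from(s: ShareV1) -> Self {
        Self {
            epoch: 0,        // neutral default, as with TYPES::Epoch::new(0)
            target_epoch: 0, // neutral default
            payload_commitment: s.payload_commitment,
            data_epoch_payload_commitment: None, // unknown for legacy shares
        }
    }
}

impl From<ShareV2> for ShareV1 {
    fn from(s: ShareV2) -> Self {
        // Downgrading silently drops the epoch-aware fields.
        Self { payload_commitment: s.payload_commitment }
    }
}

fn main() {
    let v2 = ShareV2::from(ShareV1 { payload_commitment: 5 });
    assert_eq!(ShareV1::from(v2).payload_commitment, 5);
}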
@@ -474,8 +496,6 @@ impl<TYPES: NodeType> From<VidDisperseShare<TYPES>> for VidDisperseShare2<TYPES>
 impl<TYPES: NodeType> VidDisperseShare2<TYPES> {
     /// Create a vector of `VidDisperseShare` from `VidDisperse`
     pub fn from_vid_disperse(vid_disperse: VidDisperse<TYPES>) -> Vec<Self> {
-        let epoch = vid_disperse.epoch;
-        let target_epoch = vid_disperse.target_epoch;
         vid_disperse
             .shares
             .into_iter()
@@ -485,8 +505,9 @@ impl<TYPES: NodeType> VidDisperseShare2<TYPES> {
                 view_number: vid_disperse.view_number,
                 common: vid_disperse.common.clone(),
                 payload_commitment: vid_disperse.payload_commitment,
-                epoch,
-                target_node_epoch: target_epoch,
+                data_epoch_payload_commitment: vid_disperse.data_epoch_payload_commitment,
+                epoch: vid_disperse.epoch,
+                target_epoch: vid_disperse.target_epoch,
             })
             .collect()
     }
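The hoisted locals removed above (let epoch = ..., let target_epoch = ...) are unnecessary: into_iter() on the shares field is a partial move, and with edition-2021 disjoint closure capture the map closure can read the remaining fields of vid_disperse directly. A minimal sketch with toy types (edition 2021 assumed):

struct Disperse {
    shares: Vec<u64>,
    epoch: u64,
}

fn explode(d: Disperse) -> Vec<(u64, u64)> {
    d.shares
        .into_iter()           // partially moves only `shares` out of `d`
        .map(|s| (s, d.epoch)) // the remaining field is still readable
        .collect()
}

fn main() {
    let d = Disperse { shares: vec![1, 2], epoch: 10 };
    assert_eq!(explode(d), vec![(1, 10), (2, 10)]);
}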
@@ -515,18 +536,17 @@ impl<TYPES: NodeType> VidDisperseShare2<TYPES> {
         I: Iterator<Item = &'a Self>,
     {
         let first_vid_disperse_share = it.next()?.clone();
-        let epoch = first_vid_disperse_share.epoch;
-        let target_epoch = first_vid_disperse_share.target_node_epoch;
         let mut share_map = BTreeMap::new();
         share_map.insert(
             first_vid_disperse_share.recipient_key,
             first_vid_disperse_share.share,
         );
         let mut vid_disperse = VidDisperse {
             view_number: first_vid_disperse_share.view_number,
-            epoch,
-            target_epoch,
+            epoch: first_vid_disperse_share.epoch,
+            target_epoch: first_vid_disperse_share.target_epoch,
             payload_commitment: first_vid_disperse_share.payload_commitment,
+            data_epoch_payload_commitment: first_vid_disperse_share.data_epoch_payload_commitment,
             common: first_vid_disperse_share.common,
             shares: share_map,
         };
@@ -543,8 +563,6 @@ impl<TYPES: NodeType> VidDisperseShare2<TYPES> {
     pub fn to_vid_share_proposals(
         vid_disperse_proposal: Proposal<TYPES, VidDisperse<TYPES>>,
     ) -> Vec<Proposal<TYPES, Self>> {
-        let epoch = vid_disperse_proposal.data.epoch;
-        let target_epoch = vid_disperse_proposal.data.target_epoch;
         vid_disperse_proposal
             .data
             .shares
@@ -556,8 +574,11 @@ impl<TYPES: NodeType> VidDisperseShare2<TYPES> {
                     view_number: vid_disperse_proposal.data.view_number,
                     common: vid_disperse_proposal.data.common.clone(),
                     payload_commitment: vid_disperse_proposal.data.payload_commitment,
-                    epoch,
-                    target_node_epoch: target_epoch,
+                    data_epoch_payload_commitment: vid_disperse_proposal
+                        .data
+                        .data_epoch_payload_commitment,
+                    epoch: vid_disperse_proposal.data.epoch,
+                    target_epoch: vid_disperse_proposal.data.target_epoch,
                 },
                 signature: vid_disperse_proposal.signature.clone(),
                 _pd: vid_disperse_proposal._pd,
