Skip to content

Commit

Permalink
feat: added KZG proof calculation functions
Browse files Browse the repository at this point in the history
  • Loading branch information
ocdbytes committed Jul 9, 2024
1 parent 00fc6f5 commit fb54e77
Show file tree
Hide file tree
Showing 10 changed files with 4,340 additions and 19 deletions.
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ alloy = { version = "0.1.2", features = ["full"] }
axum = { version = "0.7.4" }
axum-macros = "0.4.1"
color-eyre = "0.6.2"
c-kzg = "1.0.0"
dotenvy = "0.15.7"
futures = "0.3.30"
mongodb = { version = "2.8.1" }
Expand Down
3 changes: 3 additions & 0 deletions crates/orchestrator/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ name = "orchestrator"
path = "src/main.rs"

[dependencies]
alloy = { workspace = true }
arc-swap = { workspace = true }
async-std = "1.12.0"
async-trait = { workspace = true }
Expand All @@ -20,6 +21,7 @@ aws-sdk-s3 = { version = "1.38.0", features = ["behavior-version-latest"] }
axum = { workspace = true, features = ["macros"] }
axum-macros = { workspace = true }
bytes = "1.6.0"
c-kzg = { workspace = true }
cairo-vm = { workspace = true }
color-eyre = { workspace = true }
da-client-interface = { workspace = true }
Expand All @@ -39,6 +41,7 @@ num-bigint = { workspace = true }
num-traits = { workspace = true }
omniqueue = { workspace = true, optional = true }
prover-client-interface = { workspace = true }
rstest = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
settlement-client-interface = { workspace = true }
Expand Down
5 changes: 3 additions & 2 deletions crates/orchestrator/src/data_storage/mod.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
mod aws_s3;
mod types;
pub mod aws_s3;
pub mod types;

use async_trait::async_trait;
use aws_sdk_s3::primitives::ByteStream;
Expand All @@ -14,6 +14,7 @@ use mockall::automock;
/// ----<block_number>
/// ----<snos_output.json>
/// ----<kzg.txt>
/// ----<blob_data.txt>
#[automock]
#[async_trait]
pub trait DataStorage: Send + Sync {
Expand Down
31 changes: 29 additions & 2 deletions crates/orchestrator/src/jobs/da_job/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ use std::result::Result::{Err, Ok as OtherOk};
use std::str::FromStr;

use async_trait::async_trait;
use aws_sdk_s3::primitives::ByteStream;
use color_eyre::eyre::{eyre, Ok};
use color_eyre::Result;
use lazy_static::lazy_static;
Expand All @@ -18,6 +19,9 @@ use uuid::Uuid;
use super::types::{JobItem, JobStatus, JobType, JobVerificationStatus};
use super::Job;
use crate::config::Config;
use crate::data_storage::aws_s3::config::AWSS3Config;
use crate::data_storage::aws_s3::AWSS3;
use crate::data_storage::{DataStorage, DataStorageConfig};

lazy_static! {
/// EIP-4844 BLS12-381 modulus.
Expand Down Expand Up @@ -76,7 +80,7 @@ impl Job for DaJob {
MaybePendingStateUpdate::Update(state_update) => state_update,
};
// constructing the data from the rpc
let blob_data = state_update_to_blob_data(block_no, state_update, config).await?;
let blob_data = state_update_to_blob_data(block_no, state_update, config, false).await?;
// transforming the data so that we can apply FFT on this.
// @note: we can skip this step if in the above step we return vec<BigUint> directly
let blob_data_biguint = convert_to_biguint(blob_data.clone());
Expand Down Expand Up @@ -200,6 +204,7 @@ async fn state_update_to_blob_data(
block_no: u64,
state_update: StateUpdate,
config: &Config,
test_mode: bool,
) -> Result<Vec<FieldElement>> {
let state_diff = state_update.state_diff;
let mut blob_data: Vec<FieldElement> = vec![
Expand Down Expand Up @@ -272,9 +277,31 @@ async fn state_update_to_blob_data(
blob_data.push(*compiled_class_hash);
}

// saving the blob data of the block to the S3 bucket (if not running in test mode)
if !test_mode {
store_blob_data_s3(blob_data.clone(), block_no, config).await?;
}

Ok(blob_data)
}

/// Stores the blob data for a block in the S3 bucket under the key
/// `<block_number>/blob_data.txt`.
///
/// Only the first blob is stored for now; nothing is written when the data
/// produces no blobs.
async fn store_blob_data_s3(blob_data: Vec<FieldElement>, block_number: u64, config: &Config) -> Result<()> {
    let s3_client = AWSS3::new(AWSS3Config::new_from_env()).await;
    let key = format!("{}/blob_data.txt", block_number);
    let data_blob_big_uint = convert_to_biguint(blob_data);

    // TODO : Figure out the approach when there are multiple blobs in blobs_array
    let blobs_array = data_to_blobs(config.da_client().max_bytes_per_blob().await, data_blob_big_uint)?;

    // Guard BEFORE indexing: indexing `blobs_array[0]` on an empty vec would
    // panic, which the previous emptiness check could not prevent.
    if let Some(blob) = blobs_array.first() {
        s3_client.put_data(ByteStream::from(blob.clone()), &key).await?;
    }

    Ok(())
}

/// DA word encoding:
/// |---padding---|---class flag---|---new nonce---|---num changes---|
/// 127 bits 1 bit 64 bits 64 bits
Expand Down Expand Up @@ -382,7 +409,7 @@ mod tests {
get_nonce_attached(&server, nonce_file_path);

let state_update = read_state_update_from_file(state_update_file_path).expect("issue while reading");
let blob_data = state_update_to_blob_data(block_no, state_update, &config)
let blob_data = state_update_to_blob_data(block_no, state_update, &config, true)
.await
.expect("issue while converting state update to blob data");

Expand Down
120 changes: 120 additions & 0 deletions crates/orchestrator/src/jobs/state_update_job/kzg.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
use crate::data_storage::aws_s3::config::AWSS3Config;
use crate::data_storage::aws_s3::AWSS3;
use crate::data_storage::{DataStorage, DataStorageConfig};
use crate::jobs::state_update_job::CURRENT_PATH;
use alloy::eips::eip4844::BYTES_PER_BLOB;
use c_kzg::{Blob, Bytes32, KzgCommitment, KzgProof, KzgSettings};

/// Builds and self-verifies the KZG proof for a given block.
///
/// Fetches the blob data (zero-padded up to `BYTES_PER_BLOB`) and the x_0
/// evaluation point, computes the commitment and proof against the trusted
/// setup, verifies the proof, and returns it.
///
/// # Errors
/// Returns an error when the blob data exceeds `BYTES_PER_BLOB`, when the
/// trusted setup cannot be loaded, or when proof computation/verification fails.
pub async fn build_kzg_proof(block_number: u64, fetch_from_tests: Option<bool>) -> color_eyre::Result<KzgProof> {
    let blob_data = fetch_blob_data_for_block(block_number, fetch_from_tests).await?;

    // `copy_from_slice` panics unless lengths match exactly, so copy into a
    // prefix of the zero-initialised array instead — this zero-pads blob data
    // that is shorter than a full blob.
    if blob_data.len() > BYTES_PER_BLOB {
        return Err(color_eyre::eyre::eyre!(
            "Block {}: blob data is {} bytes, exceeding the {} byte blob size.",
            block_number,
            blob_data.len(),
            BYTES_PER_BLOB
        ));
    }
    let mut fixed_size_blob: [u8; BYTES_PER_BLOB] = [0; BYTES_PER_BLOB];
    fixed_size_blob[..blob_data.len()].copy_from_slice(blob_data.as_slice());

    let x_0_value = fetch_x_0_value_from_os_output(block_number, fetch_from_tests).await?;

    // Trusted setup ceremony output, committed alongside the sources.
    let trusted_setup_path = CURRENT_PATH.join("src/jobs/state_update_job/trusted_setup.txt");
    let trusted_setup = KzgSettings::load_trusted_setup_file(trusted_setup_path.as_path())?;

    let blob = Blob::new(fixed_size_blob);
    let commitment = KzgCommitment::blob_to_kzg_commitment(&blob, &trusted_setup)?;
    let (kzg_proof, y_0_value) = KzgProof::compute_kzg_proof(&blob, &x_0_value, &trusted_setup)?;

    // Verify the freshly computed proof as a sanity check; return an error
    // (instead of panicking via assert!) so the job can handle the failure.
    let eval = KzgProof::verify_kzg_proof(
        &commitment.to_bytes(),
        &x_0_value,
        &y_0_value,
        &kzg_proof.to_bytes(),
        &trusted_setup,
    )?;
    if !eval {
        return Err(color_eyre::eyre::eyre!("KZG proof verification failed for block {}", block_number));
    }

    Ok(kzg_proof)
}

/// Fetches the blob data (stored in S3 during the DA job) for a particular block.
///
/// When `fetch_from_tests` is `Some(true)` or `None` (the default), the data is
/// read from the local test fixture `test_data/<block_number>/blob_data.txt`;
/// otherwise it is fetched from the S3 bucket under `<block_number>/blob_data.txt`.
///
/// # Errors
/// Propagates file-read, hex-decode, and S3 failures instead of panicking.
pub async fn fetch_blob_data_for_block(
    block_number: u64,
    fetch_from_tests: Option<bool>,
) -> color_eyre::Result<Vec<u8>> {
    let fetch_from_tests = fetch_from_tests.unwrap_or(true);
    let blob_data: Vec<u8> = if fetch_from_tests {
        let blob_data_path =
            CURRENT_PATH.join(format!("src/jobs/state_update_job/test_data/{}/blob_data.txt", block_number));
        let data = std::fs::read_to_string(blob_data_path)?;
        // The helper returns `Result<_, String>`; lift the String into an eyre
        // report so `?` can be used here.
        hex_string_to_u8_vec(&data).map_err(|e| color_eyre::eyre::eyre!(e))?
    } else {
        let s3_client = AWSS3::new(AWSS3Config::new_from_env()).await;
        let key = format!("{}/blob_data.txt", block_number);
        s3_client.get_data(&key).await?.to_vec()
    };

    Ok(blob_data)
}

/// Fetches the x_0 evaluation point for a block from the OS output.
///
/// Currently only the test-fixture path (`test_data/<block_number>/x_0.txt`) is
/// implemented; the non-test path is `unimplemented!`.
///
/// # Errors
/// Propagates file-read and hex-parse failures instead of panicking.
pub async fn fetch_x_0_value_from_os_output(
    block_number: u64,
    fetch_from_tests: Option<bool>,
) -> color_eyre::Result<Bytes32> {
    let fetch_from_tests = fetch_from_tests.unwrap_or(true);
    let x_0 = if fetch_from_tests {
        let x_0_path = CURRENT_PATH.join(format!("src/jobs/state_update_job/test_data/{}/x_0.txt", block_number));
        let data = std::fs::read_to_string(x_0_path)?;
        // Trim guards against a trailing newline in the fixture file, which
        // `from_hex` would otherwise reject.
        Bytes32::from_hex(data.trim())?
    } else {
        // TODO: fetch x_0 from the OS output in DB/S3 once available.
        unimplemented!("can't fetch x_0 value from DB/S3")
    };

    Ok(x_0)
}

// Util Functions
// ===============

/// Util function to convert hex string data into `Vec<u8>`.
///
/// Non-hex characters (whitespace, newlines, a leading `0x`'s `x`, …) are
/// filtered out before decoding.
///
/// # Errors
/// Returns an error when the remaining hex digits do not form whole bytes
/// (odd digit count) or when a digit pair fails to parse.
fn hex_string_to_u8_vec(hex_str: &str) -> Result<Vec<u8>, String> {
    // Remove any spaces or non-hex characters from the input string.
    let cleaned_str: String = hex_str.chars().filter(|c| c.is_ascii_hexdigit()).collect();

    // An odd digit count would silently mis-align every byte that follows
    // (e.g. "abc" used to decode as [0xab, 0x0c]) — reject it instead.
    if cleaned_str.len() % 2 != 0 {
        return Err(format!("Hex string has an odd number of digits: {}", cleaned_str));
    }

    // Decode two hex digits per byte.
    let mut result = Vec::with_capacity(cleaned_str.len() / 2);
    for chunk in cleaned_str.as_bytes().chunks(2) {
        // Safe: `cleaned_str` contains only ASCII hex digits.
        let pair = std::str::from_utf8(chunk).map_err(|_| format!("Error parsing hex string: {}", cleaned_str))?;
        match u8::from_str_radix(pair, 16) {
            Ok(byte_val) => result.push(byte_val),
            Err(_) => return Err(format!("Error parsing hex string: {}", cleaned_str)),
        }
    }

    Ok(result)
}

#[cfg(test)]
mod tests {
    use crate::jobs::state_update_job::kzg::build_kzg_proof;
    use c_kzg::Bytes48;
    use rstest::rstest;

    // Builds the KZG proof from the local test fixtures for the given block and
    // compares it byte-for-byte against the proof that was actually posted on L1.
    #[rstest]
    #[case(630872)]
    #[tokio::test]
    async fn test_build_kzg_proof(#[case] block_number: u64) {
        // testing the data in transaction :
        // https://etherscan.io/tx/0x6b9fc547764a5d6e4451b5236b92e74c70800250f00fc1974fc0a75a459dc12e
        // `Some(true)` forces fixture-based fetching (no S3 access in tests).
        let kzg_proof = build_kzg_proof(block_number, Some(true)).await.unwrap().to_bytes();
        // Expected proof, taken from the blob sidecar of the L1 transaction above.
        let original_proof_from_l1 = Bytes48::from_hex(
            "a168b317e7c44691ee1932bd12fc6ac22182277e8fc5cd4cd21adc0831c33b1359aa5171bba529c69dcfe6224b220f8f",
        )
        .unwrap();

        assert_eq!(kzg_proof, original_proof_from_l1);
    }
}
32 changes: 17 additions & 15 deletions crates/orchestrator/src/jobs/state_update_job/mod.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
mod kzg;

use std::collections::HashMap;
use std::path::PathBuf;

Expand All @@ -23,6 +25,7 @@ use crate::config::Config;
use crate::jobs::constants::{
JOB_METADATA_STATE_UPDATE_BLOCKS_TO_SETTLE_KEY, JOB_METADATA_STATE_UPDATE_FETCH_FROM_TESTS,
};
use crate::jobs::state_update_job::kzg::build_kzg_proof;
use crate::jobs::types::{JobItem, JobStatus, JobType, JobVerificationStatus};
use crate::jobs::Job;

Expand Down Expand Up @@ -226,9 +229,7 @@ impl StateUpdateJob {
let onchain_data_size = 0;
settlement_client.update_state_calldata(program_output, onchain_data_hash, onchain_data_size).await?
} else if snos.use_kzg_da == Felt252::ONE {
// TODO: Build the blob & the KZG proof & send them to update_state_blobs
let kzg_proof = self.fetch_kzg_proof_for_block(block_no, fetch_from_tests).await;
let kzg_proof: [u8; 48] = kzg_proof.try_into().expect("kzg proof size must be 48 bytes");
let kzg_proof = build_kzg_proof(block_no, fetch_from_tests).await?.to_owned();
settlement_client.update_state_blobs(vec![], kzg_proof).await?
} else {
return Err(eyre!("Block #{} - SNOS error, [use_kzg_da] should be either 0 or 1.", block_no));
Expand All @@ -253,18 +254,19 @@ impl StateUpdateJob {

/// Retrieves the KZG Proof for the corresponding block.
/// TODO: remove the fetch_from_tests argument once we have proper fetching (db/s3)
async fn fetch_kzg_proof_for_block(&self, block_no: u64, fetch_from_tests: Option<bool>) -> Vec<u8> {
let fetch_from_tests = fetch_from_tests.unwrap_or(true);
let kzg_proof_str = match fetch_from_tests {
true => {
let kzg_path =
CURRENT_PATH.join(format!("src/jobs/state_update_job/test_data/{}/kzg_proof.txt", block_no));
std::fs::read_to_string(kzg_path).expect("Failed to read the KZG txt file").replace("0x", "")
}
false => unimplemented!("can't fetch KZG Proof from DB/S3"),
};
hex::decode(kzg_proof_str).expect("Invalid test kzg proof")
}
/// No longer needed as we are building the proof during the state update job run
// async fn fetch_kzg_proof_for_block(&self, block_no: u64, fetch_from_tests: Option<bool>) -> Vec<u8> {
// let fetch_from_tests = fetch_from_tests.unwrap_or(true);
// let kzg_proof_str = match fetch_from_tests {
// true => {
// let kzg_path =
// CURRENT_PATH.join(format!("src/jobs/state_update_job/test_data/{}/kzg_proof.txt", block_no));
// std::fs::read_to_string(kzg_path).expect("Failed to read the KZG txt file").replace("0x", "")
// }
// false => unimplemented!("can't fetch KZG Proof from DB/S3"),
// };
// hex::decode(kzg_proof_str).expect("Invalid test kzg proof")
// }

/// Insert the tx hashes into the the metadata for the attempt number - will be used later by
/// verify_job to make sure that all tx are successful.
Expand Down

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
0x01cab333ee4c0b03ba79bb51bc537545e3aef820434c0c06e00235dd9ccdafdf
Loading

0 comments on commit fb54e77

Please sign in to comment.