Skip to content

Commit

Permalink
refactor: cost estimator (#123)
Browse files Browse the repository at this point in the history
* BootInfo changes to support custom rollup configs

* return to upstream kona

* preserialize rollup config & fetch proper data from op geth

* just recipe adds hash to zkconfig automatically

* contract upgrades

* make latestoutputindex internally callable

* update public values to include rollup config hash (breaks tests, need to replace with new proof)

* pass config bytes with boot info

* move fetch rollup config to rust binary

* gitignore

* fix

* fix: client programs

* fix

* fix

* add

* add

* fix

* add

* fix

* fix

* add

* fix

* clean

* add

* bindings

* fix

* test

* update elf's

* release

* automatically update config

* fetch rollup config

* feat: generate config from script

* feat: remove methods

* fix: forge fmt

* del unused fn

* fetch rollup config

* add docs

* docs

* docs

* docs

* book removal

* fix

* fix

* feat: Use strict types, rather than type mucking

* fix: Use RPC mode, add rollup config saving

* docs: rename to zkl2ooconfig.json

* docs: rm todo

* add rollup config

* fetch

* add

* add

* add

* fix

* clean

* add

* fix: add rollup config path to args

* add

* feat: Load OP Stack Rollup Config (#121)

* load op stack rollup cfg fixes

* Parse the rust rollup config and default return the entire range

* fix: import paths for rollup cfg

* add

* update

* docs

* lint

---------

Co-authored-by: Ubuntu <[email protected]>

* add todo

* refactor: cost estimator

* logs

* concurrent native host runners

* some small errors

* docs

* feat: readd support for the deps

* clean

* fixes

* build

* build

---------

Co-authored-by: Zach Obront <[email protected]>
Co-authored-by: Ubuntu <[email protected]>
Co-authored-by: Ubuntu <[email protected]>
  • Loading branch information
4 people authored Sep 23, 2024
1 parent 707dfb7 commit 1dcde31
Show file tree
Hide file tree
Showing 14 changed files with 107 additions and 93 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 0 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,6 @@ kona-executor = { git = "https://github.com/anton-rs/kona", tag = "kona-client-v
kona-client = { git = "https://github.com/anton-rs/kona", tag = "kona-client-v0.1.0-alpha.3" }
kona-host = { git = "https://github.com/anton-rs/kona", tag = "kona-client-v0.1.0-alpha.3" }


# op-succinct
op-succinct-prove = { path = "scripts/prove" }
op-succinct-witnessgen = { path = "scripts/witnessgen" }
Expand Down
Binary file modified elf/aggregation-elf
Binary file not shown.
Binary file modified elf/fault-proof-elf
Binary file not shown.
Binary file modified elf/range-elf
Binary file not shown.
6 changes: 3 additions & 3 deletions justfile
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ run-client-native l2_block_num l1_rpc='${L1_RPC}' l1_beacon_rpc='${L1_BEACON_RPC
echo "L1 Beacon Address: $L1_BEACON_ADDRESS"
echo "L2 Node Address: $L2_NODE_ADDRESS"
HOST_BIN_PATH="./kona-host"
CLIENT_BIN_PATH="./target/release-client-lto/zkvm-client"
CLIENT_BIN_PATH="$(pwd)/target/release-client-lto/fault-proof"
L2_BLOCK_NUMBER="{{l2_block_num}}"
L2_BLOCK_SAFE_HEAD=$((L2_BLOCK_NUMBER - 1))
L2_OUTPUT_STATE_ROOT=$(cast block --rpc-url $L2_NODE_ADDRESS --field stateRoot $L2_BLOCK_SAFE_HEAD)
Expand All @@ -67,9 +67,9 @@ run-client-native l2_block_num l1_rpc='${L1_RPC}' l1_beacon_rpc='${L1_BEACON_RPC
DATA_DIRECTORY="./data/$L2_BLOCK_NUMBER"
echo "Saving Data to $DATA_DIRECTORY"
echo "Building client program..."
cargo build --bin zkvm-client --profile release-client-lto
cargo build --bin fault-proof --profile release-client-lto
echo "Running host program with native client program..."
cargo run --bin native-host --release -- \
cargo run --bin op-succinct-witnessgen --release -- \
--l1-head $L1_HEAD \
--l2-head $L2_HEAD \
--l2-claim $L2_CLAIM \
Expand Down
2 changes: 1 addition & 1 deletion programs/aggregation/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ use sha2::{Digest, Sha256};
///
/// Whenever the multi-block program changes, you will need to update this.
const MULTI_BLOCK_PROGRAM_VKEY_DIGEST: [u32; 8] =
[1118246686, 876333357, 1751535833, 1253784834, 30755083, 1698492772, 1080266404, 1468343823];
[1039893330, 1594505873, 415997013, 1198691665, 71280582, 651429912, 87063347, 1840814573];

pub fn main() {
// Read in the public values corresponding to each multi-block proof.
Expand Down
2 changes: 2 additions & 0 deletions programs/fault-proof/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -132,5 +132,7 @@ fn main() {

assert_eq!(number, boot.l2_claim_block);
assert_eq!(output_root, boot.l2_claim);

println!("Validated derivation and STF. Output Root: {}", output_root);
});
}
4 changes: 2 additions & 2 deletions programs/range/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -181,14 +181,14 @@ fn main() {
let output_root = executor.compute_output_root().unwrap();
println!("cycle-tracker-end: output-root");

println!("Completed Proof. Output Root: {}", output_root);

////////////////////////////////////////////////////////////////
// EPILOGUE //
////////////////////////////////////////////////////////////////

// Note: We don't need the last_block_num == claim_block check, because it's the only way to
// exit the above loop
assert_eq!(output_root, boot.l2_claim);

println!("Validated derivation and STF. Output Root: {}", output_root);
});
}
1 change: 1 addition & 0 deletions scripts/prove/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ log.workspace = true
csv.workspace = true
serde = { workspace = true }
reqwest = { workspace = true }
futures.workspace = true
rayon = "1.10.0"
serde_json.workspace = true

Expand Down
63 changes: 46 additions & 17 deletions scripts/prove/bin/cost_estimator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use op_succinct_host_utils::{
fetcher::{CacheMode, OPSuccinctDataFetcher, RPCMode},
get_proof_stdin,
rollup_config::read_rollup_config,
stats::{get_execution_stats, ExecutionStats},
stats::ExecutionStats,
witnessgen::WitnessGenExecutor,
ProgramType,
};
Expand All @@ -17,15 +17,17 @@ use serde::{Deserialize, Serialize};
use sp1_sdk::{utils, ProverClient};
use std::{
cmp::{max, min},
collections::HashMap,
env,
fs::{self},
future::Future,
net::TcpListener,
path::PathBuf,
process::{Command, Stdio},
sync::Arc,
time::Instant,
};
use tokio::task::block_in_place;
use tokio::{sync::Mutex, task::block_in_place};

pub const MULTI_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/range-elf");

Expand Down Expand Up @@ -121,7 +123,7 @@ fn get_max_span_batch_range_size(chain_id: u64) -> u64 {
const DEFAULT_SIZE: u64 = 1000;
match chain_id {
8453 => 5, // Base
11155111 => 20, // OP Sepolia
11155111 => 40, // OP Sepolia
10 => 10, // OP Mainnet
_ => DEFAULT_SIZE,
}
Expand Down Expand Up @@ -227,21 +229,48 @@ pub fn block_on<T>(fut: impl Future<Output = T>) -> T {

/// Run the zkVM execution process for each split range in parallel.
async fn execute_blocks_parallel(
host_clis: &[BatchHostCli],
host_clis: Vec<BatchHostCli>,
prover: &ProverClient,
data_fetcher: &OPSuccinctDataFetcher,
) -> Vec<ExecutionStats> {
host_clis
.par_iter()
.map(|r| {
let sp1_stdin = get_proof_stdin(&r.host_cli).unwrap();

let start_time = Instant::now();
let (_, report) = prover.execute(MULTI_BLOCK_ELF, sp1_stdin).run().unwrap();
let execution_duration = start_time.elapsed();
block_on(get_execution_stats(data_fetcher, r.start, r.end, &report, execution_duration))
})
.collect()
// Create a new execution stats map between the start and end block and the default ExecutionStats.
let execution_stats_map = Arc::new(Mutex::new(HashMap::new()));

// Fetch all of the execution stats block ranges in parallel.
let mut handles = Vec::new();
for (start, end) in host_clis.iter().map(|r| (r.start, r.end)) {
let execution_stats_map = Arc::clone(&execution_stats_map);
let handle = tokio::spawn(async move {
// Create a new data fetcher. This avoids the runtime dropping the provider dispatch task.
let data_fetcher = OPSuccinctDataFetcher::new().await;
let mut exec_stats = ExecutionStats::default();
exec_stats.add_block_data(&data_fetcher, start, end).await;
let mut execution_stats_map = execution_stats_map.lock().await;
execution_stats_map.insert((start, end), exec_stats);
});
handles.push(handle);
}
futures::future::join_all(handles).await;

// Run the zkVM execution process for each split range in parallel and fill in the execution stats.
host_clis.par_iter().for_each(|r| {
let sp1_stdin = get_proof_stdin(&r.host_cli).unwrap();

let start_time = Instant::now();
let (_, report) = prover.execute(MULTI_BLOCK_ELF, sp1_stdin).run().unwrap();
let execution_duration = start_time.elapsed();

// Get the existing execution stats and modify it in place.
let mut execution_stats_map = block_on(execution_stats_map.lock());
let exec_stats = execution_stats_map.get_mut(&(r.start, r.end)).unwrap();
exec_stats.add_report_data(&report, execution_duration);
exec_stats.add_aggregate_data();
});

info!("Execution is complete.");

let execution_stats = execution_stats_map.lock().await.clone().into_values().collect();
drop(execution_stats_map);
execution_stats
}

/// Write the execution stats to a CSV file.
Expand Down Expand Up @@ -415,7 +444,7 @@ async fn main() -> Result<()> {
let prover = ProverClient::new();
let host_clis = run_native_data_generation(&data_fetcher, &split_ranges).await;

let execution_stats = execute_blocks_parallel(&host_clis, &prover, &data_fetcher).await;
let execution_stats = execute_blocks_parallel(host_clis, &prover).await;

// Sort the execution stats by batch start block.
let mut sorted_execution_stats = execution_stats.clone();
Expand Down
10 changes: 6 additions & 4 deletions scripts/prove/bin/multi.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use clap::Parser;
use op_succinct_host_utils::{
fetcher::{CacheMode, OPSuccinctDataFetcher, RPCMode},
get_proof_stdin,
stats::get_execution_stats,
stats::ExecutionStats,
witnessgen::WitnessGenExecutor,
ProgramType,
};
Expand Down Expand Up @@ -100,9 +100,11 @@ async fn main() -> Result<()> {
fs::create_dir_all(&report_dir).unwrap();
}

let stats =
get_execution_stats(&data_fetcher, args.start, args.end, &report, execution_duration)
.await;
let mut stats = ExecutionStats::default();
stats.add_block_data(&data_fetcher, args.start, args.end).await;
stats.add_report_data(&report, execution_duration);
stats.add_aggregate_data();

println!("Execution Stats: \n{:?}", stats);

// Write to CSV.
Expand Down
4 changes: 3 additions & 1 deletion utils/host/src/fetcher.rs
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,8 @@ impl OPSuccinctDataFetcher {
}

/// Get the provider for the given RPC mode. Note: Will panic if the RPC mode is not L1 or L2.
/// Note: The provider can be dropped by the Tokio runtime if it is not used for a long time. Be
/// careful when using this function.
pub fn get_provider(&self, rpc_mode: RPCMode) -> Arc<RootProvider<Http<Client>>> {
match rpc_mode {
RPCMode::L1 => self.l1_provider.clone(),
Expand Down Expand Up @@ -430,7 +432,7 @@ impl OPSuccinctDataFetcher {
exec: Some(exec_directory),
server: false,
rollup_config_path: Some(rollup_config_path.into()),
v: 0,
v: std::env::var("VERBOSITY").unwrap_or("0".to_string()).parse().unwrap(),
})
}
}
106 changes: 42 additions & 64 deletions utils/host/src/stats.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,74 +68,52 @@ impl fmt::Display for ExecutionStats {
}
}

/// Get the execution stats for a given report.
pub async fn get_execution_stats(
data_fetcher: &OPSuccinctDataFetcher,
start: u64,
end: u64,
report: &ExecutionReport,
execution_duration: Duration,
) -> ExecutionStats {
// Get the total instruction count for execution across all blocks.
let block_execution_instruction_count: u64 =
*report.cycle_tracker.get("block-execution").unwrap_or(&0);
let oracle_verify_instruction_count: u64 =
*report.cycle_tracker.get("oracle-verify").unwrap_or(&0);
let derivation_instruction_count: u64 =
*report.cycle_tracker.get("payload-derivation").unwrap_or(&0);
let blob_verification_instruction_count: u64 =
*report.cycle_tracker.get("blob-verification").unwrap_or(&0);
impl ExecutionStats {
/// Add the on-chain data for the given block range to the stats.
pub async fn add_block_data(
&mut self,
data_fetcher: &OPSuccinctDataFetcher,
start: u64,
end: u64,
) {
let block_data = data_fetcher
.get_block_data_range(RPCMode::L2, start, end)
.await
.expect("Failed to fetch block data range.");

let nb_blocks = end - start + 1;

// Fetch the number of transactions in the blocks from the L2 RPC.
let block_data_range = data_fetcher
.get_block_data_range(RPCMode::L2, start, end)
.await
.expect("Failed to fetch block data range.");

let nb_transactions = block_data_range.iter().map(|b| b.transaction_count).sum();
let total_gas_used = block_data_range.iter().map(|b| b.gas_used).sum();

let bn_add_cycles: u64 = *report.cycle_tracker.get("precompile-bn-add").unwrap_or(&0);
let bn_mul_cycles: u64 = *report.cycle_tracker.get("precompile-bn-mul").unwrap_or(&0);
let bn_pair_cycles: u64 = *report.cycle_tracker.get("precompile-bn-pair").unwrap_or(&0);
let kzg_eval_cycles: u64 = *report.cycle_tracker.get("precompile-kzg-eval").unwrap_or(&0);
let ec_recover_cycles: u64 = *report.cycle_tracker.get("precompile-ec-recover").unwrap_or(&0);

let total_instruction_count = report.total_instruction_count();
self.batch_start = start;
self.batch_end = end;
self.nb_transactions = block_data.iter().map(|b| b.transaction_count).sum();
self.eth_gas_used = block_data.iter().map(|b| b.gas_used).sum();
self.nb_blocks = end - start + 1;
}

// Cycles per block, transaction are computed with respect to the total instruction count.
let cycles_per_block = total_instruction_count / nb_blocks;
let cycles_per_transaction = total_instruction_count / nb_transactions;
/// Add the execution report data to the stats.
pub fn add_report_data(&mut self, report: &ExecutionReport, execution_duration: Duration) {
let cycle_tracker = &report.cycle_tracker;
let get_cycles = |key: &str| *cycle_tracker.get(key).unwrap_or(&0);

let transactions_per_block = nb_transactions / nb_blocks;
let gas_used_per_block = total_gas_used / nb_blocks;
let gas_used_per_transaction = total_gas_used / nb_transactions;
self.total_instruction_count = report.total_instruction_count();
self.block_execution_instruction_count = get_cycles("block-execution");
self.oracle_verify_instruction_count = get_cycles("oracle-verify");
self.derivation_instruction_count = get_cycles("payload-derivation");
self.blob_verification_instruction_count = get_cycles("blob-verification");
self.bn_add_cycles = get_cycles("precompile-bn-add");
self.bn_mul_cycles = get_cycles("precompile-bn-mul");
self.bn_pair_cycles = get_cycles("precompile-bn-pair");
self.kzg_eval_cycles = get_cycles("precompile-kzg-eval");
self.ec_recover_cycles = get_cycles("precompile-ec-recover");
self.total_sp1_gas = report.estimate_gas();
self.execution_duration_sec = execution_duration.as_secs();
}

ExecutionStats {
batch_start: start,
batch_end: end,
execution_duration_sec: execution_duration.as_secs(),
total_instruction_count,
derivation_instruction_count,
oracle_verify_instruction_count,
block_execution_instruction_count,
blob_verification_instruction_count,
total_sp1_gas: report.estimate_gas(),
nb_blocks,
nb_transactions,
eth_gas_used: total_gas_used,
cycles_per_block,
cycles_per_transaction,
transactions_per_block,
gas_used_per_block,
gas_used_per_transaction,
bn_add_cycles,
bn_mul_cycles,
bn_pair_cycles,
kzg_eval_cycles,
ec_recover_cycles,
/// Add the aggregate statistics data (assumes that the block data and report data have already been added)
pub fn add_aggregate_data(&mut self) {
self.cycles_per_block = self.total_instruction_count / self.nb_blocks;
self.cycles_per_transaction = self.total_instruction_count / self.nb_transactions;
self.transactions_per_block = self.nb_transactions / self.nb_blocks;
self.gas_used_per_block = self.eth_gas_used / self.nb_blocks;
self.gas_used_per_transaction = self.eth_gas_used / self.nb_transactions;
}
}

Expand Down

0 comments on commit 1dcde31

Please sign in to comment.