diff --git a/.dockerignore b/.dockerignore index d82a5b35..fa3a0e9e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,6 @@ .vscode -**/target/ -**/snapshots/ +/**/target/ +/**/snapshots/ Cargo.lock +/**/compute_data/ +/**/rollups_data/ diff --git a/.gitignore b/.gitignore index 207f4551..413011fc 100644 --- a/.gitignore +++ b/.gitignore @@ -7,8 +7,10 @@ target/ snapshots/ common-rs/Cargo.lock prt/client-rs/Cargo.lock -prt/lua_poc/outputs/ -prt/lua_poc/pixels/ +prt/tests/compute/outputs/ +prt/tests/compute/pixels/ node_modules **/contract-bindings/src/contract **/contract-bindings/Cargo.lock +**/compute_data/ +**/rollups_data/ diff --git a/cartesi-rollups/contract-bindings/Cargo.toml b/cartesi-rollups/contract-bindings/Cargo.toml index 75c50cd3..b84e4107 100644 --- a/cartesi-rollups/contract-bindings/Cargo.toml +++ b/cartesi-rollups/contract-bindings/Cargo.toml @@ -15,5 +15,5 @@ readme = "README.md" repository = "https://github.com/cartesi/dave" [dependencies] -alloy = { version = "0.3.1", features = ["sol-types", "contract"] } +alloy = { version = "0.8.0", features = ["sol-types", "contract"] } diff --git a/cartesi-rollups/contracts/deploy_anvil.sh b/cartesi-rollups/contracts/deploy_anvil.sh new file mode 100755 index 00000000..2fc2e0cb --- /dev/null +++ b/cartesi-rollups/contracts/deploy_anvil.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -euo pipefail + +INITIAL_HASH=`xxd -p -c32 "${MACHINE_PATH}/hash"` + +export PRIVATE_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 + +forge script \ + script/DaveConsensus.s.sol \ + --fork-url "http://127.0.0.1:8545" \ + --broadcast \ + --sig "run(bytes32)" \ + "${INITIAL_HASH}" \ + -vvvv diff --git a/cartesi-rollups/contracts/foundry.toml b/cartesi-rollups/contracts/foundry.toml index f7298151..6c7b826f 100644 --- a/cartesi-rollups/contracts/foundry.toml +++ b/cartesi-rollups/contracts/foundry.toml @@ -14,4 +14,6 @@ allow_paths = [ '../../machine/step/', ] +solc-version = "0.8.27" + # See more 
config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options diff --git a/cartesi-rollups/contracts/lib/rollups-contracts b/cartesi-rollups/contracts/lib/rollups-contracts index 7ece9c3d..98611a85 160000 --- a/cartesi-rollups/contracts/lib/rollups-contracts +++ b/cartesi-rollups/contracts/lib/rollups-contracts @@ -1 +1 @@ -Subproject commit 7ece9c3d4821ada727556281934437ec5e591a7e +Subproject commit 98611a854ae72d4ee1857f9b251839eb438f28fb diff --git a/cartesi-rollups/contracts/script/DaveConsensus.s.sol b/cartesi-rollups/contracts/script/DaveConsensus.s.sol new file mode 100644 index 00000000..7d662f60 --- /dev/null +++ b/cartesi-rollups/contracts/script/DaveConsensus.s.sol @@ -0,0 +1,26 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +pragma solidity ^0.8.17; + +import {Script} from "forge-std/Script.sol"; + +import {Machine} from "prt-contracts/Machine.sol"; + +import "prt-contracts/tournament/factories/MultiLevelTournamentFactory.sol"; +import "rollups-contracts/inputs/InputBox.sol"; +import "src/DaveConsensus.sol"; + +contract DaveConcensusScript is Script { + function run(Machine.Hash initialHash) external { + vm.startBroadcast(vm.envUint("PRIVATE_KEY")); + + InputBox inputBox = new InputBox(); + MultiLevelTournamentFactory factory = new MultiLevelTournamentFactory( + new TopTournamentFactory(), new MiddleTournamentFactory(), new BottomTournamentFactory() + ); + new DaveConsensus(inputBox, address(0x0), factory, initialHash); + + vm.stopBroadcast(); + } +} diff --git a/cartesi-rollups/node/Cargo.toml b/cartesi-rollups/node/Cargo.toml index c462257d..2beb706f 100644 --- a/cartesi-rollups/node/Cargo.toml +++ b/cartesi-rollups/node/Cargo.toml @@ -35,14 +35,15 @@ cartesi-dave-contracts = { path = "../contract-bindings" } cartesi-dave-merkle = { path = "../../common-rs/merkle" } cartesi-prt-core = { path = "../../prt/client-rs" } -alloy = { version = "0.3.1", 
features = ["sol-types", "contract", "network", "reqwest", "signers", "signer-local"] } +alloy = { version = "0.8.0", features = ["sol-types", "contract", "network", "reqwest", "signers", "signer-local"] } anyhow = "1.0" async-recursion = "1" async-trait = "0.1.74" -cartesi-rollups-contracts = "2.0.0-rc.9" +cartesi-rollups-contracts = "2.0.0-rc.12" clap = { version = "4.5.7", features = ["derive", "env"] } clap_derive = "=4.5.13" futures = "0.3" log = "0.4" +num-traits = "0.2.19" thiserror = "1.0" tokio = { version = "1", features = ["full"] } diff --git a/cartesi-rollups/node/Dockerfile.test b/cartesi-rollups/node/Dockerfile.test index c8e22342..946e4cc2 100644 --- a/cartesi-rollups/node/Dockerfile.test +++ b/cartesi-rollups/node/Dockerfile.test @@ -8,14 +8,14 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y ENV PATH="/root/.cargo/bin:${PATH}" -COPY --from=ethereum/solc:0.8.23 /usr/bin/solc /usr/bin/solc +COPY --from=ethereum/solc:0.8.27 /usr/bin/solc /usr/bin/solc RUN chmod u+x /usr/bin/solc WORKDIR /app RUN wget https://github.com/cartesi/image-kernel/releases/download/v0.20.0/linux-6.5.13-ctsi-1-v0.20.0.bin \ -O ./linux.bin -RUN wget https://github.com/cartesi/machine-emulator-tools/releases/download/v0.15.0/rootfs-tools-v0.15.0.ext2 \ +RUN wget https://github.com/cartesi/machine-emulator-tools/releases/download/v0.16.1/rootfs-tools-v0.16.1.ext2 \ -O ./rootfs.ext2 RUN cartesi-machine --ram-image=./linux.bin \ diff --git a/cartesi-rollups/node/blockchain-reader/Cargo.toml b/cartesi-rollups/node/blockchain-reader/Cargo.toml index 0d18f90a..041c7c7e 100644 --- a/cartesi-rollups/node/blockchain-reader/Cargo.toml +++ b/cartesi-rollups/node/blockchain-reader/Cargo.toml @@ -12,14 +12,15 @@ repository = { workspace = true } [dependencies] rollups-state-manager = { workspace = true } -alloy = { workspace = true } -async-recursion = { workspace = true } cartesi-dave-contracts = { workspace = true } cartesi-rollups-contracts = { workspace = true } + +alloy = { 
workspace = true } +alloy-rpc-types-eth = "0.8.0" +async-recursion = { workspace = true } clap = { workspace = true } clap_derive = { workspace = true } +log = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } - -alloy-rpc-types-eth = "0.3.1" -num-traits = "0.2.19" +num-traits = { workspace = true } diff --git a/cartesi-rollups/node/blockchain-reader/src/lib.rs b/cartesi-rollups/node/blockchain-reader/src/lib.rs index 12a07492..ca15f276 100644 --- a/cartesi-rollups/node/blockchain-reader/src/lib.rs +++ b/cartesi-rollups/node/blockchain-reader/src/lib.rs @@ -7,6 +7,7 @@ use crate::error::{ProviderErrors, Result}; use alloy::{ contract::{Error, Event}, eips::BlockNumberOrTag::Finalized, + hex::ToHexExt, providers::{ network::primitives::BlockTransactionsKind, Provider, ProviderBuilder, RootProvider, }, @@ -17,8 +18,10 @@ use alloy_rpc_types_eth::Topic; use async_recursion::async_recursion; use clap::Parser; use error::BlockchainReaderError; +use log::{info, trace}; use num_traits::cast::ToPrimitive; use std::{ + iter::Peekable, marker::{Send, Sync}, str::FromStr, sync::Arc, @@ -29,18 +32,21 @@ use cartesi_dave_contracts::daveconsensus::DaveConsensus::EpochSealed; use cartesi_rollups_contracts::inputbox::InputBox::InputAdded; use rollups_state_manager::{Epoch, Input, InputId, StateManager}; +const DEVNET_CONSENSUS_ADDRESS: &str = "0x5FC8d32690cc91D4c39d9d3abcBD16989F875707"; +const DEVNET_INPUT_BOX_ADDRESS: &str = "0x5FbDB2315678afecb367f032d93F642f64180aa3"; + #[derive(Debug, Clone, Parser)] #[command(name = "cartesi_rollups_config")] #[command(about = "Addresses of Cartesi Rollups")] pub struct AddressBook { /// address of app - #[arg(long, env)] + #[arg(long, env, default_value_t = Address::ZERO)] app: Address, /// address of Dave consensus - #[arg(long, env)] + #[arg(long, env, default_value = DEVNET_CONSENSUS_ADDRESS)] pub consensus: Address, /// address of input box - #[arg(long, env)] + #[arg(long, env, default_value = 
DEVNET_INPUT_BOX_ADDRESS)] input_box: Address, } @@ -85,9 +91,11 @@ where pub async fn start(&mut self) -> Result<(), SM> { loop { let current_block = self.provider.latest_finalized_block().await?; - self.advance(self.prev_block, current_block).await?; - self.prev_block = current_block; + if current_block > self.prev_block { + self.advance(self.prev_block, current_block).await?; + self.prev_block = current_block; + } tokio::time::sleep(self.sleep_duration).await; } } @@ -116,13 +124,23 @@ where .collect_sealed_epochs(prev_block, current_block) .await?; + let last_sealed_epoch_opt = self + .state_manager + .last_sealed_epoch() + .map_err(|e| BlockchainReaderError::StateManagerError(e))?; + let mut merged_sealed_epochs = Vec::new(); + if let Some(last_sealed_epoch) = last_sealed_epoch_opt { + merged_sealed_epochs.push(last_sealed_epoch); + } + merged_sealed_epochs.extend(sealed_epochs.clone()); + let merged_sealed_epochs_iter = merged_sealed_epochs + .iter() + .collect::>() + .into_iter(); + // read inputs from blockchain let inputs = self - .collect_inputs( - prev_block, - current_block, - sealed_epochs.iter().collect::>().into_iter(), - ) + .collect_inputs(prev_block, current_block, merged_sealed_epochs_iter) .await?; Ok((inputs, sealed_epochs)) @@ -144,18 +162,25 @@ where ) .await? 
.iter() - .map(|e| Epoch { - epoch_number: e - .0 - .epochNumber - .to_u64() - .expect("fail to convert epoch number"), - epoch_boundary: e - .0 - .blockNumberUpperBound - .to_u64() - .expect("fail to convert epoch boundary"), - root_tournament: e.0.tournament.to_string(), + .map(|e| { + let epoch = Epoch { + epoch_number: e + .0 + .epochNumber + .to_u64() + .expect("fail to convert epoch number"), + epoch_boundary: e + .0 + .blockNumberUpperBound + .to_u64() + .expect("fail to convert epoch boundary"), + root_tournament: e.0.tournament.to_string(), + }; + info!( + "epoch received: epoch_number {}, epoch_boundary {}, root_tournament {}", + epoch.epoch_number, epoch.epoch_boundary, epoch.root_tournament + ); + epoch }) .collect()) } @@ -193,61 +218,63 @@ where }; let mut inputs = vec![]; - let mut input_events_iter = input_events.iter(); + let mut input_events_peekable = input_events.iter().peekable(); for epoch in sealed_epochs_iter { + if last_epoch_number > epoch.epoch_number { + continue; + } // iterate through newly sealed epochs, fill in the inputs accordingly - let inputs_of_epoch = self - .construct_input_ids( - epoch.epoch_number, - epoch.epoch_boundary, - &mut next_input_index_in_epoch, - &mut input_events_iter, - ) - .await; + let inputs_of_epoch = self.construct_input_ids( + epoch.epoch_number, + epoch.epoch_boundary, + &mut next_input_index_in_epoch, + &mut input_events_peekable, + ); inputs.extend(inputs_of_epoch); last_epoch_number = epoch.epoch_number + 1; } // all remaining inputs belong to an epoch that's not sealed yet - let inputs_of_epoch = self - .construct_input_ids( - last_epoch_number, - u64::MAX, - &mut next_input_index_in_epoch, - &mut input_events_iter, - ) - .await; + let inputs_of_epoch = self.construct_input_ids( + last_epoch_number, + u64::MAX, + &mut next_input_index_in_epoch, + &mut input_events_peekable, + ); inputs.extend(inputs_of_epoch); Ok(inputs) } - async fn construct_input_ids( + fn construct_input_ids<'a>( &self, 
epoch_number: u64, epoch_boundary: u64, next_input_index_in_epoch: &mut u64, - input_events_iter: &mut impl Iterator, + input_events_peekable: &mut Peekable>, ) -> Vec { let mut inputs = vec![]; - while input_events_iter - .peekable() - .peek() - .expect("fail to get peek next input") - .1 - < epoch_boundary - { + while let Some(input_added) = input_events_peekable.peek() { + if input_added.1 >= epoch_boundary { + break; + } let input = Input { id: InputId { epoch_number, input_index_in_epoch: *next_input_index_in_epoch, }, - data: input_events_iter.next().unwrap().0.input.to_vec(), + data: input_added.0.input.to_vec(), }; + info!( + "input received: epoch_number {}, input_index {}", + input.id.epoch_number, input.id.input_index_in_epoch, + ); + trace!("input data 0x{}", input.data.encode_hex()); + input_events_peekable.next(); *next_input_index_in_epoch += 1; inputs.push(input); } diff --git a/cartesi-rollups/node/compute-runner/Cargo.toml b/cartesi-rollups/node/compute-runner/Cargo.toml index 7b9c0122..5dacc99c 100644 --- a/cartesi-rollups/node/compute-runner/Cargo.toml +++ b/cartesi-rollups/node/compute-runner/Cargo.toml @@ -14,3 +14,4 @@ cartesi-prt-core = { workspace = true } rollups-state-manager = { workspace = true } alloy = { workspace = true } +log = { workspace = true } diff --git a/cartesi-rollups/node/compute-runner/src/lib.rs b/cartesi-rollups/node/compute-runner/src/lib.rs index fad17c81..bb7060b9 100644 --- a/cartesi-rollups/node/compute-runner/src/lib.rs +++ b/cartesi-rollups/node/compute-runner/src/lib.rs @@ -1,17 +1,18 @@ use alloy::sol_types::private::Address; +use log::error; use std::result::Result; use std::{str::FromStr, sync::Arc, time::Duration}; use cartesi_prt_core::{ arena::{BlockchainConfig, EthArenaSender}, - db::dispute_state_access::{Input, Leaf}, + db::compute_state_access::{Input, Leaf}, strategy::player::Player, }; use rollups_state_manager::StateManager; pub struct ComputeRunner { + arena_sender: EthArenaSender, config: 
BlockchainConfig, - sender: EthArenaSender, sleep_duration: Duration, state_manager: Arc, } @@ -20,11 +21,15 @@ impl ComputeRunner where ::Error: Send + Sync + 'static, { - pub fn new(config: &BlockchainConfig, state_manager: Arc, sleep_duration: u64) -> Self { - let sender = EthArenaSender::new(&config).expect("fail to initialize sender"); + pub fn new( + arena_sender: EthArenaSender, + config: &BlockchainConfig, + state_manager: Arc, + sleep_duration: u64, + ) -> Self { Self { + arena_sender, config: config.clone(), - sender, sleep_duration: Duration::from_secs(sleep_duration), state_manager, } @@ -32,35 +37,50 @@ where pub async fn start(&mut self) -> Result<(), ::Error> { loop { - if let Some(last_sealed_epoch) = self.state_manager.last_epoch()? { - if let Some(snapshot) = self + // participate in last sealed epoch tournament + if let Some(last_sealed_epoch) = self.state_manager.last_sealed_epoch()? { + match self .state_manager - .snapshot(last_sealed_epoch.epoch_number, 0)? + .computation_hash(last_sealed_epoch.epoch_number)? { - let inputs = self.state_manager.inputs(last_sealed_epoch.epoch_number)?; - let leafs = self - .state_manager - .machine_state_hashes(last_sealed_epoch.epoch_number)?; - let mut player = Player::new( - inputs.into_iter().map(|i| Input(i)).collect(), - leafs - .into_iter() - .map(|l| { - Leaf( - l.0.as_slice() - .try_into() - .expect("fail to convert leaf from machine state hash"), - l.1, - ) - }) - .collect(), - &self.config, - snapshot, - Address::from_str(&last_sealed_epoch.root_tournament) - .expect("fail to convert tournament address"), - ) - .expect("fail to initialize compute player"); - let _ = player.react_once(&self.sender).await; + Some(_) => { + if let Some(snapshot) = self + .state_manager + .snapshot(last_sealed_epoch.epoch_number, 0)? 
+ { + let inputs = + self.state_manager.inputs(last_sealed_epoch.epoch_number)?; + let leafs = self + .state_manager + .machine_state_hashes(last_sealed_epoch.epoch_number)?; + let mut player = Player::new( + Some(inputs.into_iter().map(|i| Input(i)).collect()), + leafs + .into_iter() + .map(|l| { + Leaf( + l.0.as_slice().try_into().expect( + "fail to convert leafs from machine state hash", + ), + l.1, + ) + }) + .collect(), + &self.config, + snapshot, + Address::from_str(&last_sealed_epoch.root_tournament) + .expect("fail to convert tournament address"), + ) + .expect("fail to initialize compute player"); + let _ = player + .react_once(&self.arena_sender) + .await + .inspect_err(|e| error!("{e}")); + } + } + None => { + // wait for the `machine-runner` to insert the value + } } } std::thread::sleep(self.sleep_duration); diff --git a/cartesi-rollups/node/dave-rollups/src/lib.rs b/cartesi-rollups/node/dave-rollups/src/lib.rs index 6991c6d9..40d7b153 100644 --- a/cartesi-rollups/node/dave-rollups/src/lib.rs +++ b/cartesi-rollups/node/dave-rollups/src/lib.rs @@ -1,4 +1,4 @@ -use cartesi_prt_core::arena::BlockchainConfig; +use cartesi_prt_core::arena::{BlockchainConfig, EthArenaSender, SenderFiller}; use clap::Parser; use log::error; @@ -57,6 +57,7 @@ pub fn create_blockchain_reader_task( } pub fn create_compute_runner_task( + arena_sender: EthArenaSender, state_manager: Arc, parameters: &DaveParameters, ) -> JoinHandle<()> { @@ -64,6 +65,7 @@ pub fn create_compute_runner_task( spawn(async move { let mut compute_runner = ComputeRunner::new( + arena_sender, ¶ms.blockchain_config, state_manager, params.sleep_duration, @@ -78,6 +80,7 @@ pub fn create_compute_runner_task( } pub fn create_epoch_manager_task( + client: Arc, state_manager: Arc, parameters: &DaveParameters, ) -> JoinHandle<()> { @@ -85,7 +88,7 @@ pub fn create_epoch_manager_task( spawn(async move { let epoch_manager = EpochManager::new( - ¶ms.blockchain_config, + client, params.address_book.consensus, 
state_manager, params.sleep_duration, diff --git a/cartesi-rollups/node/dave-rollups/src/main.rs b/cartesi-rollups/node/dave-rollups/src/main.rs index bc1a74b8..19b65d8a 100644 --- a/cartesi-rollups/node/dave-rollups/src/main.rs +++ b/cartesi-rollups/node/dave-rollups/src/main.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use cartesi_prt_core::arena::EthArenaSender; use clap::Parser; use dave_rollups::{ create_blockchain_reader_task, create_compute_runner_task, create_epoch_manager_task, @@ -20,10 +21,14 @@ async fn main() -> Result<()> { ¶meters.path_to_db, )?)?); + let arena_sender = EthArenaSender::new(¶meters.blockchain_config)?; + let client = arena_sender.client(); + let blockchain_reader_task = create_blockchain_reader_task(state_manager.clone(), ¶meters); - let epoch_manager_task = create_epoch_manager_task(state_manager.clone(), ¶meters); + let epoch_manager_task = create_epoch_manager_task(client, state_manager.clone(), ¶meters); let machine_runner_task = create_machine_runner_task(state_manager.clone(), ¶meters); - let compute_runner_task = create_compute_runner_task(state_manager.clone(), ¶meters); + let compute_runner_task = + create_compute_runner_task(arena_sender, state_manager.clone(), ¶meters); let (_blockchain_reader_res, _epoch_manager_res, _machine_runner_res, _compute_runner_res) = futures::join!( blockchain_reader_task, diff --git a/cartesi-rollups/node/epoch-manager/Cargo.toml b/cartesi-rollups/node/epoch-manager/Cargo.toml index c292bd0d..9ef015cc 100644 --- a/cartesi-rollups/node/epoch-manager/Cargo.toml +++ b/cartesi-rollups/node/epoch-manager/Cargo.toml @@ -13,6 +13,9 @@ repository.workspace = true cartesi-dave-contracts = { workspace = true } rollups-state-manager = { workspace = true } cartesi-prt-core = { workspace = true } + alloy = { workspace = true } anyhow = { workspace = true } +log = { workspace = true } +num-traits = { workspace = true } tokio = { workspace = true } diff --git a/cartesi-rollups/node/epoch-manager/src/lib.rs 
b/cartesi-rollups/node/epoch-manager/src/lib.rs index 2e810b6f..d2f39f75 100644 --- a/cartesi-rollups/node/epoch-manager/src/lib.rs +++ b/cartesi-rollups/node/epoch-manager/src/lib.rs @@ -1,19 +1,18 @@ -use alloy::{ - network::EthereumWallet, providers::ProviderBuilder, signers::local::PrivateKeySigner, - sol_types::private::Address, -}; +use alloy::{hex::ToHexExt, sol_types::private::Address}; use anyhow::Result; -use std::{str::FromStr, sync::Arc, time::Duration}; +use log::{error, info}; +use num_traits::cast::ToPrimitive; +use std::{sync::Arc, time::Duration}; use cartesi_dave_contracts::daveconsensus; -use cartesi_prt_core::arena::{BlockchainConfig, SenderFiller}; +use cartesi_prt_core::arena::SenderFiller; use rollups_state_manager::StateManager; pub struct EpochManager { + client: Arc, consensus: Address, sleep_duration: Duration, state_manager: Arc, - client: Arc, } impl EpochManager @@ -21,28 +20,11 @@ where ::Error: Send + Sync + 'static, { pub fn new( - config: &BlockchainConfig, + client: Arc, consensus_address: Address, state_manager: Arc, sleep_duration: u64, ) -> Self { - let signer = PrivateKeySigner::from_str(config.web3_private_key.as_str()) - .expect("fail to construct signer"); - let wallet = EthereumWallet::from(signer); - - let url = config.web3_rpc_url.parse().expect("fail to parse url"); - let provider = ProviderBuilder::new() - .with_recommended_fillers() - .wallet(wallet) - .with_chain( - config - .web3_chain_id - .try_into() - .expect("fail to convert chain id"), - ) - .on_http(url); - let client = Arc::new(provider); - Self { consensus: consensus_address, sleep_duration: Duration::from_secs(sleep_duration), @@ -57,14 +39,25 @@ where let can_settle = dave_consensus.canSettle().call().await?; if can_settle.isFinished { - match self.state_manager.computation_hash(0)? { + match self.state_manager.computation_hash( + can_settle + .epochNumber + .to_u64() + .expect("fail to convert epoch number to u64"), + )? 
{ Some(computation_hash) => { - dave_consensus - .settle(can_settle.epochNumber) - .send() - .await? - .watch() - .await?; + info!( + "settle epoch {} with claim 0x{}", + can_settle.epochNumber, + computation_hash.encode_hex() + ); + match dave_consensus.settle(can_settle.epochNumber).send().await { + Ok(tx_builder) => { + let _ = tx_builder.watch().await.inspect_err(|e| error!("{}", e)); + } + // allow retry when errors happen + Err(e) => error!("{e}"), + } // TODO: if claim doesn't match, that can be a serious problem, send out alert } None => { @@ -72,7 +65,6 @@ where } } } - tokio::time::sleep(self.sleep_duration).await; } } diff --git a/cartesi-rollups/node/machine-runner/src/error.rs b/cartesi-rollups/node/machine-runner/src/error.rs index ea887fe2..722ab12d 100644 --- a/cartesi-rollups/node/machine-runner/src/error.rs +++ b/cartesi-rollups/node/machine-runner/src/error.rs @@ -14,6 +14,13 @@ pub enum MachineRunnerError { #[from] source: DigestError, }, + + #[error(transparent)] + IO { + #[from] + source: std::io::Error, + }, + #[error(transparent)] Machine { #[from] diff --git a/cartesi-rollups/node/machine-runner/src/lib.rs b/cartesi-rollups/node/machine-runner/src/lib.rs index 72ebfb80..40f9a3a1 100644 --- a/cartesi-rollups/node/machine-runner/src/lib.rs +++ b/cartesi-rollups/node/machine-runner/src/lib.rs @@ -4,16 +4,20 @@ mod error; use alloy::sol_types::private::U256; use error::{MachineRunnerError, Result}; -use std::{path::Path, sync::Arc, time::Duration}; +use std::{ + fs, + path::{Path, PathBuf}, + sync::Arc, + time::Duration, +}; -use cartesi_dave_arithmetic::max_uint; use cartesi_dave_merkle::{Digest, MerkleBuilder}; use cartesi_machine::{break_reason, configuration::RuntimeConfig, htif, machine::Machine}; use cartesi_prt_core::machine::constants::{LOG2_EMULATOR_SPAN, LOG2_INPUT_SPAN, LOG2_UARCH_SPAN}; use rollups_state_manager::{InputId, StateManager}; // gap of each leaf in the commitment tree, should use the same value as 
CanonicalConstants.sol:log2step(0) -const LOG2_STRIDE: u64 = 49; +const LOG2_STRIDE: u64 = 44; pub struct MachineRunner { machine: Machine, @@ -78,6 +82,8 @@ where .map_err(|e| MachineRunnerError::StateManagerError(e))?; if self.epoch_number == latest_epoch { + // all inputs processed in current epoch + // epoch may still be open, come back later break Ok(()); } else { assert!(self.epoch_number < latest_epoch); @@ -94,6 +100,9 @@ where } fn advance_epoch(&mut self) -> Result<(), SM> { + if self.next_input_index_in_epoch == 0 { + self.take_snapshot()?; + } loop { let next = self .state_manager @@ -114,41 +123,43 @@ where } /// calculate computation hash for `self.epoch_number` - fn build_commitment(&self) -> Result, SM> { + fn build_commitment(&mut self) -> Result, SM> { // get all state hashes with repetitions for `self.epoch_number` - let state_hashes = self + let mut state_hashes = self .state_manager .machine_state_hashes(self.epoch_number) .map_err(|e| MachineRunnerError::StateManagerError(e))?; + let stride_count_in_epoch = + 1 << (LOG2_INPUT_SPAN + LOG2_EMULATOR_SPAN + LOG2_UARCH_SPAN - LOG2_STRIDE); + if state_hashes.len() == 0 { + // no inputs in current epoch, add machine state hash repeatedly + self.add_state_hash(stride_count_in_epoch)?; + state_hashes.push(( + self.machine.get_root_hash()?.as_bytes().to_vec(), + stride_count_in_epoch, + )); + } let computation_hash = { - if state_hashes.len() == 0 { - // no inputs in current epoch, reuse claim from previous epoch - self.state_manager - .computation_hash(self.epoch_number - 1) - .map_err(|e| MachineRunnerError::StateManagerError(e))? 
- .unwrap() - } else { - let mut builder = MerkleBuilder::default(); - let mut total_repetitions = 0; - for state_hash in &state_hashes { - total_repetitions += state_hash.1; - builder.append_repeated( - Digest::from_digest(&state_hash.0)?, - U256::from(state_hash.1), - ); - } - - let stride_count_in_epoch = - max_uint(LOG2_INPUT_SPAN + LOG2_EMULATOR_SPAN + LOG2_UARCH_SPAN - LOG2_STRIDE); + let mut builder = MerkleBuilder::default(); + let mut total_repetitions = 0; + for state_hash in &state_hashes { + total_repetitions += state_hash.1; + builder.append_repeated( + Digest::from_digest(&state_hash.0)?, + U256::from(state_hash.1), + ); + } + if stride_count_in_epoch > total_repetitions { + self.add_state_hash(stride_count_in_epoch - total_repetitions)?; builder.append_repeated( Digest::from_digest(&state_hashes.last().unwrap().0)?, - U256::from(stride_count_in_epoch - total_repetitions + 1), + U256::from(stride_count_in_epoch - total_repetitions), ); - - let tree = builder.build(); - tree.root_hash().slice().to_vec() } + + let tree = builder.build(); + tree.root_hash().slice().to_vec() }; Ok(computation_hash) @@ -163,8 +174,9 @@ where } fn process_input(&mut self, data: &[u8]) -> Result<(), SM> { - let big_steps_in_stride = max_uint(LOG2_STRIDE - LOG2_UARCH_SPAN); - let stride_count_in_input = max_uint(LOG2_EMULATOR_SPAN + LOG2_UARCH_SPAN - LOG2_STRIDE); + // TODO: review caclulations + let big_steps_in_stride = 1 << (LOG2_STRIDE - LOG2_UARCH_SPAN); + let stride_count_in_input = 1 << (LOG2_EMULATOR_SPAN + LOG2_UARCH_SPAN - LOG2_STRIDE); self.feed_input(data)?; self.run_machine(big_steps_in_stride)?; @@ -215,6 +227,31 @@ where Ok(()) } + + fn take_snapshot(&self) -> Result<(), SM> { + let epoch_path = PathBuf::from(format!("/rollups_data/{}", self.epoch_number)); + let snapshot_path = epoch_path.join(format!( + "{}", + self.next_input_index_in_epoch << LOG2_EMULATOR_SPAN + )); + if !epoch_path.exists() { + fs::create_dir_all(&epoch_path)?; + } + if 
!snapshot_path.exists() { + self.state_manager + .add_snapshot( + snapshot_path + .to_str() + .expect("fail to convert snapshot path"), + self.epoch_number, + self.next_input_index_in_epoch, + ) + .map_err(|e| MachineRunnerError::StateManagerError(e))?; + self.machine.store(&snapshot_path)?; + } + + Ok(()) + } } #[cfg(test)] @@ -266,8 +303,8 @@ mod tests { Ok(self.inputs.len() as u64) } - fn last_epoch(&self) -> Result> { - panic!("last_epoch not implemented in mock version"); + fn last_sealed_epoch(&self) -> Result> { + panic!("last_sealed_epoch not implemented in mock version"); } fn input(&self, id: &InputId) -> Result> { diff --git a/cartesi-rollups/node/state-manager/src/lib.rs b/cartesi-rollups/node/state-manager/src/lib.rs index d1b9d978..5ec7eee6 100644 --- a/cartesi-rollups/node/state-manager/src/lib.rs +++ b/cartesi-rollups/node/state-manager/src/lib.rs @@ -83,7 +83,7 @@ pub trait StateManager { fn epoch(&self, epoch_number: u64) -> Result, Self::Error>; fn epoch_count(&self) -> Result; - fn last_epoch(&self) -> Result, Self::Error>; + fn last_sealed_epoch(&self) -> Result, Self::Error>; fn input(&self, id: &InputId) -> Result, Self::Error>; fn inputs(&self, epoch_number: u64) -> Result>, Self::Error>; fn input_count(&self, epoch_number: u64) -> Result; diff --git a/cartesi-rollups/node/state-manager/src/persistent_state_access.rs b/cartesi-rollups/node/state-manager/src/persistent_state_access.rs index 429c78f0..2d97c763 100644 --- a/cartesi-rollups/node/state-manager/src/persistent_state_access.rs +++ b/cartesi-rollups/node/state-manager/src/persistent_state_access.rs @@ -37,9 +37,9 @@ impl StateManager for PersistentStateAccess { consensus_data::epoch_count(&conn) } - fn last_epoch(&self) -> Result> { + fn last_sealed_epoch(&self) -> Result> { let conn = self.connection.lock().unwrap(); - consensus_data::last_epoch(&conn) + consensus_data::last_sealed_epoch(&conn) } fn input(&self, id: &InputId) -> Result> { @@ -279,12 +279,6 @@ impl StateManager for 
PersistentStateAccess { res.push(row?); } - if res.len() == 0 { - return Err(PersistentStateAccessError::DataNotFound { - description: "machine state hash doesn't exist".to_owned(), - }); - } - Ok(res) } diff --git a/cartesi-rollups/node/state-manager/src/sql/consensus_data.rs b/cartesi-rollups/node/state-manager/src/sql/consensus_data.rs index aa7c04de..7cff76b7 100644 --- a/cartesi-rollups/node/state-manager/src/sql/consensus_data.rs +++ b/cartesi-rollups/node/state-manager/src/sql/consensus_data.rs @@ -201,7 +201,7 @@ fn insert_epoch_statement<'a>(conn: &'a rusqlite::Connection) -> Result Result> { +pub fn last_sealed_epoch(conn: &rusqlite::Connection) -> Result> { let mut stmt = conn.prepare( "\ SELECT epoch_number, epoch_boundary, root_tournament FROM epochs diff --git a/common-rs/merkle/Cargo.toml b/common-rs/merkle/Cargo.toml index 59a9e458..c523dc6f 100644 --- a/common-rs/merkle/Cargo.toml +++ b/common-rs/merkle/Cargo.toml @@ -11,7 +11,7 @@ readme = { workspace = true } repository = { workspace = true } [dependencies] -alloy = { version = "0.3.1", features = ["sol-types"] } +alloy = { version = "0.8.0", features = ["sol-types"] } hex = "0.4" ruint = "1.12" sha3 = "0.10" diff --git a/common-rs/merkle/src/digest/mod.rs b/common-rs/merkle/src/digest/mod.rs index 05680ab4..d90c8bcc 100644 --- a/common-rs/merkle/src/digest/mod.rs +++ b/common-rs/merkle/src/digest/mod.rs @@ -62,7 +62,7 @@ impl Digest { /// Converts the [Digest] to a hexadecimal string. pub fn to_hex(&self) -> String { - hex::encode(self.data) + format!("0x{}", hex::encode(self.data)) } /// Checks if the [Digest] is zeroed. diff --git a/machine/rust-bindings/cartesi-machine/src/machine.rs b/machine/rust-bindings/cartesi-machine/src/machine.rs index f6e63ac6..ee72c391 100644 --- a/machine/rust-bindings/cartesi-machine/src/machine.rs +++ b/machine/rust-bindings/cartesi-machine/src/machine.rs @@ -137,6 +137,35 @@ impl Machine { Ok(()) } + /// Write a CMIO response logging all accesses to the state. 
+ pub fn log_send_cmio_response( + &mut self, + reason: u16, + data: &[u8], + log_type: log::AccessLogType, + one_based: bool, + ) -> Result { + let mut error_collector = ErrorCollector::new(); + let mut access_log = std::ptr::null_mut(); + + let result = unsafe { + cartesi_machine_sys::cm_log_send_cmio_response( + self.machine, + reason, + data.as_ptr(), + data.len(), + log_type.into(), + one_based, + &mut access_log, + error_collector.as_mut_ptr(), + ) + }; + + error_collector.collect(result)?; + + Ok(log::AccessLog::new(access_log)) + } + /// Runs the machine for one micro cycle logging all accesses to the state. pub fn log_uarch_step( &mut self, @@ -160,7 +189,7 @@ impl Machine { Ok(log::AccessLog::new(access_log)) } - /// Checks the internal consistency of an access log + /// Checks the internal consistency of an access log produced by cm_log_uarch_step pub fn verify_uarch_step_log( &mut self, log: &log::AccessLog, @@ -231,7 +260,7 @@ impl Machine { Ok(()) } - /// Checks the internal consistency of an access log produced by cm_log_uarch_step + /// Checks the internal consistency of an access log produced by cm_log_uarch_reset pub fn verify_uarch_reset_log( &mut self, log: &log::AccessLog, diff --git a/machine/step b/machine/step index e3f1251e..7e1574e8 160000 --- a/machine/step +++ b/machine/step @@ -1 +1 @@ -Subproject commit e3f1251e7619bcc073ffe51e784cc36d464edf98 +Subproject commit 7e1574e8b5259e1881691e37ff287f66859c86c5 diff --git a/prt/client-lua/computation/commitment.lua b/prt/client-lua/computation/commitment.lua index da322ed6..bf1e6d20 100644 --- a/prt/client-lua/computation/commitment.lua +++ b/prt/client-lua/computation/commitment.lua @@ -6,7 +6,20 @@ local consts = require "computation.constants" local ulte = arithmetic.ulte -local save_snapshot = true +local handle_rollups = false + + +local function print_flush_same_line(args_str) + io.write(string.format("\r%s", args_str)) + -- Flush the output to ensure it appears immediately + io.flush() 
+end + +local function finish_print_flush_same_line() + io.write("\n") + -- Flush the output to ensure it appears immediately + io.flush() +end local function run_uarch_span(machine) assert(machine.ucycle == 0) @@ -31,76 +44,90 @@ local function run_uarch_span(machine) machine_state = machine:ureset() builder:add(machine_state.root_hash) - return builder:build() + return builder:build(), machine_state end -local function build_small_machine_commitment(base_cycle, log2_stride_count, machine, snapshot_dir) - local machine_state = machine:run(base_cycle) - if save_snapshot then - -- taking snapshot for leafs to save time in next level - machine:snapshot(snapshot_dir, base_cycle) - end - local initial_state = machine_state.root_hash - +local function build_small_machine_commitment(log2_stride_count, machine, initial_state, snapshot_dir) local builder = MerkleBuilder:new() local instruction_count = arithmetic.max_uint(log2_stride_count - consts.log2_uarch_span) local instruction = 0 while ulte(instruction, instruction_count) do - builder:add(run_uarch_span(machine)) + print_flush_same_line(string.format( + "building small machine commitment (%d/%d)...", + instruction, instruction_count + )) + + local uarch_span, machine_state = run_uarch_span(machine) + builder:add(uarch_span) instruction = instruction + 1 -- Optional optimization, just comment to remove. 
- if machine:state().halted then - builder:add(run_uarch_span(machine), instruction_count - instruction + 1) + if machine_state.halted or machine_state.yielded then + uarch_span, _ = run_uarch_span(machine) + builder:add(uarch_span, instruction_count - instruction + 1) break end end + finish_print_flush_same_line() return initial_state, builder:build(initial_state) end -local function build_big_machine_commitment(base_cycle, log2_stride, log2_stride_count, machine, snapshot_dir) - local machine_state = machine:run(base_cycle) - if save_snapshot then - -- taking snapshot for leafs to save time in next level - machine:snapshot(snapshot_dir, base_cycle) - end - local initial_state = machine_state.root_hash - +local function build_big_machine_commitment(base_cycle, log2_stride, log2_stride_count, machine, initial_state) local builder = MerkleBuilder:new() local instruction_count = arithmetic.max_uint(log2_stride_count) local instruction = 0 while ulte(instruction, instruction_count) do + print_flush_same_line(string.format( + "building big machine commitment (%d/%d)...", + instruction, instruction_count + )) + local cycle = ((instruction + 1) << (log2_stride - consts.log2_uarch_span)) - machine_state = machine:run(base_cycle + cycle) + local machine_state = machine:run(base_cycle + cycle) - if not machine_state.halted then - builder:add(machine_state.root_hash) - instruction = instruction + 1 - else + if machine_state.halted or machine_state.yielded then -- add this loop plus all remainings builder:add(machine_state.root_hash, instruction_count - instruction + 1) break + else + builder:add(machine_state.root_hash) + instruction = instruction + 1 end end + finish_print_flush_same_line() return initial_state, builder:build(initial_state) end -local function build_commitment(base_cycle, log2_stride, log2_stride_count, machine_path, snapshot_dir) +local function build_commitment(base_cycle, log2_stride, log2_stride_count, machine_path, snapshot_dir, inputs) local machine = 
Machine:new_from_path(machine_path) machine:load_snapshot(snapshot_dir, base_cycle) + local initial_state + if inputs then + -- treat it as rollups + -- the base_cycle may be the cycle to receive input, + -- we need to take the initial state before feeding input to the machine + handle_rollups = true + initial_state = machine:run_with_inputs(base_cycle, inputs, snapshot_dir).root_hash + else + -- treat it as compute + handle_rollups = false + initial_state = machine:run(base_cycle).root_hash -- taking snapshot for leafs to save time in next level + machine:take_snapshot(snapshot_dir, base_cycle, handle_rollups) + end + if log2_stride >= consts.log2_uarch_span then assert( log2_stride + log2_stride_count <= - consts.log2_emulator_span + consts.log2_uarch_span + consts.log2_input_span + consts.log2_emulator_span + consts.log2_uarch_span ) - return build_big_machine_commitment(base_cycle, log2_stride, log2_stride_count, machine, snapshot_dir) + return build_big_machine_commitment(base_cycle, log2_stride, log2_stride_count, machine, initial_state) else assert(log2_stride == 0) - return build_small_machine_commitment(base_cycle, log2_stride_count, machine, snapshot_dir) + return build_small_machine_commitment(log2_stride_count, machine, initial_state, snapshot_dir) end end @@ -120,7 +147,7 @@ function CommitmentBuilder:new(machine_path, snapshot_dir, root_commitment) return c end -function CommitmentBuilder:build(base_cycle, level, log2_stride, log2_stride_count) +function CommitmentBuilder:build(base_cycle, level, log2_stride, log2_stride_count, inputs) if not self.commitments[level] then self.commitments[level] = {} elseif self.commitments[level][base_cycle] then @@ -128,7 +155,7 @@ function CommitmentBuilder:build(base_cycle, level, log2_stride, log2_stride_cou end local _, commitment = build_commitment(base_cycle, log2_stride, log2_stride_count, self.machine_path, - self.snapshot_dir) + self.snapshot_dir, inputs) self.commitments[level][base_cycle] = commitment 
return commitment end diff --git a/prt/client-lua/computation/constants.lua b/prt/client-lua/computation/constants.lua index 3fec0c57..6f815240 100644 --- a/prt/client-lua/computation/constants.lua +++ b/prt/client-lua/computation/constants.lua @@ -2,6 +2,7 @@ local arithmetic = require "utils.arithmetic" local log2_uarch_span = 20 local log2_emulator_span = 48 +local log2_input_span = 24 local constants = { log2_uarch_span = log2_uarch_span, @@ -9,6 +10,9 @@ local constants = { log2_emulator_span = log2_emulator_span, emulator_span = arithmetic.max_uint(log2_emulator_span), + + log2_input_span = log2_input_span, + input_span = arithmetic.max_uint(log2_input_span), } return constants diff --git a/prt/client-lua/computation/machine.lua b/prt/client-lua/computation/machine.lua index 2e789e0b..2833dc13 100644 --- a/prt/client-lua/computation/machine.lua +++ b/prt/client-lua/computation/machine.lua @@ -7,10 +7,11 @@ local helper = require "utils.helper" local ComputationState = {} ComputationState.__index = ComputationState -function ComputationState:new(root_hash, halted, uhalted) +function ComputationState:new(root_hash, halted, yielded, uhalted) local r = { root_hash = root_hash, halted = halted, + yielded = yielded, uhalted = uhalted } setmetatable(r, self) @@ -22,15 +23,17 @@ function ComputationState.from_current_machine_state(machine) return ComputationState:new( hash, machine:read_iflags_H(), + machine:read_iflags_Y(), machine:read_uarch_halt_flag() ) end ComputationState.__tostring = function(x) return string.format( - "{root_hash = %s, halted = %s, uhalted = %s}", + "{root_hash = %s, halted = %s, yielded = %s, uhalted = %s}", x.root_hash, x.halted, + x.yielded, x.uhalted ) end @@ -93,6 +96,7 @@ local function find_closest_snapshot(path, current_cycle, cycle) -- Binary search for the closest number smaller than target cycle local closest_dir = nil + local closest_cycle = nil local low, high = 1, #directories while low <= high do @@ -101,16 +105,28 @@ local 
function find_closest_snapshot(path, current_cycle, cycle) if mid_number < cycle and mid_number > current_cycle then closest_dir = directories[mid].path + closest_cycle = directories[mid].number low = mid + 1 -- Search in the larger half else high = mid - 1 -- Search in the smaller half end end - return closest_dir + return closest_cycle, closest_dir end -function Machine:snapshot(snapshot_dir, cycle) + +local function to256BitHex(num) -- Pad the hex string with leading zeros to ensure it's 64 characters long (256 bits) + return string.format("%064x", num) +end + +function Machine:take_snapshot(snapshot_dir, cycle, handle_rollups) + local input_mask = arithmetic.max_uint(consts.log2_emulator_span) + if handle_rollups and cycle & input_mask == 0 then + -- dont snapshot a machine state that's freshly fed with input without advance + assert(not self.yielded, "don't snapshot a machine state that's freshly fed with input without advance") + end + if helper.exists(snapshot_dir) then local snapshot_path = snapshot_dir .. "/" .. tostring(cycle) @@ -122,15 +138,17 @@ function Machine:snapshot(snapshot_dir, cycle) end function Machine:load_snapshot(snapshot_dir, cycle) + local snapshot_cycle = cycle local snapshot_path = snapshot_dir .. "/" .. 
tostring(cycle) if not helper.exists(snapshot_path) then -- find closest snapshot if direct snapshot doesn't exists - snapshot_path = find_closest_snapshot(snapshot_dir, self.cycle, cycle) + snapshot_cycle, snapshot_path = find_closest_snapshot(snapshot_dir, self.cycle, cycle) end if snapshot_path then + print(string.format("load snapshot from %s", snapshot_path)) local machine = cartesi.machine(snapshot_path, machine_settings) - self.cycle = machine:read_mcycle() - self.start_cycle + self.cycle = snapshot_cycle self.machine = machine end end @@ -145,10 +163,12 @@ end function Machine:run(cycle) assert(arithmetic.ulte(self.cycle, cycle)) - local physical_cycle = add_and_clamp(self.start_cycle, cycle) -- TODO reconsider for lambda local machine = self.machine - while not (machine:read_iflags_H() or machine:read_mcycle() == physical_cycle) do + local mcycle = machine:read_mcycle() + local physical_cycle = add_and_clamp(mcycle, cycle - self.cycle) -- TODO reconsider for lambda + + while not (machine:read_iflags_H() or machine:read_iflags_Y() or machine:read_mcycle() == physical_cycle) do machine:run(physical_cycle) end @@ -163,6 +183,46 @@ function Machine:run_uarch(ucycle) self.ucycle = ucycle end +function Machine:run_with_inputs(cycle, inputs, snapshot_dir) + local input_mask = arithmetic.max_uint(consts.log2_emulator_span) + local current_input_index = self.cycle >> consts.log2_emulator_span + + local next_input_index + local machine_state_without_input = self:state() + + if self.cycle & input_mask == 0 then + next_input_index = current_input_index + else + next_input_index = current_input_index + 1 + end + local next_input_cycle = next_input_index << consts.log2_emulator_span + + while next_input_cycle <= cycle do + machine_state_without_input = self:run(next_input_cycle) + if next_input_cycle == cycle then + self:take_snapshot(snapshot_dir, next_input_cycle, true) + end + local input = inputs[next_input_index + 1] + if input then + local h = 
assert(input:match("0x(%x+)"), input) + local data_hex = (h:gsub('..', function(cc) + return string.char(tonumber(cc, 16)) + end)) + self.machine:send_cmio_response(cartesi.machine.HTIF_YIELD_REASON_ADVANCE_STATE, data_hex); + end + + next_input_index = next_input_index + 1 + next_input_cycle = next_input_index << consts.log2_emulator_span + end + + if cycle > self.cycle then + machine_state_without_input = self:run(cycle) + self:take_snapshot(snapshot_dir, cycle, true) + end + + return machine_state_without_input +end + function Machine:increment_uarch() self.machine:run_uarch(self.ucycle + 1) self.ucycle = self.ucycle + 1 @@ -200,30 +260,22 @@ local function ver(t, p, s) return t end -function Machine.get_logs(path, snapshot_dir, cycle, ucycle) - local machine = Machine:new_from_path(path) - machine:load_snapshot(snapshot_dir, cycle) - local logs - machine:run(cycle) - machine:run_uarch(ucycle) - - if ucycle == consts.uarch_span then - logs = machine.machine:log_uarch_reset { annotations = true, proofs = true } - else - logs = machine.machine:log_uarch_step { annotations = true, proofs = true } - end +local bint = require 'utils.bint' (256) -- use 256 bits integers +local function encode_access_logs(logs, encode_input) local encoded = {} - for _, a in ipairs(logs.accesses) do - if a.log2_size == 3 then - table.insert(encoded, a.read) - else - table.insert(encoded, a.read_hash) - end + for _, log in ipairs(logs) do + for _, a in ipairs(log.accesses) do + if a.log2_size == 3 then + table.insert(encoded, a.read) + else + table.insert(encoded, a.read_hash) + end - for _, h in ipairs(a.sibling_hashes) do - table.insert(encoded, h) + for _, h in ipairs(a.sibling_hashes) do + table.insert(encoded, h) + end end end @@ -232,7 +284,76 @@ function Machine.get_logs(path, snapshot_dir, cycle, ucycle) return string.format('%02x', string.byte(c)) end)) - return '"' .. hex_data .. '"' + local res + if encode_input then + assert(#encode_input >= 2) + res = "0x" .. 
to256BitHex((#encode_input - 2) / 2) + if #encode_input > 2 then + res = res .. string.sub(encode_input, 3, #encode_input) + end + res = res .. string.sub(hex_data, 3, #hex_data) + else + res = hex_data + end + return '"' .. res .. '"' +end + +function Machine.get_logs(path, snapshot_dir, cycle, ucycle, inputs) + local machine = Machine:new_from_path(path) + machine:load_snapshot(snapshot_dir, cycle) + local logs = {} + local log_type = { annotations = true, proofs = true } + local encode_input = nil + if inputs then + -- treat it as rollups + -- the cycle may be the cycle to receive input, + -- we need to include the process of feeding input to the machine in the log + if cycle == 0 then + machine:run(cycle) + else + machine:run_with_inputs(cycle - 1, inputs, snapshot_dir) + machine:run(cycle) + end + + local mask = arithmetic.max_uint(consts.log2_emulator_span); + -- lua is one based + local input = inputs[(cycle >> consts.log2_emulator_span) + 1] + if cycle & mask == 0 then + if input then + local h = assert(input:match("0x(%x+)"), input) + local data_hex = (h:gsub('..', function(cc) + return string.char(tonumber(cc, 16)) + end)) + -- need to process input + if ucycle == 0 then + -- need to log cmio + table.insert(logs, + machine.machine:log_send_cmio_response(cartesi.machine.HTIF_YIELD_REASON_ADVANCE_STATE, data_hex, + log_type + )) + table.insert(logs, machine.machine:log_uarch_step(log_type)) + return encode_access_logs(logs, input) + else + machine.machine:send_cmio_response(cartesi.machine.HTIF_YIELD_REASON_ADVANCE_STATE, input) + end + else + if ucycle == 0 then + encode_input = "0x" + end + end + end + else + -- treat it as compute + machine:run(cycle) + end + + machine:run_uarch(ucycle) + if ucycle == consts.uarch_span then + table.insert(logs, machine.machine:log_uarch_reset(log_type)) + else + table.insert(logs, machine.machine:log_uarch_step(log_type)) + end + return encode_access_logs(logs, encode_input) end return Machine diff --git 
a/prt/client-lua/player/reader.lua b/prt/client-lua/player/reader.lua index b3beb935..bd2a0321 100644 --- a/prt/client-lua/player/reader.lua +++ b/prt/client-lua/player/reader.lua @@ -272,6 +272,24 @@ function Reader:read_commitment_joined(tournament_address) return ret end +function Reader:read_tournament_created(tournament_address, match_id_hash) + local sig = "newInnerTournament(bytes32,address)" + local data_sig = "(address)" + + local logs = self:_read_logs(tournament_address, sig, { match_id_hash:hex_string(), false, false }, data_sig) + assert(#logs <= 1) + + if #logs == 0 then return false end + local log = logs[1] + + local ret = { + parent_match = match_id_hash, + new_tournament = log.decoded_data[1], + } + + return ret +end + function Reader:read_commitment(tournament_address, commitment_hash) local sig = "getCommitment(bytes32)((uint64,uint64),bytes32)" @@ -310,24 +328,6 @@ function Reader:read_constants(tournament_address) return constants end -function Reader:read_tournament_created(tournament_address, match_id_hash) - local sig = "newInnerTournament(bytes32,address)" - local data_sig = "(address)" - - local logs = self:_read_logs(tournament_address, sig, { match_id_hash:hex_string(), false, false }, data_sig) - assert(#logs <= 1) - - if #logs == 0 then return false end - local log = logs[1] - - local ret = { - parent_match = match_id_hash, - new_tournament = log.decoded_data[1], - } - - return ret -end - function Reader:read_cycle(address, match_id_hash) local sig = "getMatchCycle(bytes32)(uint256)" local ret = self:_call(address, sig, { match_id_hash:hex_string() }) diff --git a/prt/client-lua/player/strategy.lua b/prt/client-lua/player/strategy.lua index c4d43fe8..fd2b2387 100644 --- a/prt/client-lua/player/strategy.lua +++ b/prt/client-lua/player/strategy.lua @@ -6,11 +6,12 @@ local GarbageCollector = require "player.gc" local HonestStrategy = {} HonestStrategy.__index = HonestStrategy -function HonestStrategy:new(commitment_builder, machine_path, 
sender) +function HonestStrategy:new(commitment_builder, inputs, machine_path, sender) local gc_strategy = GarbageCollector:new(sender) local honest_strategy = { commitment_builder = commitment_builder, + inputs = inputs, machine_path = machine_path, sender = sender, gc_strategy = gc_strategy, @@ -58,7 +59,7 @@ local function _is_my_turn(match, commitment) -- commitment one should be the first to react after the match is created -- thus commitment one will hold the same parity as the match height (not xor) -- and commitment two will hold the opposite parity (xor) - local res = false + local res local height_parity = match.tournament.log2_stride_count % 2 == 0 local current_height_parity = match.current_height % 2 == 0 local xor_of_two_parities = height_parity ~= current_height_parity @@ -126,7 +127,8 @@ function HonestStrategy:_react_match(match, commitment, log) local cycle = match.base_big_cycle local ucycle = (match.leaf_cycle & constants.uarch_span):touinteger() - local logs = Machine.get_logs(self.machine_path, self.commitment_builder.snapshot_dir, cycle, ucycle) + local logs = Machine.get_logs(self.machine_path, self.commitment_builder.snapshot_dir, cycle, ucycle, + self.inputs) helper.log_full(self.sender.index, string.format( "win leaf match in tournament %s of level %d for commitment %s", @@ -279,7 +281,8 @@ function HonestStrategy:_react_tournament(tournament, log) tournament.base_big_cycle, tournament.level, tournament.log2_stride, - tournament.log2_stride_count + tournament.log2_stride_count, + self.inputs ) table.insert(log.tournaments, tournament) @@ -297,7 +300,8 @@ function HonestStrategy:_react_tournament(tournament, log) tournament.parent.base_big_cycle, tournament.parent.level, tournament.parent.log2_stride, - tournament.parent.log2_stride_count + tournament.parent.log2_stride_count, + self.inputs ) if tournament_winner.commitment ~= old_commitment then helper.log_full(self.sender.index, "player lost tournament") diff --git 
a/prt/client-lua/utils/helper.lua b/prt/client-lua/utils/helper.lua index 28394ee1..988d4752 100644 --- a/prt/client-lua/utils/helper.lua +++ b/prt/client-lua/utils/helper.lua @@ -1,8 +1,6 @@ local color = require "utils.color" local names = { 'green', 'yellow', 'blue', 'pink', 'cyan', 'white' } -local idle_template = [[ls player%d_idle 2>/dev/null | grep player%d_idle | wc -l]] -local ps_template = [[ps %s | grep defunct | wc -l]] local helper = {} function helper.parse_datetime(datetime_str) @@ -137,4 +135,18 @@ function helper.is_pid_alive(pid) return false -- Returns false if the process is not alive end +-- Function to create a directory and its parents using os.execute +function helper.mkdir_p(path) + -- Use os.execute to call the mkdir command with -p option + local command = "mkdir -p " .. path + local result = os.execute(command) + + -- Check if the command was successful + if result then + print("Directory created successfully: " .. path) + else + print("Failed to create directory: " .. path) + end +end + return helper diff --git a/prt/client-rs/Cargo.toml b/prt/client-rs/Cargo.toml index 4f2c01de..e4295005 100644 --- a/prt/client-rs/Cargo.toml +++ b/prt/client-rs/Cargo.toml @@ -24,7 +24,7 @@ anyhow = "1.0" async-recursion = "1" async-trait = "0.1" clap = { version = "4.5", features = ["derive", "env"] } -alloy = { version = "0.3.1", features = ["sol-types", "contract", "network", "reqwest", "signers", "signer-local"] } +alloy = { version = "0.8.0", features = ["sol-types", "contract", "network", "reqwest", "signers", "signer-local"] } lazy_static = "1.4.0" log = "0.4" hex = "0.4.3" diff --git a/prt/client-rs/src/arena/sender.rs b/prt/client-rs/src/arena/sender.rs index b89b386b..663059c0 100644 --- a/prt/client-rs/src/arena/sender.rs +++ b/prt/client-rs/src/arena/sender.rs @@ -2,6 +2,7 @@ //! 
to tournaments use async_trait::async_trait; +use log::trace; use std::{str::FromStr, sync::Arc}; use alloy::{ @@ -75,6 +76,10 @@ impl EthArenaSender { }) } + pub fn client(&self) -> Arc { + self.client.clone() + } + pub async fn nonce(&self) -> std::result::Result> { Ok(self .client @@ -165,6 +170,11 @@ impl ArenaSender for EthArenaSender { .iter() .map(|h| -> B256 { (*h).into() }) .collect(); + trace!( + "final state for tournament {} at position {}", + proof.node, + proof.position + ); tournament .joinTournament( proof.node.into(), diff --git a/prt/client-rs/src/db/dispute_state_access.rs b/prt/client-rs/src/db/compute_state_access.rs similarity index 60% rename from prt/client-rs/src/db/dispute_state_access.rs rename to prt/client-rs/src/db/compute_state_access.rs index 27923249..69c5ed8a 100644 --- a/prt/client-rs/src/db/dispute_state_access.rs +++ b/prt/client-rs/src/db/compute_state_access.rs @@ -1,7 +1,7 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use crate::db::sql::{dispute_data, error::*, migrations}; +use crate::db::sql::{compute_data, error::*, migrations}; use cartesi_dave_merkle::{Digest, MerkleBuilder, MerkleTree}; use alloy::hex as alloy_hex; @@ -16,20 +16,20 @@ use std::{ #[derive(Debug, Serialize, Deserialize)] pub struct InputsAndLeafs { - #[serde(default)] - inputs: Vec, + inputs: Option>, leafs: Vec, } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Default)] pub struct Input(#[serde(with = "alloy_hex::serde")] pub Vec); -#[derive(Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Leaf(#[serde(with = "alloy_hex::serde")] pub [u8; 32], pub u64); #[derive(Debug)] -pub struct DisputeStateAccess { +pub struct ComputeStateAccess { connection: Mutex, + pub handle_rollups: bool, pub work_path: PathBuf, } @@ -44,32 +44,37 @@ fn read_json_file(file_path: &Path) -> Result { Ok(data) } -impl DisputeStateAccess { 
+impl ComputeStateAccess { pub fn new( - inputs: Vec, + inputs: Option>, leafs: Vec, root_tournament: String, - dispute_data_path: &str, + compute_data_path: &str, ) -> Result { // initialize the database if it doesn't exist // fill the database from a json-format file, or the parameters - // the database should be "/dispute_data/0x_root_tournament_address/db" - // the json file should be "/dispute_data/0x_root_tournament_address/inputs_and_leafs.json" - let work_dir = format!("{dispute_data_path}/{root_tournament}"); + // the database should be "/compute_data/0x_root_tournament_address/db" + // the json file should be "/compute_data/0x_root_tournament_address/inputs_and_leafs.json" + let work_dir = format!("{compute_data_path}/{root_tournament}"); let work_path = PathBuf::from(work_dir); + if !work_path.exists() { + fs::create_dir_all(&work_path)?; + } let db_path = work_path.join("db"); let no_create_flags = OpenFlags::default() & !OpenFlags::SQLITE_OPEN_CREATE; + let handle_rollups; match Connection::open_with_flags(&db_path, no_create_flags) { // database already exists, return it Ok(connection) => { + handle_rollups = compute_data::handle_rollups(&connection)?; return Ok(Self { connection: Mutex::new(connection), + handle_rollups, work_path, - }) + }); } Err(_) => { - // create new database - info!("create new database"); + info!("create new database for dispute"); let mut connection = Connection::open(&db_path)?; migrations::migrate_to_latest(&mut connection).unwrap(); @@ -77,17 +82,21 @@ impl DisputeStateAccess { // prioritize json file over parameters match read_json_file(&json_path) { Ok(inputs_and_leafs) => { - dispute_data::insert_dispute_data( + handle_rollups = inputs_and_leafs.inputs.is_some(); + compute_data::insert_handle_rollups(&connection, handle_rollups)?; + compute_data::insert_compute_data( &connection, - inputs_and_leafs.inputs.iter(), + inputs_and_leafs.inputs.unwrap_or_default().iter(), inputs_and_leafs.leafs.iter(), )?; } Err(_) => { 
info!("load inputs and leafs from parameters"); - dispute_data::insert_dispute_data( + handle_rollups = inputs.is_some(); + compute_data::insert_handle_rollups(&connection, handle_rollups)?; + compute_data::insert_compute_data( &connection, - inputs.iter(), + inputs.unwrap_or_default().iter(), leafs.iter(), )?; } @@ -95,6 +104,7 @@ impl DisputeStateAccess { Ok(Self { connection: Mutex::new(connection), + handle_rollups, work_path, }) } @@ -103,7 +113,12 @@ impl DisputeStateAccess { pub fn input(&self, id: u64) -> Result>> { let conn = self.connection.lock().unwrap(); - dispute_data::input(&conn, id) + compute_data::input(&conn, id) + } + + pub fn inputs(&self) -> Result>> { + let conn = self.connection.lock().unwrap(); + compute_data::inputs(&conn) } pub fn insert_compute_leafs<'a>( @@ -113,7 +128,7 @@ impl DisputeStateAccess { leafs: impl Iterator, ) -> Result<()> { let conn = self.connection.lock().unwrap(); - dispute_data::insert_compute_leafs(&conn, level, base_cycle, leafs) + compute_data::insert_compute_leafs(&conn, level, base_cycle, leafs) } pub fn compute_leafs( @@ -122,11 +137,11 @@ impl DisputeStateAccess { base_cycle: u64, ) -> Result, u64)>> { let conn = self.connection.lock().unwrap(); - let leafs = dispute_data::compute_leafs(&conn, level, base_cycle)?; + let leafs = compute_data::compute_leafs(&conn, level, base_cycle)?; let mut tree = Vec::new(); for leaf in leafs { - let tree_leafs = dispute_data::compute_tree(&conn, &leaf.0)?; + let tree_leafs = compute_data::compute_tree(&conn, &leaf.0)?; if tree_leafs.len() > 0 { // if leaf is also tree, rebuild it from nested leafs let mut builder = MerkleBuilder::default(); @@ -142,16 +157,38 @@ impl DisputeStateAccess { Ok(tree) } + pub fn insert_compute_trees<'a>( + &self, + compute_trees: impl Iterator)>, + ) -> Result<()> { + let conn = self.connection.lock().unwrap(); + let tx = conn.unchecked_transaction()?; + for (_, digest_and_leaf) in compute_trees.enumerate() { + compute_data::insert_compute_tree( + 
&tx, + digest_and_leaf.0.slice(), + digest_and_leaf.1.iter(), + )?; + } + tx.commit()?; + + Ok(()) + } + pub fn insert_compute_tree<'a>( &self, tree_root: &[u8], tree_leafs: impl Iterator, ) -> Result<()> { let conn = self.connection.lock().unwrap(); - dispute_data::insert_compute_tree(&conn, tree_root, tree_leafs) + let tx = conn.unchecked_transaction()?; + compute_data::insert_compute_tree(&tx, tree_root, tree_leafs)?; + tx.commit()?; + + Ok(()) } - pub fn closest_snapshot(&self, base_cycle: u64) -> Result> { + pub fn closest_snapshot(&self, base_cycle: u64) -> Result> { let mut snapshots = Vec::new(); // iterate through the snapshot directory, find the one whose cycle number is closest to the base_cycle @@ -175,12 +212,26 @@ impl DisputeStateAccess { .binary_search_by_key(&base_cycle, |k| k.0) .unwrap_or_else(|x| if x > 0 { x - 1 } else { x }); - Ok(snapshots.get(pos).map(|t| t.1.clone())) + let snapshot = { + match snapshots.get(pos) { + Some(t) => { + if t.0 > base_cycle { + None + } else { + Some(t.clone()) + } + } + // snapshots.get(pos).map(|t| t.clone()), + None => None, + } + }; + + Ok(snapshot) } } #[cfg(test)] -mod dispute_state_access_tests { +mod compute_state_access_tests { use super::*; fn create_directory(path: &Path) -> std::io::Result<()> { @@ -197,6 +248,9 @@ mod dispute_state_access_tests { fn test_access_sequentially() { test_compute_tree(); test_closest_snapshot(); + test_compute_or_rollups_true(); + test_compute_or_rollups_false(); + test_none_match(); } fn test_closest_snapshot() { @@ -205,7 +259,7 @@ mod dispute_state_access_tests { create_directory(&work_dir).unwrap(); { let access = - DisputeStateAccess::new(Vec::new(), Vec::new(), String::from("0x12345678"), "/tmp") + ComputeStateAccess::new(None, Vec::new(), String::from("0x12345678"), "/tmp") .unwrap(); assert_eq!(access.closest_snapshot(0).unwrap(), None); @@ -222,50 +276,81 @@ mod dispute_state_access_tests { assert_eq!( access.closest_snapshot(100).unwrap(), - 
Some(access.work_path.join(format!("99"))) + Some((99, access.work_path.join(format!("99")))) ); assert_eq!( access.closest_snapshot(150).unwrap(), - Some(access.work_path.join(format!("150"))) + Some((150, access.work_path.join(format!("150")))) ); assert_eq!( access.closest_snapshot(200).unwrap(), - Some(access.work_path.join(format!("200"))) + Some((200, access.work_path.join(format!("200")))) ); assert_eq!( access.closest_snapshot(300).unwrap(), - Some(access.work_path.join(format!("300"))) + Some((300, access.work_path.join(format!("300")))) ); assert_eq!( access.closest_snapshot(7).unwrap(), - Some(access.work_path.join(format!("5"))) + Some((5, access.work_path.join(format!("5")))) ); assert_eq!( access.closest_snapshot(10000).unwrap(), - Some(access.work_path.join(format!("300"))) + Some((300, access.work_path.join(format!("300")))) ); assert_eq!( access.closest_snapshot(100000).unwrap(), - Some(access.work_path.join(format!("99999"))) + Some((99999, access.work_path.join(format!("99999")))) ); } remove_directory(&work_dir).unwrap(); } + fn test_none_match() { + let work_dir = PathBuf::from("/tmp/0x12345678"); + remove_directory(&work_dir).unwrap(); + create_directory(&work_dir).unwrap(); + { + let access = + ComputeStateAccess::new(None, Vec::new(), String::from("0x12345678"), "/tmp") + .unwrap(); + + let cycle: u64 = 844424930131968; + for c in [cycle] { + create_directory(&access.work_path.join(format!("{c}"))).unwrap(); + } + + assert_eq!(access.closest_snapshot(0).unwrap(), None); + assert_eq!(access.closest_snapshot(5629).unwrap(), None); + assert_eq!(access.closest_snapshot(5629499).unwrap(), None); + assert_eq!(access.closest_snapshot(56294995342).unwrap(), None); + assert_eq!(access.closest_snapshot(562949953421312).unwrap(), None); + assert_eq!( + access.closest_snapshot(cycle).unwrap(), + Some((cycle, access.work_path.join(format!("{}", cycle)))) + ); + assert_eq!( + access.closest_snapshot(cycle + 1).unwrap(), + Some((cycle, 
access.work_path.join(format!("{}", cycle)))) + ); + + remove_directory(&work_dir).unwrap(); + } + } + fn test_compute_tree() { let work_dir = PathBuf::from("/tmp/0x12345678"); remove_directory(&work_dir).unwrap(); create_directory(&work_dir).unwrap(); let access = - DisputeStateAccess::new(Vec::new(), Vec::new(), String::from("0x12345678"), "/tmp") - .unwrap(); + ComputeStateAccess::new(None, Vec::new(), String::from("0x12345678"), "/tmp").unwrap(); let root = [ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, @@ -284,11 +369,36 @@ mod dispute_state_access_tests { assert!(tree.0.subtrees().is_some()); } + fn test_compute_or_rollups_true() { + let work_dir = PathBuf::from("/tmp/0x12345678"); + remove_directory(&work_dir).unwrap(); + create_directory(&work_dir).unwrap(); + let access = ComputeStateAccess::new( + Some(Vec::new()), + Vec::new(), + String::from("0x12345678"), + "/tmp", + ) + .unwrap(); + + assert!(matches!(access.handle_rollups, true)); + } + + fn test_compute_or_rollups_false() { + let work_dir = PathBuf::from("/tmp/0x12345678"); + remove_directory(&work_dir).unwrap(); + create_directory(&work_dir).unwrap(); + let access = + ComputeStateAccess::new(None, Vec::new(), String::from("0x12345678"), "/tmp").unwrap(); + + assert!(matches!(access.handle_rollups, false)); + } + #[test] fn test_deserialize() { let json_str_1 = r#"{"leafs": [["0x01020304050607abcdef01020304050607abcdef01020304050607abcdef0102", 20], ["0x01020304050607fedcba01020304050607fedcba01020304050607fedcba0102", 13]]}"#; let inputs_and_leafs_1: InputsAndLeafs = serde_json::from_str(json_str_1).unwrap(); - assert_eq!(inputs_and_leafs_1.inputs.len(), 0); + assert_eq!(inputs_and_leafs_1.inputs.unwrap_or_default().len(), 0); assert_eq!(inputs_and_leafs_1.leafs.len(), 2); assert_eq!( inputs_and_leafs_1.leafs[0].0, @@ -307,14 +417,15 @@ mod dispute_state_access_tests { let json_str_2 = r#"{"inputs": [], "leafs": 
[["0x01020304050607abcdef01020304050607abcdef01020304050607abcdef0102", 20], ["0x01020304050607fedcba01020304050607fedcba01020304050607fedcba0102", 13]]}"#; let inputs_and_leafs_2: InputsAndLeafs = serde_json::from_str(json_str_2).unwrap(); - assert_eq!(inputs_and_leafs_2.inputs.len(), 0); + assert_eq!(inputs_and_leafs_2.inputs.unwrap_or_default().len(), 0); assert_eq!(inputs_and_leafs_2.leafs.len(), 2); let json_str_3 = r#"{"inputs": ["0x12345678", "0x22345678"], "leafs": [["0x01020304050607abcdef01020304050607abcdef01020304050607abcdef0102", 20], ["0x01020304050607fedcba01020304050607fedcba01020304050607fedcba0102", 13]]}"#; let inputs_and_leafs_3: InputsAndLeafs = serde_json::from_str(json_str_3).unwrap(); - assert_eq!(inputs_and_leafs_3.inputs.len(), 2); + let inputs_3 = inputs_and_leafs_3.inputs.unwrap(); + assert_eq!(inputs_3.len(), 2); assert_eq!(inputs_and_leafs_3.leafs.len(), 2); - assert_eq!(inputs_and_leafs_3.inputs[0].0, [18, 52, 86, 120]); - assert_eq!(inputs_and_leafs_3.inputs[1].0, [34, 52, 86, 120]); + assert_eq!(inputs_3[0].0, [18, 52, 86, 120]); + assert_eq!(inputs_3[1].0, [34, 52, 86, 120]); } } diff --git a/prt/client-rs/src/db/mod.rs b/prt/client-rs/src/db/mod.rs index 050effad..93a5489e 100644 --- a/prt/client-rs/src/db/mod.rs +++ b/prt/client-rs/src/db/mod.rs @@ -1,6 +1,6 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -pub mod dispute_state_access; +pub mod compute_state_access; pub(crate) mod sql; diff --git a/prt/client-rs/src/db/sql/dispute_data.rs b/prt/client-rs/src/db/sql/compute_data.rs similarity index 81% rename from prt/client-rs/src/db/sql/dispute_data.rs rename to prt/client-rs/src/db/sql/compute_data.rs index facde827..ccb3d3ee 100644 --- a/prt/client-rs/src/db/sql/dispute_data.rs +++ b/prt/client-rs/src/db/sql/compute_data.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 (see LICENSE) use super::error::*; -use crate::db::dispute_state_access::{Input, Leaf}; 
+use crate::db::compute_state_access::{Input, Leaf}; use rusqlite::{params, OptionalExtension}; @@ -45,6 +45,24 @@ pub fn input(conn: &rusqlite::Connection, id: u64) -> Result>> { Ok(i) } +pub fn inputs(conn: &rusqlite::Connection) -> Result>> { + let mut stmt = conn.prepare( + "\ + SELECT * FROM inputs + ORDER BY input_index ASC + ", + )?; + + let query = stmt.query_map([], |r| Ok(r.get("input")?))?; + + let mut res = vec![]; + for row in query { + res.push(row?); + } + + Ok(res) +} + // // Compute leafs // @@ -161,7 +179,37 @@ pub fn compute_tree_count(conn: &rusqlite::Connection, tree_root: &[u8]) -> Resu )?) } -pub fn insert_dispute_data<'a>( +// +// Handle rollups +// + +fn insert_handle_rollups_statement(conn: &rusqlite::Connection) -> Result { + Ok(conn.prepare( + "\ + INSERT INTO compute_or_rollups (id, handle_rollups) VALUES (0, ?1) + ", + )?) +} + +pub fn insert_handle_rollups(conn: &rusqlite::Connection, handle_rollups: bool) -> Result<()> { + let mut stmt = insert_handle_rollups_statement(&conn)?; + stmt.execute(params![handle_rollups])?; + + Ok(()) +} + +pub fn handle_rollups(conn: &rusqlite::Connection) -> Result { + Ok(conn.query_row( + "\ + SELECT handle_rollups FROM compute_or_rollups + WHERE id = 0 + ", + [], + |row| row.get(0), + )?) 
+} + +pub fn insert_compute_data<'a>( conn: &rusqlite::Connection, inputs: impl Iterator, leafs: impl Iterator, @@ -318,3 +366,25 @@ mod trees_tests { assert!(matches!(compute_tree(&conn, &root).unwrap().len(), 2)); } } + +#[cfg(test)] +mod compute_or_rollups_tests { + use super::*; + + #[test] + fn test_empty() { + let conn = test_helper::setup_db(); + assert!(matches!(handle_rollups(&conn), Err(_))); + } + + #[test] + fn test_insert() { + let conn = test_helper::setup_db(); + + assert!(matches!(insert_handle_rollups(&conn, true), Ok(()))); + assert!(matches!(handle_rollups(&conn), Ok(true))); + // compute_or_rollups can only be set once + assert!(matches!(insert_handle_rollups(&conn, true), Err(_))); + assert!(matches!(handle_rollups(&conn), Ok(true))); + } +} diff --git a/prt/client-rs/src/db/sql/error.rs b/prt/client-rs/src/db/sql/error.rs index 492234db..abf8462d 100644 --- a/prt/client-rs/src/db/sql/error.rs +++ b/prt/client-rs/src/db/sql/error.rs @@ -4,7 +4,7 @@ use thiserror::Error; #[derive(Error, Debug)] -pub enum DisputeStateAccessError { +pub enum ComputeStateAccessError { #[error(transparent)] Digest { #[from] @@ -39,4 +39,4 @@ pub enum DisputeStateAccessError { DataNotFound { description: String }, } -pub type Result = std::result::Result; +pub type Result = std::result::Result; diff --git a/prt/client-rs/src/db/sql/migrations.sql b/prt/client-rs/src/db/sql/migrations.sql index 6ef90062..eac1b48a 100644 --- a/prt/client-rs/src/db/sql/migrations.sql +++ b/prt/client-rs/src/db/sql/migrations.sql @@ -19,3 +19,8 @@ CREATE TABLE compute_trees ( tree_leaf BLOB NOT NULL, PRIMARY KEY (tree_root, tree_leaf_index) ); + +CREATE TABLE compute_or_rollups ( + id INTEGER NOT NULL PRIMARY KEY, + handle_rollups INTEGER NOT NULL +); diff --git a/prt/client-rs/src/db/sql/mod.rs b/prt/client-rs/src/db/sql/mod.rs index b072fed3..4e7d4a2c 100644 --- a/prt/client-rs/src/db/sql/mod.rs +++ b/prt/client-rs/src/db/sql/mod.rs @@ -1,3 +1,3 @@ -pub mod dispute_data; +pub mod 
compute_data; pub mod error; pub mod migrations; diff --git a/prt/client-rs/src/machine/commitment.rs b/prt/client-rs/src/machine/commitment.rs index 259360cd..c17ef737 100644 --- a/prt/client-rs/src/machine/commitment.rs +++ b/prt/client-rs/src/machine/commitment.rs @@ -2,11 +2,13 @@ //! described on the paper https://arxiv.org/pdf/2212.12439.pdf. use anyhow::Result; +use log::trace; +use std::io::{self, Write}; use std::{ops::ControlFlow, sync::Arc}; use crate::{ - db::dispute_state_access::{DisputeStateAccess, Leaf}, - machine::{constants, MachineInstance}, + db::compute_state_access::{ComputeStateAccess, Leaf}, + machine::{constants, MachineInstance, MachineState}, }; use cartesi_dave_arithmetic as arithmetic; use cartesi_dave_merkle::{Digest, MerkleBuilder, MerkleTree}; @@ -21,15 +23,12 @@ pub struct MachineCommitment { /// Builds a [MachineCommitment] from a [MachineInstance] and a base cycle and leafs. pub fn build_machine_commitment_from_leafs( - machine: &mut MachineInstance, - base_cycle: u64, leafs: Vec<(L, u64)>, + initial_state: Digest, ) -> Result where L: Into>, { - machine.run(base_cycle)?; - let initial_state = machine.machine_state()?; let mut builder = MerkleBuilder::default(); for leaf in leafs { builder.append_repeated(leaf.0, leaf.1); @@ -37,7 +36,7 @@ where let tree = builder.build(); Ok(MachineCommitment { - implicit_hash: initial_state.root_hash, + implicit_hash: initial_state, merkle: tree, }) } @@ -49,12 +48,15 @@ pub fn build_machine_commitment( level: u64, log2_stride: u64, log2_stride_count: u64, - db: &DisputeStateAccess, + initial_state: Digest, + db: &ComputeStateAccess, ) -> Result { if log2_stride >= constants::LOG2_UARCH_SPAN { assert!( log2_stride + log2_stride_count - <= constants::LOG2_EMULATOR_SPAN + constants::LOG2_UARCH_SPAN + <= constants::LOG2_INPUT_SPAN + + constants::LOG2_EMULATOR_SPAN + + constants::LOG2_UARCH_SPAN ); build_big_machine_commitment( machine, @@ -62,11 +64,19 @@ pub fn build_machine_commitment( level, 
log2_stride, log2_stride_count, + initial_state, db, ) } else { assert!(log2_stride == 0); - build_small_machine_commitment(machine, base_cycle, level, log2_stride_count, db) + build_small_machine_commitment( + machine, + base_cycle, + level, + log2_stride_count, + initial_state, + db, + ) } } @@ -77,17 +87,19 @@ pub fn build_big_machine_commitment( level: u64, log2_stride: u64, log2_stride_count: u64, - db: &DisputeStateAccess, + initial_state: Digest, + db: &ComputeStateAccess, ) -> Result { - machine.run(base_cycle)?; - snapshot_base_cycle(machine, base_cycle, db)?; - let initial_state = machine.machine_state()?; - let mut builder = MerkleBuilder::default(); let mut leafs = Vec::new(); let instruction_count = arithmetic::max_uint(log2_stride_count); for instruction in 0..=instruction_count { + print_flush_same_line(&format!( + "building big machine commitment ({}/{})...", + instruction, instruction_count + )); + let control_flow = advance_instruction( instruction, log2_stride, @@ -101,13 +113,14 @@ pub fn build_big_machine_commitment( break; } } + finish_print_flush_same_line(); let merkle = builder.build(); let compute_leafs: Vec = leafs.iter().map(|l| Leaf(l.0.data(), l.1)).collect(); db.insert_compute_leafs(level, base_cycle, compute_leafs.iter())?; Ok(MachineCommitment { - implicit_hash: initial_state.root_hash, + implicit_hash: initial_state, merkle, }) } @@ -122,10 +135,10 @@ fn advance_instruction( leafs: &mut Vec<(Digest, u64)>, ) -> Result> { let cycle = (instruction + 1) << (log2_stride - constants::LOG2_UARCH_SPAN); - machine.run(base_cycle + cycle)?; - let state = machine.machine_state()?; - let control_flow = if state.halted { + let state = machine.run(base_cycle + cycle)?; + let control_flow = if state.halted | state.yielded { leafs.push((state.root_hash, instruction_count - instruction + 1)); + trace!("big advance halted/yielded",); builder.append_repeated(state.root_hash, instruction_count - instruction + 1); ControlFlow::Break(()) } else { @@ 
-141,14 +154,12 @@ pub fn build_small_machine_commitment( base_cycle: u64, level: u64, log2_stride_count: u64, - db: &DisputeStateAccess, + initial_state: Digest, + db: &ComputeStateAccess, ) -> Result { - machine.run(base_cycle)?; - snapshot_base_cycle(machine, base_cycle, db)?; - let initial_state = machine.machine_state()?; - let mut builder = MerkleBuilder::default(); let mut leafs = Vec::new(); + let mut uarch_span_and_leafs = Vec::new(); let instruction_count = arithmetic::max_uint(log2_stride_count - constants::LOG2_UARCH_SPAN); let mut instruction = 0; loop { @@ -156,77 +167,91 @@ pub fn build_small_machine_commitment( break; } - let uarch_span = run_uarch_span(machine, db)?; - leafs.push((uarch_span.root_hash(), 1)); - builder.append(uarch_span); + print_flush_same_line(&format!( + "building small machine commitment ({}/{})...", + instruction, instruction_count + )); + + let (mut uarch_tree, machine_state, mut uarch_leafs) = run_uarch_span(machine)?; + uarch_span_and_leafs.push((uarch_tree.root_hash(), uarch_leafs.clone())); + leafs.push((uarch_tree.root_hash(), 1)); + builder.append(uarch_tree.clone()); instruction += 1; - let state = machine.machine_state()?; - if state.halted { - let uarch_span = run_uarch_span(machine, db)?; - leafs.push((uarch_span.root_hash(), instruction_count - instruction + 1)); - builder.append_repeated(uarch_span, instruction_count - instruction + 1); + if machine_state.halted | machine_state.yielded { + (uarch_tree, _, uarch_leafs) = run_uarch_span(machine)?; + trace!( + "uarch span machine halted/yielded {} {}", + uarch_tree.root_hash(), + instruction + ); + uarch_span_and_leafs.push((uarch_tree.root_hash(), uarch_leafs)); + leafs.push((uarch_tree.root_hash(), instruction_count - instruction + 1)); + builder.append_repeated(uarch_tree, instruction_count - instruction + 1); break; } } + finish_print_flush_same_line(); + let merkle = builder.build(); let compute_leafs: Vec = leafs.iter().map(|l| Leaf(l.0.data(), l.1)).collect(); 
db.insert_compute_leafs(level, base_cycle, compute_leafs.iter())?; + db.insert_compute_trees(uarch_span_and_leafs.iter())?; Ok(MachineCommitment { - implicit_hash: initial_state.root_hash, + implicit_hash: initial_state, merkle, }) } -fn snapshot_base_cycle( - machine: &mut MachineInstance, - base_cycle: u64, - db: &DisputeStateAccess, -) -> Result<()> { - let snapshot_path = db.work_path.join(format!("{}", base_cycle)); - machine.snapshot(&snapshot_path)?; - Ok(()) -} - fn run_uarch_span( machine: &mut MachineInstance, - db: &DisputeStateAccess, -) -> Result> { - let (_, ucycle) = machine.position(); +) -> Result<(Arc, MachineState, Vec)> { + let (_, ucycle, _) = machine.position()?; assert!(ucycle == 0); - machine.increment_uarch()?; + let mut machine_state = machine.increment_uarch()?; let mut builder = MerkleBuilder::default(); let mut leafs = Vec::new(); let mut i = 0; - let mut state = loop { - let mut state = machine.machine_state()?; - leafs.push((state.root_hash, 1)); - builder.append(state.root_hash); + loop { + leafs.push((machine_state.root_hash, 1)); + builder.append(machine_state.root_hash); - machine.increment_uarch()?; + machine_state = machine.increment_uarch()?; i += 1; - - state = machine.machine_state()?; - if state.uhalted { - break state; + if machine_state.uhalted { + trace!("uarch halted"); + break; } - }; - - leafs.push((state.root_hash, constants::UARCH_SPAN - i)); - builder.append_repeated(state.root_hash, constants::UARCH_SPAN - i); + } - machine.ureset()?; - state = machine.machine_state()?; - leafs.push((state.root_hash, 1)); - builder.append(state.root_hash); + leafs.push((machine_state.root_hash, constants::UARCH_SPAN - i)); + builder.append_repeated(machine_state.root_hash, constants::UARCH_SPAN - i); + trace!("state before reset {}", machine_state.root_hash); + machine_state = machine.ureset()?; + trace!("state after reset {}", machine_state.root_hash); + leafs.push((machine_state.root_hash, 1)); + 
builder.append(machine_state.root_hash); let uarch_span = builder.build(); + + // prepare uarch leafs for later db insertion let tree_leafs: Vec = leafs.iter().map(|l| Leaf(l.0.data(), l.1)).collect(); - db.insert_compute_tree(uarch_span.root_hash().slice(), tree_leafs.iter())?; - Ok(uarch_span) + Ok((uarch_span, machine_state, tree_leafs)) +} + +fn print_flush_same_line(args: &str) { + print!("\r{}", args); + // Flush the output to ensure it appears immediately + io::stdout().flush().unwrap(); +} + +fn finish_print_flush_same_line() { + println!(""); + // Flush the output to ensure it appears immediately + io::stdout().flush().unwrap(); } diff --git a/prt/client-rs/src/machine/commitment_builder.rs b/prt/client-rs/src/machine/commitment_builder.rs index 6f114f68..db2d7879 100644 --- a/prt/client-rs/src/machine/commitment_builder.rs +++ b/prt/client-rs/src/machine/commitment_builder.rs @@ -2,7 +2,7 @@ //! [MachineCommitment]. It is used by the [Arena] to build the commitments of the tournaments. 
use crate::{ - db::dispute_state_access::DisputeStateAccess, + db::compute_state_access::ComputeStateAccess, machine::{ build_machine_commitment, build_machine_commitment_from_leafs, MachineCommitment, MachineInstance, @@ -10,10 +10,8 @@ use crate::{ }; use anyhow::Result; -use std::{ - collections::{hash_map::Entry, HashMap}, - path::PathBuf, -}; +use log::trace; +use std::collections::{hash_map::Entry, HashMap}; pub struct CachingMachineCommitmentBuilder { machine_path: String, @@ -34,24 +32,36 @@ impl CachingMachineCommitmentBuilder { level: u64, log2_stride: u64, log2_stride_count: u64, - db: &DisputeStateAccess, + db: &ComputeStateAccess, ) -> Result { if let Entry::Vacant(e) = self.commitments.entry(level) { e.insert(HashMap::new()); - } else if self.commitments[&level].contains_key(&base_cycle) { - return Ok(self.commitments[&level][&base_cycle].clone()); + } else if let Some(commitment) = self.commitments[&level].get(&base_cycle) { + return Ok(commitment.clone()); } let mut machine = MachineInstance::new(&self.machine_path)?; - if let Some(snapshot_path) = db.closest_snapshot(base_cycle)? { - machine.load_snapshot(&PathBuf::from(snapshot_path))?; + if let Some(snapshot) = db.closest_snapshot(base_cycle)? { + machine.load_snapshot(&snapshot.1, snapshot.0)?; }; + let initial_state = { + if db.handle_rollups { + // treat it as rollups + machine.run_with_inputs(base_cycle, &db)?.root_hash + } else { + // treat it as compute + let root_hash = machine.run(base_cycle)?.root_hash; + machine.take_snapshot(base_cycle, &db)?; + root_hash + } + }; + trace!("initial state for commitment: {}", initial_state); let commitment = { let leafs = db.compute_leafs(level, base_cycle)?; // leafs are cached in database, use it to calculate merkle if leafs.len() > 0 { - build_machine_commitment_from_leafs(&mut machine, base_cycle, leafs)? + build_machine_commitment_from_leafs(leafs, initial_state)? 
} else { // leafs are not cached, build merkle by running the machine build_machine_commitment( @@ -60,6 +70,7 @@ impl CachingMachineCommitmentBuilder { level, log2_stride, log2_stride_count, + initial_state, db, )? } diff --git a/prt/client-rs/src/machine/instance.rs b/prt/client-rs/src/machine/instance.rs index 65d6830b..e341463b 100644 --- a/prt/client-rs/src/machine/instance.rs +++ b/prt/client-rs/src/machine/instance.rs @@ -1,19 +1,25 @@ +use crate::db::compute_state_access::{ComputeStateAccess, Input}; use crate::machine::constants; use cartesi_dave_arithmetic as arithmetic; use cartesi_dave_merkle::Digest; use cartesi_machine::{ configuration::RuntimeConfig, + htif, log::{AccessLog, AccessLogType}, machine::Machine, }; +use log::{debug, trace}; +use alloy::hex::ToHexExt; use anyhow::Result; +use ruint::aliases::U256; use std::path::Path; #[derive(Debug)] pub struct MachineState { pub root_hash: Digest, pub halted: bool, + pub yielded: bool, pub uhalted: bool, } @@ -21,8 +27,11 @@ impl std::fmt::Display for MachineState { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, - "{{root_hash = {:?}, halted = {}, uhalted = {}}}", - self.root_hash, self.halted, self.uhalted + "{{root_hash = {}, halted = {}, yielded = {}, uhalted = {}}}", + self.root_hash.to_hex(), + self.halted, + self.yielded, + self.uhalted ) } } @@ -56,28 +65,40 @@ impl MachineInstance { ucycle: 0, }) } + pub fn take_snapshot(&mut self, base_cycle: u64, db: &ComputeStateAccess) -> Result<()> { + let mask = arithmetic::max_uint(constants::LOG2_EMULATOR_SPAN); + if db.handle_rollups && base_cycle & mask == 0 { + // don't snapshot a machine state that's freshly fed with input without advance + assert!( + self.machine_state()?.yielded, + "don't snapshot a machine state that's freshly fed with input without advance", + ); + } + + let snapshot_path = db.work_path.join(format!("{}", base_cycle)); + if !snapshot_path.exists() { + self.machine.store(&snapshot_path)?; + } + Ok(()) 
+ } // load inner machine with snapshot, update cycle, keep everything else the same - pub fn load_snapshot(&mut self, snapshot_path: &Path) -> Result<()> { + pub fn load_snapshot(&mut self, snapshot_path: &Path, snapshot_cycle: u64) -> Result<()> { let machine = Machine::load(&Path::new(snapshot_path), RuntimeConfig::default())?; let cycle = machine.read_mcycle()?; // Machine can not go backward behind the initial machine assert!(cycle >= self.start_cycle); - self.cycle = cycle - self.start_cycle; + self.cycle = snapshot_cycle; assert_eq!(machine.read_uarch_cycle()?, 0); self.machine = machine; - Ok(()) - } + debug!("load snapshot from {}", snapshot_path.display()); + debug!("loaded machine: {}", self.machine_state()?); - pub fn snapshot(&self, snapshot_path: &Path) -> Result<()> { - if !snapshot_path.exists() { - self.machine.store(snapshot_path)?; - } Ok(()) } @@ -85,39 +106,100 @@ impl MachineInstance { self.root_hash } - pub fn get_logs(&mut self, cycle: u64, ucycle: u64) -> Result { - self.run(cycle)?; - self.run_uarch(ucycle)?; - - let access_log = AccessLogType { + pub fn get_logs( + &mut self, + cycle: u64, + ucycle: u64, + db: &ComputeStateAccess, + ) -> Result { + let log_type = AccessLogType { annotations: true, proofs: true, large_data: false, }; - let logs; - if ucycle == constants::UARCH_SPAN { - logs = self.machine.log_uarch_reset(access_log, false)?; + let mut logs = Vec::new(); + let mut encode_input = None; + if db.handle_rollups { + // treat it as rollups + // the cycle may be the cycle to receive input, + // we need to include the process of feeding input to the machine in the log + if cycle == 0 { + self.run(cycle)?; + } else { + self.run_with_inputs(cycle - 1, db)?; + self.run(cycle)?; + } + + let mask = arithmetic::max_uint(constants::LOG2_EMULATOR_SPAN); + let inputs = &db.inputs()?; + let input = inputs.get((cycle >> constants::LOG2_EMULATOR_SPAN) as usize); + if cycle & mask == 0 { + if let Some(data) = input { + // need to process input 
+ if ucycle == 0 { + let cmio_logs = self.machine.log_send_cmio_response( + htif::fromhost::ADVANCE_STATE, + &data, + log_type, + false, + )?; + // append step logs to cmio logs + let step_logs = self.machine.log_uarch_step(log_type, false)?; + logs.push(&cmio_logs); + logs.push(&step_logs); + return Ok(encode_access_logs(logs, Some(Input { 0: data.clone() }))); + } else { + self.machine + .send_cmio_response(htif::fromhost::ADVANCE_STATE, &data)?; + } + } else { + if ucycle == 0 { + encode_input = Some(Input { 0: Vec::new() }); + } + } + } } else { - logs = self.machine.log_uarch_step(access_log, false)?; + // treat it as compute + self.run(cycle)?; } - Ok(encode_access_log(&logs)) + self.run_uarch(ucycle)?; + if ucycle == constants::UARCH_SPAN { + let reset_logs = self.machine.log_uarch_reset(log_type, false)?; + logs.push(&reset_logs); + Ok(encode_access_logs(logs, encode_input)) + } else { + let step_logs = self.machine.log_uarch_step(log_type, false)?; + logs.push(&step_logs); + Ok(encode_access_logs(logs, encode_input)) + } } - pub fn run(&mut self, cycle: u64) -> Result<()> { + // Runs to the `cycle` directly and returns the machine state after the run + pub fn run(&mut self, cycle: u64) -> Result { assert!(self.cycle <= cycle); - let physical_cycle = arithmetic::add_and_clamp(self.start_cycle, cycle); + let mcycle = self.machine.read_mcycle()?; + + let physical_cycle = arithmetic::add_and_clamp(mcycle, cycle - self.cycle); + trace!("physical cycle {}", physical_cycle); loop { let halted = self.machine.read_iflags_h()?; if halted { + trace!("run break with halt"); + break; + } + + let yielded = self.machine.read_iflags_y()?; + if yielded { + trace!("run break with yield"); break; } - let mcycle = self.machine.read_mcycle()?; - if mcycle == physical_cycle { + if self.machine.read_mcycle()? 
== physical_cycle { + trace!("run break with meeting physical cycle"); break; } @@ -126,7 +208,7 @@ impl MachineInstance { self.cycle = cycle; - Ok(()) + Ok(self.machine_state()?) } pub fn run_uarch(&mut self, ucycle: u64) -> Result<()> { @@ -142,27 +224,90 @@ impl MachineInstance { Ok(()) } - pub fn increment_uarch(&mut self) -> Result<()> { + // Runs to the `cycle` with all necessary inputs added to the machine + // Returns the machine state after the run; + // One exception is that if `cycle` is supposed to receive an input, in this case + // the machine state would be `without` input included in the machine, + // this is useful when we need the initial state to compute the commitments + pub fn run_with_inputs(&mut self, cycle: u64, db: &ComputeStateAccess) -> Result { + trace!( + "run_with_inputs self cycle: {}, target cycle: {}", + self.cycle, + cycle + ); + + let inputs = &db.inputs()?; + let mut machine_state_without_input = self.machine_state()?; + let input_mask = arithmetic::max_uint(constants::LOG2_EMULATOR_SPAN); + let current_input_index = self.cycle >> constants::LOG2_EMULATOR_SPAN; + + let mut next_input_index; + + if self.cycle & input_mask == 0 { + next_input_index = current_input_index; + } else { + next_input_index = current_input_index + 1; + } + let mut next_input_cycle = next_input_index << constants::LOG2_EMULATOR_SPAN; + + while next_input_cycle <= cycle { + trace!("next input index: {}", next_input_index); + trace!("run to next input cycle: {}", next_input_cycle); + machine_state_without_input = self.run(next_input_cycle)?; + if next_input_cycle == cycle { + self.take_snapshot(next_input_cycle, &db)?; + } + + let input = inputs.get(next_input_index as usize); + if let Some(data) = input { + trace!( + "before input, machine state: {}", + self.machine_state()?.root_hash + ); + trace!("input: 0x{}", data.encode_hex()); + + self.machine + .send_cmio_response(htif::fromhost::ADVANCE_STATE, data)?; + + trace!( + "after input, machine state: {}", 
+ self.machine_state()?.root_hash + ); + } + + next_input_index += 1; + next_input_cycle = next_input_index << constants::LOG2_EMULATOR_SPAN; + } + if cycle > self.cycle { + machine_state_without_input = self.run(cycle)?; + self.take_snapshot(cycle, &db)?; + } + Ok(machine_state_without_input) + } + + pub fn increment_uarch(&mut self) -> Result { self.machine.run_uarch(self.ucycle + 1)?; self.ucycle += 1; - Ok(()) + Ok(self.machine_state()?) } - pub fn ureset(&mut self) -> Result<()> { + pub fn ureset(&mut self) -> Result { self.machine.reset_uarch()?; self.cycle += 1; self.ucycle = 0; - Ok(()) + Ok(self.machine_state()?) } pub fn machine_state(&mut self) -> Result { let root_hash = self.machine.get_root_hash()?; let halted = self.machine.read_iflags_h()?; + let yielded = self.machine.read_iflags_y()?; let uhalted = self.machine.read_uarch_halt_flag()?; Ok(MachineState { root_hash: Digest::from_digest(root_hash.as_bytes())?, halted, + yielded, uhalted, }) } @@ -173,27 +318,36 @@ impl MachineInstance { Ok(()) } - pub fn position(&self) -> (u64, u64) { - (self.cycle, self.ucycle) + pub fn position(&self) -> Result<(u64, u64, u64)> { + Ok((self.cycle, self.ucycle, self.machine.read_mcycle()?)) } } -fn encode_access_log(log: &AccessLog) -> Vec { +fn encode_access_logs(logs: Vec<&AccessLog>, encode_input: Option) -> Vec { let mut encoded: Vec> = Vec::new(); - for a in log.accesses().iter() { - if a.log2_size() == 3 { - encoded.push(a.read_data().to_vec()); - } else { - encoded.push(a.read_hash().as_bytes().to_vec()); + if let Some(i) = encode_input { + encoded.push(U256::from(i.0.len()).to_be_bytes_vec()); + if i.0.len() > 0 { + encoded.push(i.0); } + } - let decoded_siblings: Vec> = a - .sibling_hashes() - .iter() - .map(|h| h.as_bytes().to_vec()) - .collect(); - encoded.extend_from_slice(&decoded_siblings); + for log in logs.iter() { + for a in log.accesses().iter() { + if a.log2_size() == 3 { + encoded.push(a.read_data().to_vec()); + } else { + 
encoded.push(a.read_hash().as_bytes().to_vec()); + } + + let decoded_siblings: Vec> = a + .sibling_hashes() + .iter() + .map(|h| h.as_bytes().to_vec()) + .collect(); + encoded.extend_from_slice(&decoded_siblings); + } } encoded.iter().flatten().cloned().collect() diff --git a/prt/client-rs/src/strategy/player.rs b/prt/client-rs/src/strategy/player.rs index 4400de3e..c474566b 100644 --- a/prt/client-rs/src/strategy/player.rs +++ b/prt/client-rs/src/strategy/player.rs @@ -1,6 +1,6 @@ -use std::{collections::HashMap, path::PathBuf}; +use std::collections::HashMap; -use ::log::{error, info}; +use ::log::{debug, error, info}; use alloy::sol_types::private::Address; use anyhow::Result; use async_recursion::async_recursion; @@ -12,7 +12,7 @@ use crate::{ ArenaSender, BlockchainConfig, CommitmentMap, CommitmentState, MatchState, StateReader, TournamentState, TournamentStateMap, TournamentWinner, }, - db::dispute_state_access::{DisputeStateAccess, Input, Leaf}, + db::compute_state_access::{ComputeStateAccess, Input, Leaf}, machine::{constants, CachingMachineCommitmentBuilder, MachineCommitment, MachineInstance}, strategy::gc::GarbageCollector, }; @@ -25,7 +25,7 @@ pub enum PlayerTournamentResult { } pub struct Player { - db: DisputeStateAccess, + db: ComputeStateAccess, machine_path: String, commitment_builder: CachingMachineCommitmentBuilder, root_tournament: Address, @@ -35,14 +35,14 @@ pub struct Player { impl Player { pub fn new( - inputs: Vec, + inputs: Option>, leafs: Vec, blockchain_config: &BlockchainConfig, machine_path: String, root_tournament: Address, ) -> Result { let db = - DisputeStateAccess::new(inputs, leafs, root_tournament.to_string(), "/dispute_data")?; + ComputeStateAccess::new(inputs, leafs, root_tournament.to_string(), "/compute_data")?; let reader = StateReader::new(&blockchain_config)?; let gc = GarbageCollector::new(root_tournament); let commitment_builder = CachingMachineCommitmentBuilder::new(machine_path.clone()); @@ -99,6 +99,7 @@ impl Player { 
tournament_states: &TournamentStateMap, ) -> Result> { info!("Enter tournament at address: {}", tournament_address); + // TODO: print final state one and final state two let tournament_state = get_tournament_state(&tournament_states, tournament_address); commitments.insert( @@ -262,7 +263,7 @@ impl Player { tournament_state.max_level, tournament_states, ) - .await + .await?; } else if match_state.current_height == 1 { self.react_unsealed_match( arena_sender, @@ -271,7 +272,7 @@ impl Player { tournament_state.level, tournament_state.max_level, ) - .await + .await?; } else { self.react_running_match( arena_sender, @@ -279,8 +280,10 @@ impl Player { commitment, tournament_state.level, ) - .await + .await?; } + + Ok(()) } async fn win_timeout_match<'a>( @@ -359,10 +362,10 @@ impl Player { let proof = { let mut machine = MachineInstance::new(&self.machine_path)?; - if let Some(snapshot_path) = self.db.closest_snapshot(cycle)? { - machine.load_snapshot(&PathBuf::from(snapshot_path))?; + if let Some(snapshot) = self.db.closest_snapshot(cycle)? { + machine.load_snapshot(&snapshot.1, snapshot.0)?; }; - machine.get_logs(cycle, ucycle)? + machine.get_logs(cycle, ucycle, &self.db)? 
}; info!( @@ -480,9 +483,11 @@ impl Player { let (left, right) = r.subtrees().expect("merkle tree should have subtrees"); let (new_left, new_right) = if left.root_hash() != match_state.left_node { - left.subtrees().expect("merkle tree should have subtrees") + debug!("going down to the left"); + left.subtrees().expect("left tree should have subtrees") } else { - right.subtrees().expect("merkle tree should have subtrees") + debug!("going down to the right"); + right.subtrees().expect("right tree should have subtrees") }; info!( diff --git a/prt/contract-bindings/Cargo.toml b/prt/contract-bindings/Cargo.toml index 515a7540..a1aaef4c 100644 --- a/prt/contract-bindings/Cargo.toml +++ b/prt/contract-bindings/Cargo.toml @@ -15,5 +15,5 @@ readme = "README.md" repository = "https://github.com/cartesi/dave" [dependencies] -alloy = { version = "0.3.1", features = ["sol-types", "contract"] } +alloy = { version = "0.8.0", features = ["sol-types", "contract"] } diff --git a/prt/contracts/foundry.toml b/prt/contracts/foundry.toml index a7cdd817..1b7a83e9 100644 --- a/prt/contracts/foundry.toml +++ b/prt/contracts/foundry.toml @@ -7,6 +7,7 @@ allow_paths = ['../../machine/step/'] remappings = [ 'step/=../../machine/step/', ] +solc-version = "0.8.27" [fmt] line_length = 80 diff --git a/prt/contracts/src/CanonicalConstants.sol b/prt/contracts/src/CanonicalConstants.sol index ff5db4fe..adfafb05 100644 --- a/prt/contracts/src/CanonicalConstants.sol +++ b/prt/contracts/src/CanonicalConstants.sol @@ -32,20 +32,20 @@ library ArbitrationConstants { uint64 constant LOG2_UARCH_SPAN = 20; uint64 constant LOG2_EMULATOR_SPAN = 48; - uint64 constant LOG2_INPUT_SPAN = LOG2_UARCH_SPAN + LOG2_EMULATOR_SPAN; + uint64 constant LOG2_INPUT_SPAN = 24; // 3-level tournament uint64 constant LEVELS = 3; /// @return log2step gap of each leaf in the tournament[level] function log2step(uint64 level) internal pure returns (uint64) { - uint64[LEVELS] memory arr = [uint64(41), uint64(26), uint64(0)]; + 
uint64[LEVELS] memory arr = [uint64(44), uint64(28), uint64(0)]; return arr[level]; } /// @return height of the tournament[level] tree which is calculated by subtracting the log2step[level] from the log2step[level - 1] function height(uint64 level) internal pure returns (uint64) { - uint64[LEVELS] memory arr = [uint64(27), uint64(15), uint64(26)]; + uint64[LEVELS] memory arr = [uint64(48), uint64(16), uint64(28)]; return arr[level]; } } diff --git a/prt/contracts/src/IDataProvider.sol b/prt/contracts/src/IDataProvider.sol index 2eac6c03..367c0b12 100644 --- a/prt/contracts/src/IDataProvider.sol +++ b/prt/contracts/src/IDataProvider.sol @@ -12,5 +12,6 @@ interface IDataProvider { /// @return Size of the response (in bytes) function gio(uint16 namespace, bytes calldata id, bytes calldata extra) external + view returns (bytes32, uint256); } diff --git a/prt/contracts/src/IMultiLevelTournamentFactory.sol b/prt/contracts/src/IMultiLevelTournamentFactory.sol index 3f9ec4aa..422c716b 100644 --- a/prt/contracts/src/IMultiLevelTournamentFactory.sol +++ b/prt/contracts/src/IMultiLevelTournamentFactory.sol @@ -10,7 +10,7 @@ import "./tournament/concretes/MiddleTournament.sol"; import "./tournament/concretes/BottomTournament.sol"; interface IMultiLevelTournamentFactory is ITournamentFactory { - function instantiateTop(Machine.Hash _initialHash) + function instantiateTop(Machine.Hash _initialHash, IDataProvider _provider) external returns (TopTournament); @@ -22,7 +22,8 @@ interface IMultiLevelTournamentFactory is ITournamentFactory { Machine.Hash _contestedFinalStateTwo, Time.Duration _allowance, uint256 _startCycle, - uint64 _level + uint64 _level, + IDataProvider _provider ) external returns (MiddleTournament); function instantiateBottom( @@ -33,6 +34,7 @@ interface IMultiLevelTournamentFactory is ITournamentFactory { Machine.Hash _contestedFinalStateTwo, Time.Duration _allowance, uint256 _startCycle, - uint64 _level + uint64 _level, + IDataProvider _provider ) external 
returns (BottomTournament); } diff --git a/prt/contracts/src/tournament/abstracts/LeafTournament.sol b/prt/contracts/src/tournament/abstracts/LeafTournament.sol index adc72691..eaca500e 100644 --- a/prt/contracts/src/tournament/abstracts/LeafTournament.sol +++ b/prt/contracts/src/tournament/abstracts/LeafTournament.sol @@ -7,6 +7,8 @@ import "./Tournament.sol"; import "../../CanonicalConstants.sol"; import "../libs/Commitment.sol"; +import "step/src/EmulatorConstants.sol"; +import "step/src/SendCmioResponse.sol"; import "step/src/UArchStep.sol"; import "step/src/UArchReset.sol"; @@ -53,21 +55,26 @@ abstract contract LeafTournament is Tournament { ); } + error WrongFinalState( + uint256 commitment, Machine.Hash computed, Machine.Hash claimed + ); + error WrongNodesForStep(); + function winLeafMatch( Match.Id calldata _matchId, Tree.Node _leftNode, Tree.Node _rightNode, bytes calldata proofs ) external tournamentNotFinished { - Match.State storage _matchState = matches[_matchId.hashFromId()]; - _matchState.requireExist(); - _matchState.requireIsFinished(); - Clock.State storage _clockOne = clocks[_matchId.commitmentOne]; Clock.State storage _clockTwo = clocks[_matchId.commitmentTwo]; _clockOne.requireInitialized(); _clockTwo.requireInitialized(); + Match.State storage _matchState = matches[_matchId.hashFromId()]; + _matchState.requireExist(); + _matchState.requireIsFinished(); + ( Machine.Hash _agreeHash, uint256 _agreeCycle, @@ -75,11 +82,14 @@ abstract contract LeafTournament is Tournament { Machine.Hash _finalStateTwo ) = _matchState.getDivergence(startCycle); - Machine.Hash _finalState = runMetaStep(_agreeHash, _agreeCycle, proofs); + Machine.Hash _finalState = Machine.Hash.wrap( + metaStep(Machine.Hash.unwrap(_agreeHash), _agreeCycle, proofs) + ); if (_leftNode.join(_rightNode).eq(_matchId.commitmentOne)) { require( - _finalState.eq(_finalStateOne), "final state one doesn't match" + _finalState.eq(_finalStateOne), + WrongFinalState(1, _finalState, 
_finalStateOne) ); _clockOne.setPaused(); @@ -88,7 +98,8 @@ abstract contract LeafTournament is Tournament { ); } else if (_leftNode.join(_rightNode).eq(_matchId.commitmentTwo)) { require( - _finalState.eq(_finalStateTwo), "final state two doesn't match" + _finalState.eq(_finalStateTwo), + WrongFinalState(2, _finalState, _finalStateTwo) ); _clockTwo.setPaused(); @@ -96,46 +107,78 @@ abstract contract LeafTournament is Tournament { _matchId.commitmentTwo, _clockTwo, _leftNode, _rightNode ); } else { - revert("wrong left/right nodes for step"); + revert WrongNodesForStep(); } // delete storage deleteMatch(_matchId.hashFromId()); } - function runMetaStep( - Machine.Hash machineState, - uint256 counter, - bytes memory proofs - ) internal pure returns (Machine.Hash) { - return Machine.Hash.wrap( - metaStep(Machine.Hash.unwrap(machineState), counter, proofs) - ); - } - // TODO: move to step repo function metaStep( bytes32 machineState, uint256 counter, - bytes memory proofs - ) internal pure returns (bytes32 newMachineState) { + bytes calldata proofs + ) internal view returns (bytes32 newMachineState) { // TODO: create a more convinient constructor. 
AccessLogs.Context memory accessLogs = AccessLogs.Context(machineState, Buffer.Context(proofs, 0)); - uint256 uarch_mask = (1 << ArbitrationConstants.LOG2_UARCH_SPAN) - 1; - uint256 input_mask = (1 << ArbitrationConstants.LOG2_INPUT_SPAN) - 1; - - if (counter & uarch_mask == uarch_mask) { - UArchReset.reset(accessLogs); - newMachineState = accessLogs.currentRootHash; - } else if (counter & input_mask == input_mask) { - UArchReset.reset(accessLogs); - // TODO: add input - newMachineState = accessLogs.currentRootHash; + uint256 uarch_step_mask = + (1 << ArbitrationConstants.LOG2_UARCH_SPAN) - 1; + uint256 big_step_mask = ( + 1 + << ( + ArbitrationConstants.LOG2_EMULATOR_SPAN + + ArbitrationConstants.LOG2_UARCH_SPAN + ) + ) - 1; + + if (address(provider) == address(0)) { + // this is an inputless version of the meta step implementation primarily used for testing + if ((counter + 1) & uarch_step_mask == 0) { + UArchReset.reset(accessLogs); + } else { + UArchStep.step(accessLogs); + } } else { - UArchStep.step(accessLogs); - newMachineState = accessLogs.currentRootHash; + // rollups meta step handles input + if (counter & big_step_mask == 0) { + uint256 inputLength = uint256(bytes32(proofs[:32])); + accessLogs = AccessLogs.Context( + machineState, Buffer.Context(proofs, 32 + inputLength) + ); + + if (inputLength > 0) { + bytes calldata input = proofs[32:32 + inputLength]; + uint256 inputIndex = counter + >> ( + ArbitrationConstants.LOG2_EMULATOR_SPAN + + ArbitrationConstants.LOG2_UARCH_SPAN + ); // TODO: add input index offset of the epoch + + // TODO: maybe assert retrieved input length matches? 
+ (bytes32 inputMerkleRoot, uint256 retrievedInputLength) = + provider.gio(0, abi.encode(inputIndex), input); + + require(inputLength == retrievedInputLength); + require(inputMerkleRoot != bytes32(0)); + SendCmioResponse.sendCmioResponse( + accessLogs, + EmulatorConstants.HTIF_YIELD_REASON_ADVANCE_STATE, + inputMerkleRoot, + uint32(inputLength) + ); + UArchStep.step(accessLogs); + } else { + UArchStep.step(accessLogs); + } + } else if ((counter + 1) & uarch_step_mask == 0) { + UArchReset.reset(accessLogs); + } else { + UArchStep.step(accessLogs); + } } + newMachineState = accessLogs.currentRootHash; } } diff --git a/prt/contracts/src/tournament/abstracts/NonLeafTournament.sol b/prt/contracts/src/tournament/abstracts/NonLeafTournament.sol index 80daad29..f5da1581 100644 --- a/prt/contracts/src/tournament/abstracts/NonLeafTournament.sol +++ b/prt/contracts/src/tournament/abstracts/NonLeafTournament.sol @@ -93,6 +93,9 @@ abstract contract NonLeafTournament is Tournament { emit newInnerTournament(_matchId.hashFromId(), _inner); } + error ChildTournamentNotFinished(); + error WrongTournamentWinner(Tree.Node commitmentRoot, Tree.Node winner); + function winInnerMatch( NonRootTournament _childTournament, Tree.Node _leftNode, @@ -108,11 +111,14 @@ abstract contract NonLeafTournament is Tournament { (bool finished, Tree.Node _winner, Tree.Node _innerWinner) = _childTournament.innerTournamentWinner(); - require(finished, "child tournament is not finished"); + require(finished, ChildTournamentNotFinished()); _winner.requireExist(); Tree.Node _commitmentRoot = _leftNode.join(_rightNode); - require(_commitmentRoot.eq(_winner), "tournament winner is different"); + require( + _commitmentRoot.eq(_winner), + WrongTournamentWinner(_commitmentRoot, _winner) + ); (Clock.State memory _innerClock,) = _childTournament.getCommitment(_innerWinner); @@ -149,7 +155,8 @@ abstract contract NonLeafTournament is Tournament { _contestedFinalStateTwo, _allowance, _startCycle, - _level + _level, + 
provider ); } else { _tournament = tournamentFactory.instantiateMiddle( @@ -160,7 +167,8 @@ abstract contract NonLeafTournament is Tournament { _contestedFinalStateTwo, _allowance, _startCycle, - _level + _level, + provider ); } diff --git a/prt/contracts/src/tournament/abstracts/NonRootTournament.sol b/prt/contracts/src/tournament/abstracts/NonRootTournament.sol index 03e7cdff..a23ac4a5 100644 --- a/prt/contracts/src/tournament/abstracts/NonRootTournament.sol +++ b/prt/contracts/src/tournament/abstracts/NonRootTournament.sol @@ -30,8 +30,9 @@ abstract contract NonRootTournament is Tournament { Machine.Hash _contestedFinalStateTwo, Time.Duration _allowance, uint256 _startCycle, - uint64 _level - ) Tournament(_initialHash, _allowance, _startCycle, _level) { + uint64 _level, + IDataProvider _provider + ) Tournament(_initialHash, _allowance, _startCycle, _level, _provider) { contestedCommitmentOne = _contestedCommitmentOne; contestedFinalStateOne = _contestedFinalStateOne; contestedCommitmentTwo = _contestedCommitmentTwo; @@ -72,9 +73,13 @@ abstract contract NonRootTournament is Tournament { internal view override - returns (bool) + returns (bool, Machine.Hash, Machine.Hash) { - return contestedFinalStateOne.eq(_finalState) - || contestedFinalStateTwo.eq(_finalState); + return ( + contestedFinalStateOne.eq(_finalState) + || contestedFinalStateTwo.eq(_finalState), + contestedFinalStateOne, + contestedFinalStateTwo + ); } } diff --git a/prt/contracts/src/tournament/abstracts/RootTournament.sol b/prt/contracts/src/tournament/abstracts/RootTournament.sol index 025c99ae..1d6fc3c2 100644 --- a/prt/contracts/src/tournament/abstracts/RootTournament.sol +++ b/prt/contracts/src/tournament/abstracts/RootTournament.sol @@ -11,18 +11,24 @@ abstract contract RootTournament is Tournament, ITournament { // // Constructor // - constructor(Machine.Hash _initialHash) - Tournament(_initialHash, ArbitrationConstants.MAX_ALLOWANCE, 0, 0) + constructor(Machine.Hash _initialHash, 
IDataProvider _provider) + Tournament( + _initialHash, + ArbitrationConstants.MAX_ALLOWANCE, + 0, + 0, + _provider + ) {} function validContestedFinalState(Machine.Hash) internal pure override - returns (bool) + returns (bool, Machine.Hash, Machine.Hash) { // always returns true in root tournament - return true; + return (true, Machine.ZERO_STATE, Machine.ZERO_STATE); } function arbitrationResult() diff --git a/prt/contracts/src/tournament/abstracts/Tournament.sol b/prt/contracts/src/tournament/abstracts/Tournament.sol index c28a16fd..8f77aa47 100644 --- a/prt/contracts/src/tournament/abstracts/Tournament.sol +++ b/prt/contracts/src/tournament/abstracts/Tournament.sol @@ -4,6 +4,7 @@ pragma solidity ^0.8.17; import "../../CanonicalConstants.sol"; +import "../../IDataProvider.sol"; import "../../Machine.sol"; import "../../Tree.sol"; @@ -45,6 +46,8 @@ abstract contract Tournament { Time.Instant immutable startInstant; Time.Duration immutable allowance; + IDataProvider immutable provider; + // // Storage // @@ -67,14 +70,17 @@ abstract contract Tournament { // // Modifiers // + error TournamentIsFinished(); + error TournamentIsClosed(); + modifier tournamentNotFinished() { - require(!isFinished(), "tournament is finished"); + require(!isFinished(), TournamentIsFinished()); _; } modifier tournamentOpen() { - require(!isClosed(), "tournament check-in elapsed"); + require(!isClosed(), TournamentIsClosed()); _; } @@ -86,13 +92,15 @@ abstract contract Tournament { Machine.Hash _initialHash, Time.Duration _allowance, uint256 _startCycle, - uint64 _level + uint64 _level, + IDataProvider _provider ) { initialHash = _initialHash; startCycle = _startCycle; level = _level; startInstant = Time.currentTime(); allowance = _allowance; + provider = _provider; if (_allowance.gt(ArbitrationConstants.MAX_ALLOWANCE)) { _allowance = ArbitrationConstants.MAX_ALLOWANCE; @@ -108,7 +116,7 @@ abstract contract Tournament { internal view virtual - returns (bool); + returns (bool, Machine.Hash, 
Machine.Hash); // // Methods @@ -161,6 +169,11 @@ abstract contract Tournament { clocks[_matchId.commitmentTwo].advanceClock(); } + error WrongChildren( + uint256 commitment, Tree.Node parent, Tree.Node left, Tree.Node right + ); + error WinByTimeout(); + function winMatchByTimeout( Match.Id calldata _matchId, Tree.Node _leftNode, @@ -176,7 +189,7 @@ abstract contract Tournament { if (_clockOne.hasTimeLeft() && !_clockTwo.hasTimeLeft()) { require( _matchId.commitmentOne.verify(_leftNode, _rightNode), - "child nodes do not match parent (commitmentOne)" + WrongChildren(1, _matchId.commitmentOne, _leftNode, _rightNode) ); _clockOne.deduct(_clockTwo.timeSinceTimeout()); @@ -186,7 +199,7 @@ abstract contract Tournament { } else if (!_clockOne.hasTimeLeft() && _clockTwo.hasTimeLeft()) { require( _matchId.commitmentTwo.verify(_leftNode, _rightNode), - "child nodes do not match parent (commitmentTwo)" + WrongChildren(2, _matchId.commitmentTwo, _leftNode, _rightNode) ); _clockTwo.deduct(_clockOne.timeSinceTimeout()); @@ -194,13 +207,15 @@ abstract contract Tournament { _matchId.commitmentTwo, _clockTwo, _leftNode, _rightNode ); } else { - revert("cannot win by timeout"); + revert WinByTimeout(); } // delete storage deleteMatch(_matchId.hashFromId()); } + error EliminateByTimeout(); + function eliminateMatchByTimeout(Match.Id calldata _matchId) external tournamentNotFinished @@ -226,7 +241,7 @@ abstract contract Tournament { // delete storage deleteMatch(_matchId.hashFromId()); } else { - revert("cannot eliminate by timeout"); + revert EliminateByTimeout(); } } @@ -260,7 +275,6 @@ abstract contract Tournament { return matches[_matchIdHash]; } - // TODO: do we need this? 
function getMatchCycle(Match.IdHash _matchIdHash) external view @@ -289,13 +303,26 @@ abstract contract Tournament { // // Helper functions // + error InvalidContestedFinalState( + Machine.Hash contestedFinalStateOne, + Machine.Hash contestedFinalStateTwo, + Machine.Hash finalState + ); + function requireValidContestedFinalState(Machine.Hash _finalState) internal view { + ( + bool valid, + Machine.Hash contestedFinalStateOne, + Machine.Hash contestedFinalStateTwo + ) = validContestedFinalState(_finalState); require( - validContestedFinalState(_finalState), - "tournament doesn't have contested final state" + valid, + InvalidContestedFinalState( + contestedFinalStateOne, contestedFinalStateTwo, _finalState + ) ); } diff --git a/prt/contracts/src/tournament/concretes/BottomTournament.sol b/prt/contracts/src/tournament/concretes/BottomTournament.sol index 01eea93f..b03c93ed 100644 --- a/prt/contracts/src/tournament/concretes/BottomTournament.sol +++ b/prt/contracts/src/tournament/concretes/BottomTournament.sol @@ -16,7 +16,8 @@ contract BottomTournament is LeafTournament, NonRootTournament { Machine.Hash _contestedFinalStateTwo, Time.Duration _allowance, uint256 _startCycle, - uint64 _level + uint64 _level, + IDataProvider _provider ) LeafTournament() NonRootTournament( @@ -27,7 +28,8 @@ contract BottomTournament is LeafTournament, NonRootTournament { _contestedFinalStateTwo, _allowance, _startCycle, - _level + _level, + _provider ) {} } diff --git a/prt/contracts/src/tournament/concretes/MiddleTournament.sol b/prt/contracts/src/tournament/concretes/MiddleTournament.sol index 87aac502..1995d81f 100644 --- a/prt/contracts/src/tournament/concretes/MiddleTournament.sol +++ b/prt/contracts/src/tournament/concretes/MiddleTournament.sol @@ -19,6 +19,7 @@ contract MiddleTournament is NonLeafTournament, NonRootTournament { Time.Duration _allowance, uint256 _startCycle, uint64 _level, + IDataProvider _provider, IMultiLevelTournamentFactory _tournamentFactory ) 
NonLeafTournament(_tournamentFactory) @@ -30,7 +31,8 @@ contract MiddleTournament is NonLeafTournament, NonRootTournament { _contestedFinalStateTwo, _allowance, _startCycle, - _level + _level, + _provider ) {} } diff --git a/prt/contracts/src/tournament/concretes/SingleLevelTournament.sol b/prt/contracts/src/tournament/concretes/SingleLevelTournament.sol index adff1961..424cc408 100644 --- a/prt/contracts/src/tournament/concretes/SingleLevelTournament.sol +++ b/prt/contracts/src/tournament/concretes/SingleLevelTournament.sol @@ -7,8 +7,8 @@ import "../abstracts/RootTournament.sol"; import "../abstracts/LeafTournament.sol"; contract SingleLevelTournament is LeafTournament, RootTournament { - constructor(Machine.Hash _initialHash) + constructor(Machine.Hash _initialHash, IDataProvider _provider) LeafTournament() - RootTournament(_initialHash) + RootTournament(_initialHash, _provider) {} } diff --git a/prt/contracts/src/tournament/concretes/TopTournament.sol b/prt/contracts/src/tournament/concretes/TopTournament.sol index 5851cd96..e19420c0 100644 --- a/prt/contracts/src/tournament/concretes/TopTournament.sol +++ b/prt/contracts/src/tournament/concretes/TopTournament.sol @@ -14,6 +14,7 @@ import "../../Machine.sol"; contract TopTournament is NonLeafTournament, RootTournament { constructor( Machine.Hash _initialHash, + IDataProvider _provider, IMultiLevelTournamentFactory _factory - ) NonLeafTournament(_factory) RootTournament(_initialHash) {} + ) NonLeafTournament(_factory) RootTournament(_initialHash, _provider) {} } diff --git a/prt/contracts/src/tournament/factories/MultiLevelTournamentFactory.sol b/prt/contracts/src/tournament/factories/MultiLevelTournamentFactory.sol index 8a357d16..a28ad1eb 100644 --- a/prt/contracts/src/tournament/factories/MultiLevelTournamentFactory.sol +++ b/prt/contracts/src/tournament/factories/MultiLevelTournamentFactory.sol @@ -24,21 +24,22 @@ contract MultiLevelTournamentFactory is IMultiLevelTournamentFactory { bottomFactory = 
_bottomFactory; } - function instantiate(Machine.Hash _initialHash, IDataProvider) + function instantiate(Machine.Hash _initialHash, IDataProvider _provider) external override returns (ITournament) { - TopTournament _tournament = this.instantiateTop(_initialHash); + TopTournament _tournament = this.instantiateTop(_initialHash, _provider); emit tournamentCreated(_tournament); return _tournament; } - function instantiateTop(Machine.Hash _initialHash) + function instantiateTop(Machine.Hash _initialHash, IDataProvider _provider) external returns (TopTournament) { - TopTournament _tournament = topFactory.instantiate(_initialHash); + TopTournament _tournament = + topFactory.instantiate(_initialHash, _provider); return _tournament; } @@ -50,7 +51,8 @@ contract MultiLevelTournamentFactory is IMultiLevelTournamentFactory { Machine.Hash _contestedFinalStateTwo, Time.Duration _allowance, uint256 _startCycle, - uint64 _level + uint64 _level, + IDataProvider _provider ) external returns (MiddleTournament) { MiddleTournament _tournament = middleFactory.instantiate( _initialHash, @@ -60,7 +62,8 @@ contract MultiLevelTournamentFactory is IMultiLevelTournamentFactory { _contestedFinalStateTwo, _allowance, _startCycle, - _level + _level, + _provider ); return _tournament; @@ -74,7 +77,8 @@ contract MultiLevelTournamentFactory is IMultiLevelTournamentFactory { Machine.Hash _contestedFinalStateTwo, Time.Duration _allowance, uint256 _startCycle, - uint64 _level + uint64 _level, + IDataProvider _provider ) external returns (BottomTournament) { BottomTournament _tournament = bottomFactory.instantiate( _initialHash, @@ -84,7 +88,8 @@ contract MultiLevelTournamentFactory is IMultiLevelTournamentFactory { _contestedFinalStateTwo, _allowance, _startCycle, - _level + _level, + _provider ); return _tournament; diff --git a/prt/contracts/src/tournament/factories/SingleLevelTournamentFactory.sol b/prt/contracts/src/tournament/factories/SingleLevelTournamentFactory.sol index 21f5bd83..0b1fb6f4 
100644 --- a/prt/contracts/src/tournament/factories/SingleLevelTournamentFactory.sol +++ b/prt/contracts/src/tournament/factories/SingleLevelTournamentFactory.sol @@ -9,22 +9,22 @@ import "../../ITournamentFactory.sol"; contract SingleLevelTournamentFactory is ITournamentFactory { constructor() {} - function instantiateSingleLevel(Machine.Hash _initialHash) - external - returns (SingleLevelTournament) - { + function instantiateSingleLevel( + Machine.Hash _initialHash, + IDataProvider _provider + ) external returns (SingleLevelTournament) { SingleLevelTournament _tournament = - new SingleLevelTournament(_initialHash); + new SingleLevelTournament(_initialHash, _provider); emit tournamentCreated(_tournament); return _tournament; } - function instantiate(Machine.Hash _initialHash, IDataProvider) + function instantiate(Machine.Hash _initialHash, IDataProvider _provider) external returns (ITournament) { - return this.instantiateSingleLevel(_initialHash); + return this.instantiateSingleLevel(_initialHash, _provider); } } diff --git a/prt/contracts/src/tournament/factories/multilevel/BottomTournamentFactory.sol b/prt/contracts/src/tournament/factories/multilevel/BottomTournamentFactory.sol index 868ebf1a..82b5b23d 100644 --- a/prt/contracts/src/tournament/factories/multilevel/BottomTournamentFactory.sol +++ b/prt/contracts/src/tournament/factories/multilevel/BottomTournamentFactory.sol @@ -16,7 +16,8 @@ contract BottomTournamentFactory { Machine.Hash _contestedFinalStateTwo, Time.Duration _allowance, uint256 _startCycle, - uint64 _level + uint64 _level, + IDataProvider _provider ) external returns (BottomTournament) { BottomTournament _tournament = new BottomTournament( _initialHash, @@ -26,7 +27,8 @@ contract BottomTournamentFactory { _contestedFinalStateTwo, _allowance, _startCycle, - _level + _level, + _provider ); return _tournament; diff --git a/prt/contracts/src/tournament/factories/multilevel/MiddleTournamentFactory.sol 
b/prt/contracts/src/tournament/factories/multilevel/MiddleTournamentFactory.sol index 3e0425ae..a1138cb9 100644 --- a/prt/contracts/src/tournament/factories/multilevel/MiddleTournamentFactory.sol +++ b/prt/contracts/src/tournament/factories/multilevel/MiddleTournamentFactory.sol @@ -23,7 +23,8 @@ contract MiddleTournamentFactory { Machine.Hash _contestedFinalStateTwo, Time.Duration _allowance, uint256 _startCycle, - uint64 _level + uint64 _level, + IDataProvider _provider ) external returns (MiddleTournament) { MiddleTournament _tournament = new MiddleTournament( _initialHash, @@ -34,6 +35,7 @@ contract MiddleTournamentFactory { _allowance, _startCycle, _level, + _provider, IMultiLevelTournamentFactory(msg.sender) ); diff --git a/prt/contracts/src/tournament/factories/multilevel/TopTournamentFactory.sol b/prt/contracts/src/tournament/factories/multilevel/TopTournamentFactory.sol index 7da89000..650037dd 100644 --- a/prt/contracts/src/tournament/factories/multilevel/TopTournamentFactory.sol +++ b/prt/contracts/src/tournament/factories/multilevel/TopTournamentFactory.sol @@ -8,12 +8,12 @@ import "../../concretes/TopTournament.sol"; contract TopTournamentFactory { constructor() {} - function instantiate(Machine.Hash _initialHash) + function instantiate(Machine.Hash _initialHash, IDataProvider _provider) external returns (TopTournament) { TopTournament _tournament = new TopTournament( - _initialHash, IMultiLevelTournamentFactory(msg.sender) + _initialHash, _provider, IMultiLevelTournamentFactory(msg.sender) ); return _tournament; diff --git a/prt/contracts/src/tournament/libs/Commitment.sol b/prt/contracts/src/tournament/libs/Commitment.sol index b9a31f4a..4a09bbae 100644 --- a/prt/contracts/src/tournament/libs/Commitment.sol +++ b/prt/contracts/src/tournament/libs/Commitment.sol @@ -24,9 +24,7 @@ library Commitment { Tree.Node expectedCommitment = getRoot(Machine.Hash.unwrap(state), treeHeight, position, hashProof); - require( - commitment.eq(expectedCommitment), 
"commitment state doesn't match" - ); + require(commitment.eq(expectedCommitment), "commitment state mismatch"); } function isEven(uint256 x) private pure returns (bool) { diff --git a/prt/contracts/src/tournament/libs/Match.sol b/prt/contracts/src/tournament/libs/Match.sol index 2a26a1d1..4394ccb0 100644 --- a/prt/contracts/src/tournament/libs/Match.sol +++ b/prt/contracts/src/tournament/libs/Match.sol @@ -117,6 +117,10 @@ library Match { emit matchAdvanced(id.hashFromId(), state.otherParent, state.leftNode); } + error IncorrectAgreeState( + Machine.Hash initialState, Machine.Hash agreeState + ); + function sealMatch( State storage state, Id calldata id, @@ -141,7 +145,10 @@ library Match { // Prove initial hash is in commitment if (state.runningLeafPosition == 0) { - require(agreeState.eq(initialState), "agree hash incorrect"); + require( + agreeState.eq(initialState), + IncorrectAgreeState(initialState, agreeState) + ); } else { Tree.Node commitment; if (state.height() % 2 == 1) { diff --git a/prt/contracts/test/MultiTournament.t.sol b/prt/contracts/test/MultiTournament.t.sol index 8c3acdb4..a8056a08 100644 --- a/prt/contracts/test/MultiTournament.t.sol +++ b/prt/contracts/test/MultiTournament.t.sol @@ -389,7 +389,7 @@ contract MultiTournamentTest is Util, Test { _match = middleTournament.getMatch(_matchId.hashFromId()); assertTrue(_match.exists(), "match should exist"); - vm.expectRevert("cannot win by timeout"); + vm.expectRevert(Tournament.WinByTimeout.selector); middleTournament.winMatchByTimeout( _matchId, playerNodes[1][ArbitrationConstants.height(1) - 1], @@ -457,7 +457,7 @@ contract MultiTournamentTest is Util, Test { vm.warp(_rootTournamentFinish - 1); // cannot eliminate match when both blocks still have time - vm.expectRevert("cannot eliminate by timeout"); + vm.expectRevert(Tournament.EliminateByTimeout.selector); topTournament.eliminateMatchByTimeout(_matchId); vm.warp(_rootTournamentFinish); diff --git a/prt/contracts/test/TournamentFactory.t.sol 
b/prt/contracts/test/TournamentFactory.t.sol index 2c142b9d..d3fd82fe 100644 --- a/prt/contracts/test/TournamentFactory.t.sol +++ b/prt/contracts/test/TournamentFactory.t.sol @@ -32,7 +32,11 @@ contract TournamentFactoryTest is Util, Test { function testRootTournament() public { RootTournament rootTournament = RootTournament( - address(singleLevelfactory.instantiateSingleLevel(Util.ONE_STATE)) + address( + singleLevelfactory.instantiateSingleLevel( + Util.ONE_STATE, IDataProvider(address(0)) + ) + ) ); (uint64 _max_level, uint64 _level, uint64 _log2step, uint64 _height) = @@ -49,7 +53,11 @@ contract TournamentFactoryTest is Util, Test { ); rootTournament = RootTournament( - address(multiLevelfactory.instantiateTop(Util.ONE_STATE)) + address( + multiLevelfactory.instantiateTop( + Util.ONE_STATE, IDataProvider(address(0)) + ) + ) ); (_max_level, _level, _log2step, _height) = diff --git a/prt/contracts/test/Util.sol b/prt/contracts/test/Util.sol index baea76a0..1b7765fa 100644 --- a/prt/contracts/test/Util.sol +++ b/prt/contracts/test/Util.sol @@ -66,7 +66,18 @@ contract Util { } } - function generateProof(uint256 _player, uint64 _height) + function generateDivergenceProof(uint256 _player, uint64 _height) + internal + view + returns (bytes32[] memory) + { + bytes32[] memory _proof = generateFinalStateProof(_player, _height); + _proof[0] = Tree.Node.unwrap(playerNodes[_player][0]); + + return _proof; + } + + function generateFinalStateProof(uint256 _player, uint64 _height) internal view returns (bytes32[] memory) @@ -153,8 +164,11 @@ contract Util { internal returns (TopTournament _topTournament) { - _topTournament = - TopTournament(address(_factory.instantiateTop(ONE_STATE))); + _topTournament = TopTournament( + address( + _factory.instantiateTop(ONE_STATE, IDataProvider(address(0))) + ) + ); // player 0 joins tournament joinTournament(_topTournament, 0, 0); } @@ -168,21 +182,27 @@ contract Util { if (_player == 0) { _tournament.joinTournament( ONE_STATE, - 
generateProof(_player, ArbitrationConstants.height(_level)), + generateFinalStateProof( + _player, ArbitrationConstants.height(_level) + ), playerNodes[0][ArbitrationConstants.height(_level) - 1], playerNodes[0][ArbitrationConstants.height(_level) - 1] ); } else if (_player == 1) { _tournament.joinTournament( TWO_STATE, - generateProof(_player, ArbitrationConstants.height(_level)), + generateFinalStateProof( + _player, ArbitrationConstants.height(_level) + ), playerNodes[1][ArbitrationConstants.height(_level) - 1], playerNodes[1][ArbitrationConstants.height(_level) - 1] ); } else if (_player == 2) { _tournament.joinTournament( TWO_STATE, - generateProof(_player, ArbitrationConstants.height(_level)), + generateFinalStateProof( + _player, ArbitrationConstants.height(_level) + ), playerNodes[0][ArbitrationConstants.height(_level) - 1], playerNodes[2][ArbitrationConstants.height(_level) - 1] ); @@ -203,7 +223,7 @@ contract Util { _left, _right, ONE_STATE, - generateProof(_player, ArbitrationConstants.height(0)) + generateDivergenceProof(_player, ArbitrationConstants.height(0)) ); } @@ -233,7 +253,7 @@ contract Util { _left, _right, ONE_STATE, - generateProof(_player, ArbitrationConstants.height(0)) + generateDivergenceProof(_player, ArbitrationConstants.height(0)) ); } diff --git a/prt/measure_constants/Dockerfile b/prt/measure_constants/Dockerfile index a83e94e5..ca3c4202 100644 --- a/prt/measure_constants/Dockerfile +++ b/prt/measure_constants/Dockerfile @@ -20,7 +20,7 @@ RUN curl -sSL https://github.com/cartesi/dave/releases/download/v0.0.10-rc-test/ COPY chronos.c . COPY Makefile . -RUN make +RUN make chronos.so COPY measure.lua . RUN chmod +x measure.lua diff --git a/prt/measure_constants/Makefile b/prt/measure_constants/Makefile index c6163469..145019e2 100644 --- a/prt/measure_constants/Makefile +++ b/prt/measure_constants/Makefile @@ -13,6 +13,24 @@ else DLLFLAGS=-shared -fPIC endif +create-image: + @docker build -t cartesi/measure_script . 
+ +measure-simple: create-image + @docker run --rm \ + --env MACHINE_PATH=simple-program \ + cartesi/measure_script:latest + +measure-stress: create-image + @docker run --rm \ + --env MACHINE_PATH=debootstrap-machine-sparsed \ + cartesi/measure_script:latest + +measure-doom: create-image + @docker run --rm \ + --env MACHINE_PATH=doom-compute-machine \ + cartesi/measure_script:latest + chronos.so: $(FILES) env $(CC) $(DLLFLAGS) $(CFLAGS) chronos.c -o chronos.so diff --git a/prt/measure_constants/measure.lua b/prt/measure_constants/measure.lua index daa23634..f3ce8e63 100644 --- a/prt/measure_constants/measure.lua +++ b/prt/measure_constants/measure.lua @@ -26,6 +26,9 @@ local log2_uarch_span = 20 -- Log2 of maximum mcycle value local log2_emulator_span = 48 +-- Log2 of maximum inputs per epoch +local log2_input_span = 24 + -- Big Machine increment roughly 2^26 big instructions per second local default_log2_big_machine_span = 26 local default_big_machine_span = 1 << default_log2_big_machine_span @@ -202,11 +205,11 @@ end assert(root_tournament_slowdown > 10, "root_tournament_slowdown must be greater than 1") print(string.format([[ -Starting measurements for %s... +Starting measurements for %s... Target root slowdown is set to `%.1fx` slower. Inner tournament running time is set to `%d` minutes. 
-]], root_tournament_slowdown / 10, inner_tournament_timeout)) +]], machine_path, root_tournament_slowdown / 10, inner_tournament_timeout)) -- Result variables @@ -254,7 +257,7 @@ repeat output_results() print "CONTINUE\n" else - table.insert(heights, 1, log2_emulator_span + log2_uarch_span - log2_strides[1]) + table.insert(heights, 1, log2_input_span + log2_emulator_span + log2_uarch_span - log2_strides[1]) output_results() print "FINISHED\n" return diff --git a/prt/tests/compute-rs/Cargo.toml b/prt/tests/compute-rs/Cargo.toml index a1f405eb..ea1eaba1 100644 --- a/prt/tests/compute-rs/Cargo.toml +++ b/prt/tests/compute-rs/Cargo.toml @@ -18,7 +18,7 @@ repository = "https://github.com/cartesi/dave" cartesi-prt-core = { path = "../../client-rs" } anyhow = "1.0" -alloy = { version = "0.3.1", features = ["sol-types"] } +alloy = { version = "0.8.0", features = ["sol-types"] } clap = { version = "4.5", features = ["derive", "env"] } env_logger = "0.11.5" log = "0.4" diff --git a/prt/tests/compute-rs/Dockerfile b/prt/tests/compute-rs/Dockerfile index 08ababec..71854c30 100644 --- a/prt/tests/compute-rs/Dockerfile +++ b/prt/tests/compute-rs/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.79.0-bookworm AS chef +FROM rust:1.81.0-bookworm AS chef ENV CARGO_REGISTRIES_CARTESI_INDEX=https://github.com/cartesi/crates-index RUN rustup component add rustfmt @@ -9,8 +9,8 @@ RUN apt-get update && \ FROM chef AS planner COPY ./machine/rust-bindings /app/machine/rust-bindings COPY ./common-rs /app/common-rs -COPY ./prt/contract-bindings /app/prt/contract-bindings COPY ./prt/client-rs /app/prt/client-rs +COPY ./prt/contract-bindings /app/prt/contract-bindings COPY ./prt/tests/compute-rs /app/prt/tests/compute-rs WORKDIR /app/prt/tests/compute-rs @@ -29,7 +29,7 @@ WORKDIR /app/prt/tests/compute-rs RUN cargo chef cook --release --recipe-path recipe.json # Build application -COPY --from=ethereum/solc:0.8.23 /usr/bin/solc /usr/bin/solc +COPY --from=ethereum/solc:0.8.27 /usr/bin/solc /usr/bin/solc 
RUN chmod u+x /usr/bin/solc COPY ./prt /app/prt @@ -42,7 +42,7 @@ FROM --platform=linux/amd64 cartesi/machine-emulator:0.18.1 USER root RUN apt-get update && \ apt-get install -y procps curl xxd clang -ENV FOUNDRY_NIGHTLY nightly-5b7e4cb3c882b28f3c32ba580de27ce7381f415a +ENV FOUNDRY_NIGHTLY nightly-805d7cee81e78e9163b8ce3d86a0c3beb39772d4 RUN curl -sSL https://github.com/foundry-rs/foundry/releases/download/${FOUNDRY_NIGHTLY}/foundry_nightly_linux_$(dpkg --print-architecture).tar.gz | \ tar -zx -C /usr/local/bin @@ -67,7 +67,5 @@ WORKDIR /root/prt/contracts RUN forge --version RUN forge build -RUN mkdir -p /dispute_data/0xa16E02E87b7454126E5E10d957A927A7F5B5d2be - WORKDIR /root/prt/tests/compute -ENTRYPOINT ["./compute-test-entrypoint.sh"] +ENTRYPOINT ["./entrypoint.sh"] diff --git a/prt/tests/compute-rs/Makefile b/prt/tests/compute-rs/Makefile index e9ac7736..0e8f98a3 100644 --- a/prt/tests/compute-rs/Makefile +++ b/prt/tests/compute-rs/Makefile @@ -16,19 +16,19 @@ create-image: @docker build -t cartesi/prt-compute:rs -f Dockerfile ../../../ test-simple: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test-simple-rs \ --env MACHINE_PATH=$(SIMPLE_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:rs test-stress: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test-stress-rs \ --env MACHINE_PATH=$(STRESS_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:rs test-doom: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test-doom-rs \ --env MACHINE_PATH=$(DOOM_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:rs diff --git a/prt/tests/compute-rs/src/main.rs b/prt/tests/compute-rs/src/main.rs index 1a10def2..8412e7ae 100644 --- a/prt/tests/compute-rs/src/main.rs +++ b/prt/tests/compute-rs/src/main.rs @@ -23,10 +23,10 @@ async fn main() -> Result<()> { let config = ComputeConfig::parse(); let blockchain_config = config.blockchain_config; - let sender = 
EthArenaSender::new(&blockchain_config)?; + let mut player = Player::new( - Vec::new(), + None, Vec::new(), &blockchain_config, config.machine_path, diff --git a/prt/tests/compute/Dockerfile b/prt/tests/compute/Dockerfile index 4a7ea964..5437334d 100644 --- a/prt/tests/compute/Dockerfile +++ b/prt/tests/compute/Dockerfile @@ -3,7 +3,7 @@ FROM cartesi/machine-emulator:0.18.1 USER 0 RUN apt-get -y update && \ apt-get -y install curl gcc imagemagick make procps xxd pkg-config -ENV FOUNDRY_NIGHTLY nightly-5b7e4cb3c882b28f3c32ba580de27ce7381f415a +ENV FOUNDRY_NIGHTLY nightly-805d7cee81e78e9163b8ce3d86a0c3beb39772d4 RUN curl -sSL https://github.com/foundry-rs/foundry/releases/download/${FOUNDRY_NIGHTLY}/foundry_nightly_linux_$(dpkg --print-architecture).tar.gz | \ tar -zx -C /usr/local/bin @@ -39,14 +39,13 @@ COPY ./prt/client-lua/ . WORKDIR "/app/tests/compute" COPY ./prt/tests/compute/ . -RUN chmod +x compute-test-entrypoint.sh +RUN chmod +x entrypoint.sh RUN chmod +x prt_compute.lua RUN chmod +x doom_showcase/process_doom_graphics.lua WORKDIR "/app" RUN mkdir -p pixels RUN mkdir -p outputs -RUN mkdir -p /dispute_data/0xa16E02E87b7454126E5E10d957A927A7F5B5d2be WORKDIR "/app/tests/compute" -ENTRYPOINT ["./compute-test-entrypoint.sh"] +ENTRYPOINT ["./entrypoint.sh"] diff --git a/prt/tests/compute/Makefile b/prt/tests/compute/Makefile index 3ceca61d..2bc9f63a 100644 --- a/prt/tests/compute/Makefile +++ b/prt/tests/compute/Makefile @@ -16,19 +16,19 @@ create-image: @docker build -t cartesi/prt-compute:lua -f Dockerfile ../../../ test-simple: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test-simple \ --env MACHINE_PATH=$(SIMPLE_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:lua test-stress: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test-stress \ --env MACHINE_PATH=$(STRESS_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:lua test-doom: create-image - @docker run --rm \ + @docker run 
--rm --name prt-compute-test-doom \ --env MACHINE_PATH=$(DOOM_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:lua @@ -41,7 +41,7 @@ clean-graphics: @rm -r pixels outputs test-doom-with-graphics: create-image create-doom-dirs - @docker run --rm \ + @docker run --rm --name prt-compute-test-doom-with-graphics \ --env MACHINE_PATH=$(DOOM_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ --mount type=bind,source="$(shell pwd)/pixels",target=/app/pixels \ diff --git a/prt/tests/compute/README.md b/prt/tests/compute/README.md index 580c4e67..c01935d6 100644 --- a/prt/tests/compute/README.md +++ b/prt/tests/compute/README.md @@ -2,7 +2,7 @@ This directory contains a prototype node written in Lua. The purpose of this Lua node is testing and prototyping only; the real production node is written in Rust. -Furthermore, this node implements only compute (_i.e._ a one-shot computation, like a rollups with no inputs). +Furthermore, this node implements only compute (_i.e._ a one-shot computation, the machine doesn't yield for inputs). Remember to either clone the repository with the flag `--recurse-submodules`, or run `git submodule update --recursive --init` after cloning. You need a docker installation to run the Dave Lua node. @@ -38,7 +38,7 @@ These players come in multiple flavours: If no other player is actively defending this claim, it will lose by timeout. To add more players of different kinds, you can edit the [`prt_compute.lua`](prt_compute.lua) file. 
-To run the full example, execute one of the following commands from the current path path (_i.e._ [`prt/lua_poc`](.)): +To run the full example, execute one of the following commands from the current path: ``` make test-simple diff --git a/prt/tests/compute/blockchain/node.lua b/prt/tests/compute/blockchain/node.lua index 1b315ef0..c7d5ba94 100644 --- a/prt/tests/compute/blockchain/node.lua +++ b/prt/tests/compute/blockchain/node.lua @@ -2,10 +2,12 @@ local helper = require "utils.helper" local default_account_number = 40 +-- spawn an anvil node with 40 accounts, auto-mine, and finalize block at height N-2 local function start_blockchain() print(string.format("Starting blockchain with %d accounts...", default_account_number)) - local cmd = string.format([[sh -c "echo $$ ; exec anvil --block-time 1 -a %d > anvil.log 2>&1"]], + local cmd = string.format( + [[sh -c "echo $$ ; exec anvil --block-time 1 --slots-in-an-epoch 1 -a %d > anvil.log 2>&1"]], default_account_number) local reader = io.popen(cmd) diff --git a/prt/tests/compute/blockchain/utils.lua b/prt/tests/compute/blockchain/utils.lua index dc0cc0be..508fba2f 100644 --- a/prt/tests/compute/blockchain/utils.lua +++ b/prt/tests/compute/blockchain/utils.lua @@ -19,9 +19,9 @@ local function advance_time(seconds, endpoint) end end -local deploy_cmd = [[sh -c "cd ../../contracts && ./deploy_anvil.sh"]] -local function deploy_contracts() - local reader = io.popen(deploy_cmd) +local deploy_cmd = [[sh -c "cd %s && ./deploy_anvil.sh"]] +local function deploy_contracts(contracts_path) + local reader = io.popen(string.format(deploy_cmd, contracts_path)) assert(reader, "Failed to open process for deploy command: " .. 
deploy_cmd) local output = reader:read("*a") local success = reader:close() diff --git a/prt/tests/compute/compute-test-entrypoint.sh b/prt/tests/compute/entrypoint.sh similarity index 100% rename from prt/tests/compute/compute-test-entrypoint.sh rename to prt/tests/compute/entrypoint.sh diff --git a/prt/tests/compute/prt_compute.lua b/prt/tests/compute/prt_compute.lua index 94bd9b61..2adf0f18 100755 --- a/prt/tests/compute/prt_compute.lua +++ b/prt/tests/compute/prt_compute.lua @@ -36,30 +36,44 @@ local function write_json_file(leafs, root_tournament) local flat = require "utils.flat" local json = require "utils.json" - local file_path = string.format("/dispute_data/%s/inputs_and_leafs.json", root_tournament) + local work_path = string.format("/compute_data/%s", root_tournament) + if not helper.exists(work_path) then + helper.mkdir_p(work_path) + end + local file_path = string.format("%s/inputs_and_leafs.json", work_path) local file = assert(io.open(file_path, "w")) file:write(json.encode(flat.flatten(inputs_and_leafs).flat_object)) assert(file:close()) end +local function get_root_constants(root_tournament) + local Reader = require "player.reader" + local reader = Reader:new(blockchain_constants.endpoint) + local root_constants = reader:read_constants(root_tournament) + + return root_constants +end + -- Function to setup players -local function setup_players(use_lua_node, extra_data, root_constants, root_tournament, machine_path) +local function setup_players(use_lua_node, extra_data, root_tournament, machine_path) + local root_constants = get_root_constants(root_tournament) + + local inputs = nil local player_coroutines = {} local player_index = 1 print("Calculating root commitment...") - local snapshot_dir = string.format("/dispute_data/%s", root_tournament) + local snapshot_dir = string.format("/compute_data/%s", root_tournament) local builder = CommitmentBuilder:new(machine_path, snapshot_dir) - local root_commitment = builder:build(0, 0, 
root_constants.log2_step, root_constants.height) + local root_commitment = builder:build(0, 0, root_constants.log2_step, root_constants.height, inputs) if use_lua_node then -- use Lua node to defend print("Setting up Lua honest player") local start_hero = require "runners.hero_runner" player_coroutines[player_index] = start_hero(player_index, machine_path, root_commitment, root_tournament, - extra_data) + extra_data, inputs) else -- use Rust node to defend - print("Setting up Rust honest player") local rust_hero_runner = require "runners.rust_hero_runner" player_coroutines[player_index] = rust_hero_runner.create_react_once_runner(player_index, machine_path) @@ -73,7 +87,7 @@ local function setup_players(use_lua_node, extra_data, root_constants, root_tour local scoped_require = new_scoped_require(_ENV) local start_sybil = scoped_require "runners.sybil_runner" player_coroutines[player_index] = start_sybil(player_index, machine_path, root_commitment, root_tournament, - FAKE_COMMITMENT_COUNT) + FAKE_COMMITMENT_COUNT, inputs) player_index = player_index + 1 end @@ -90,12 +104,38 @@ local function setup_players(use_lua_node, extra_data, root_constants, root_tour return player_coroutines end -local function get_root_constants(root_tournament) - local Reader = require "player.reader" - local reader = Reader:new(blockchain_constants.endpoint) - local root_constants = reader:read_constants(root_tournament) +-- Function to run players +local function run_players(player_coroutines) + while true do + local idle = true + local has_live_coroutine = false + for i, c in ipairs(player_coroutines) do + if c then + local success, ret = coroutine.resume(c) + local status = coroutine.status(c) + + if status == "dead" then + player_coroutines[i] = false + end + if not success then + print(string.format("coroutine %d fail to resume with error: %s", i, ret)) + elseif ret then + has_live_coroutine = true + idle = idle and ret.idle + end + end + end - return root_constants + if not 
has_live_coroutine then + print("No active players, ending program...") + break + end + + if idle then + print(string.format("All players idle, fastforward blockchain for %d seconds...", FAST_FORWARD_TIME)) + blockchain_utils.advance_time(FAST_FORWARD_TIME, blockchain_constants.endpoint) + end + end end -- Main Execution @@ -107,42 +147,12 @@ local root_tournament = blockchain_constants.root_tournament local blockchain_node = Blockchain:new() time.sleep(NODE_DELAY) -blockchain_utils.deploy_contracts() +blockchain_utils.deploy_contracts("../../contracts") time.sleep(NODE_DELAY) -local root_constants = get_root_constants(root_tournament) -local player_coroutines = setup_players(use_lua_node, extra_data, root_constants, root_tournament, machine_path) -print("Hello from Dave lua prototype!") - -while true do - local idle = true - local has_live_coroutine = false - for i, c in ipairs(player_coroutines) do - if c then - local success, ret = coroutine.resume(c) - local status = coroutine.status(c) - - if status == "dead" then - player_coroutines[i] = false - end - if not success then - print(string.format("coroutine %d fail to resume with error: %s", i, ret)) - elseif ret then - has_live_coroutine = true - idle = idle and ret.idle - end - end - end +local player_coroutines = setup_players(use_lua_node, extra_data, root_tournament, machine_path) +print("Hello from Dave compute lua prototype!") - if not has_live_coroutine then - print("No active players, ending program...") - break - end - - if idle then - print(string.format("All players idle, fastforward blockchain for %d seconds...", FAST_FORWARD_TIME)) - blockchain_utils.advance_time(FAST_FORWARD_TIME, blockchain_constants.endpoint) - end -end +run_players(player_coroutines) print("Good-bye, world!") diff --git a/prt/tests/compute/runners/helpers/fake_commitment.lua b/prt/tests/compute/runners/helpers/fake_commitment.lua index 24b583a0..1e88ac92 100644 --- a/prt/tests/compute/runners/helpers/fake_commitment.lua +++ 
b/prt/tests/compute/runners/helpers/fake_commitment.lua @@ -65,7 +65,7 @@ local function rebuild_nested_trees(leafs) end local function build_commitment(cached_commitments, machine_path, snapshot_dir, base_cycle, level, log2_stride, - log2_stride_count) + log2_stride_count, inputs) -- the honest commitment builder should be operated in an isolated env -- to avoid side effects to the strategy behavior @@ -80,7 +80,7 @@ local function build_commitment(cached_commitments, machine_path, snapshot_dir, local CommitmentBuilder = scoped_require "computation.commitment" local builder = CommitmentBuilder:new(machine_path, snapshot_dir) - local commitment = builder:build(base_cycle, level, log2_stride, log2_stride_count) + local commitment = builder:build(base_cycle, level, log2_stride, log2_stride_count, inputs) coroutine.yield(commitment) end) @@ -99,9 +99,11 @@ local function build_fake_commitment(commitment, fake_index, log2_stride) local fake_hash = get_fake_hash(log2_stride) local leaf_index = math.max(#commitment.leafs - fake_index + 1, 1) - local old_leaf = fake_builder.leafs[leaf_index] + for i = leaf_index, #commitment.leafs do + local old_leaf = fake_builder.leafs[i] + fake_builder.leafs[i] = { hash = fake_hash, accumulated_count = old_leaf.accumulated_count } + end - fake_builder.leafs[leaf_index] = { hash = fake_hash, accumulated_count = old_leaf.accumulated_count } update_scope_of_hashes(fake_builder.leafs) rebuild_nested_trees(fake_builder.leafs) @@ -124,7 +126,7 @@ function FakeCommitmentBuilder:new(machine_path, root_commitment, snapshot_dir) return c end -function FakeCommitmentBuilder:build(base_cycle, level, log2_stride, log2_stride_count) +function FakeCommitmentBuilder:build(base_cycle, level, log2_stride, log2_stride_count, inputs) -- function caller should set `self.fake_index` properly before calling this function -- the fake commitments are not guaranteed to be unique if there are not many leafs (short computation) -- `self.fake_index` is reset and 
the end of a successful call to ensure the next caller must set it again. @@ -141,11 +143,12 @@ function FakeCommitmentBuilder:build(base_cycle, level, log2_stride, log2_stride local commitment = build_commitment(self.commitments, self.machine_path, self.snapshot_dir, base_cycle, level, log2_stride, - log2_stride_count) + log2_stride_count, + inputs) + print("honest commitment", commitment) local fake_commitment = build_fake_commitment(commitment, self.fake_index, log2_stride) self.fake_commitments[level][base_cycle][self.fake_index] = fake_commitment - self.fake_index = false return fake_commitment end diff --git a/prt/tests/compute/runners/hero_runner.lua b/prt/tests/compute/runners/hero_runner.lua index 60d50fe9..9b78f2b8 100755 --- a/prt/tests/compute/runners/hero_runner.lua +++ b/prt/tests/compute/runners/hero_runner.lua @@ -5,7 +5,7 @@ local HonestStrategy = require "player.strategy" local Sender = require "player.sender" local Player = require "player.player" -local function hero_runner(player_id, machine_path, root_commitment, root_tournament, extra_data) +local function hero_runner(player_id, machine_path, root_commitment, root_tournament, extra_data, inputs) local hook if extra_data then @@ -15,9 +15,10 @@ local function hero_runner(player_id, machine_path, root_commitment, root_tourna hook = false end - local snapshot_dir = string.format("/dispute_data/%s", root_tournament) + local snapshot_dir = string.format("/compute_data/%s", root_tournament) local strategy = HonestStrategy:new( CommitmentBuilder:new(machine_path, snapshot_dir, root_commitment), + inputs, machine_path, Sender:new(blockchain_consts.pks[player_id], player_id, blockchain_consts.endpoint) ) diff --git a/prt/tests/compute/runners/rust_hero_runner.lua b/prt/tests/compute/runners/rust_hero_runner.lua index 43ff5207..ebe4f6a0 100644 --- a/prt/tests/compute/runners/rust_hero_runner.lua +++ b/prt/tests/compute/runners/rust_hero_runner.lua @@ -28,7 +28,7 @@ end local function 
create_react_once_runner(player_id, machine_path) local rust_compute_cmd = string.format( [[sh -c "echo $$ ; exec env MACHINE_PATH='%s' RUST_LOG='info' \ - ./cartesi-prt-compute 2>&1 | tee -a honest.log"]], + ./cartesi-prt-compute 2>&1 | tee -a honest.log"]], machine_path) return coroutine.create(function() diff --git a/prt/tests/compute/runners/sybil_runner.lua b/prt/tests/compute/runners/sybil_runner.lua index be30ae14..babb9803 100755 --- a/prt/tests/compute/runners/sybil_runner.lua +++ b/prt/tests/compute/runners/sybil_runner.lua @@ -18,6 +18,7 @@ local function sybil_player(root_tournament, strategy, blockchain_endpoint, fake helper.log_timestamp(string.format("react with fake index: %d", i)) local log = strategy:react(state) + strategy.commitment_builder.fake_index = false idle = idle and log.idle finished = finished and log.finished end @@ -35,10 +36,11 @@ local function sybil_player(root_tournament, strategy, blockchain_endpoint, fake end -local function sybil_runner(player_id, machine_path, root_commitment, root_tournament, fake_commitment_count) - local snapshot_dir = string.format("/dispute_data/%s", root_tournament) +local function sybil_runner(player_id, machine_path, root_commitment, root_tournament, fake_commitment_count, inputs) + local snapshot_dir = string.format("/compute_data/%s", root_tournament) local strategy = HonestStrategy:new( FakeCommitmentBuilder:new(machine_path, root_commitment, snapshot_dir), + inputs, machine_path, Sender:new(blockchain_consts.pks[player_id], player_id, blockchain_consts.endpoint) ) diff --git a/prt/tests/rollups/Dockerfile b/prt/tests/rollups/Dockerfile new file mode 100644 index 00000000..e006bb45 --- /dev/null +++ b/prt/tests/rollups/Dockerfile @@ -0,0 +1,66 @@ +FROM rust:1.81.0-bookworm AS chef + +ENV CARGO_REGISTRIES_CARTESI_INDEX=https://github.com/cartesi/crates-index +RUN rustup component add rustfmt +RUN cargo install cargo-chef +RUN apt-get update && \ + apt-get install -y clang libslirp0 + +FROM chef AS 
planner +COPY ./machine/rust-bindings /app/machine/rust-bindings +COPY ./common-rs /app/common-rs +COPY ./prt/client-rs /app/prt/client-rs +COPY ./prt/contract-bindings /app/prt/contract-bindings +COPY ./cartesi-rollups/contract-bindings /app/cartesi-rollups/contract-bindings +COPY ./cartesi-rollups/node /app/cartesi-rollups/node + +WORKDIR /app/cartesi-rollups/node +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +COPY ./machine /app/machine +COPY ./common-rs /app/common-rs +COPY ./prt/client-rs /app/prt/client-rs +COPY ./prt/contract-bindings /app/prt/contract-bindings +COPY ./cartesi-rollups/contract-bindings /app/cartesi-rollups/contract-bindings +COPY ./.git /app/.git +COPY --from=planner /app/cartesi-rollups/node/recipe.json /app/cartesi-rollups/node/recipe.json + +# Build dependencies - this is the caching Docker layer! +WORKDIR /app/cartesi-rollups/node +RUN cargo chef cook --release --recipe-path recipe.json + +# Build application +COPY --from=ethereum/solc:0.8.27 /usr/bin/solc /usr/bin/solc +RUN chmod u+x /usr/bin/solc + +COPY ./cartesi-rollups /app/cartesi-rollups + +WORKDIR /app/cartesi-rollups/node +RUN cargo build --release --bin dave-rollups + +FROM --platform=linux/amd64 cartesi/machine-emulator:0.18.1 + +USER root +RUN apt-get update && \ + apt-get install -y procps curl xxd clang sqlite3 +ENV FOUNDRY_NIGHTLY nightly-805d7cee81e78e9163b8ce3d86a0c3beb39772d4 +RUN curl -sSL https://github.com/foundry-rs/foundry/releases/download/${FOUNDRY_NIGHTLY}/foundry_nightly_linux_$(dpkg --print-architecture).tar.gz | \ + tar -zx -C /usr/local/bin + +# prepare echo machine +WORKDIR /root/program/ +COPY ./prt/tests/rollups/program/echo/echo-program.tar.gz /root/program/ +RUN tar -zx -f /root/program/echo-program.tar.gz + +COPY ./machine/step /root/machine/step +COPY ./prt /root/prt +COPY ./cartesi-rollups /root/cartesi-rollups +COPY --from=builder /app/cartesi-rollups/node/target/release/dave-rollups /root/prt/tests/rollups/dave-rollups + 
+WORKDIR /root/cartesi-rollups/contracts +RUN forge --version +RUN forge build + +WORKDIR /root/prt/tests/rollups +ENTRYPOINT ["./prt_rollups.lua"] diff --git a/prt/tests/rollups/Makefile b/prt/tests/rollups/Makefile new file mode 100644 index 00000000..8a5eb8a5 --- /dev/null +++ b/prt/tests/rollups/Makefile @@ -0,0 +1,18 @@ +ECHO_MACHINE_PATH := "/root/program/echo-program" + +help: + @echo ' create-image - create `prt-rollups:test` docker image' + @echo ' test-echo - run PRT rollups echo test' + +create-image: + @docker build -t cartesi/prt-rollups:test -f Dockerfile ../../../ + +test-echo: create-image + @docker run --rm --name prt-rollups-test-echo \ + --env MACHINE_PATH=$(ECHO_MACHINE_PATH) \ + cartesi/prt-rollups:test + + + + +.PHONY: help create-image test-echo diff --git a/prt/tests/rollups/README.md b/prt/tests/rollups/README.md new file mode 100644 index 00000000..296a1370 --- /dev/null +++ b/prt/tests/rollups/README.md @@ -0,0 +1,24 @@ +# PRT Rollups test + +This directory contains a rollups node written in Rust. +The node test will be conducted with a Lua orchestrator script spawning an honest rollups node in the background to advance the rollups states and to defend the application. The Lua orchestrator script also spawns multiple [dishonest nodes](../../../prt/tests/compute/README.md) trying to tamper with the rollups states. + +Remember to either clone the repository with the flag `--recurse-submodules`, or run `git submodule update --recursive --init` after cloning. +You need a docker installation to run the Dave Lua node. + +## Build test image + +In order to run tests in this directory, a docker image must be built to prepare the test environment. +Once the test image is built, the user can run all the tests supported by swapping the `MACHINE_PATH` env variable. + +``` +make create-image +``` + +## Run echo test + +A simple [echo program](./program/echo/) is provided to test the rollups. 
+ +``` +make test-echo +``` diff --git a/prt/tests/rollups/dave/node.lua b/prt/tests/rollups/dave/node.lua new file mode 100644 index 00000000..4dcdf953 --- /dev/null +++ b/prt/tests/rollups/dave/node.lua @@ -0,0 +1,41 @@ +local helper = require "utils.helper" + +local function start_dave_node(machine_path, db_path, sleep_duration, verbosity, trace_level) + local cmd = string.format( + [[sh -c "echo $$ ; exec env MACHINE_PATH='%s' PATH_TO_DB='%s' \ + SLEEP_DURATION=%d RUST_BACKTRACE='%s' \ + RUST_LOG='none',cartesi_prt_core='%s',rollups_compute_runner='%s',rollups_epoch_manager='%s' \ + ./dave-rollups > dave.log 2>&1"]], + machine_path, db_path, sleep_duration, trace_level, verbosity, verbosity, verbosity) + + local reader = io.popen(cmd) + assert(reader, "`popen` returned nil reader") + + local pid = tonumber(reader:read()) + + local handle = { reader = reader, pid = pid } + setmetatable(handle, { + __gc = function(t) + helper.stop_pid(t.reader, t.pid) + end + }) + + print(string.format("Dave node running with pid %d", pid)) + return handle +end + +local Dave = {} +Dave.__index = Dave + +function Dave:new(machine_path, sleep_duration, verbosity, trace_level) + local n = {} + + local handle = start_dave_node(machine_path, "./dave.db", sleep_duration, verbosity, trace_level) + + n._handle = handle + + setmetatable(n, self) + return n +end + +return Dave diff --git a/prt/tests/rollups/dave/reader.lua b/prt/tests/rollups/dave/reader.lua new file mode 100644 index 00000000..de455335 --- /dev/null +++ b/prt/tests/rollups/dave/reader.lua @@ -0,0 +1,198 @@ +local eth_abi = require "utils.eth_abi" + +local function parse_topics(json) + local _, _, topics = json:find( + [==["topics":%[([^%]]*)%]]==] + ) + + local t = {} + for k, _ in string.gmatch(topics, [["(0x%x+)"]]) do + table.insert(t, k) + end + + return t +end + +local function parse_data(json, sig) + local _, _, data = json:find( + [==["data":"(0x%x+)"]==] + ) + + local decoded_data = eth_abi.decode_event_data(sig, 
data) + return decoded_data +end + +local function parse_meta(json) + local _, _, block_hash = json:find( + [==["blockHash":"(0x%x+)"]==] + ) + + local _, _, block_number = json:find( + [==["blockNumber":"(0x%x+)"]==] + ) + + local _, _, log_index = json:find( + [==["logIndex":"(0x%x+)"]==] + ) + + local t = { + block_hash = block_hash, + block_number = tonumber(block_number), + log_index = tonumber(log_index), + } + + return t +end + + +local function parse_logs(logs, data_sig) + local ret = {} + for k, _ in string.gmatch(logs, [[{[^}]*}]]) do + local emited_topics = parse_topics(k) + local decoded_data = parse_data(k, data_sig) + local meta = parse_meta(k) + table.insert(ret, { emited_topics = emited_topics, decoded_data = decoded_data, meta = meta }) + end + + return ret +end + +local Reader = {} +Reader.__index = Reader + +function Reader:new(endpoint) + local reader = { + endpoint = assert(endpoint) + } + + setmetatable(reader, self) + return reader +end + +local cast_logs_template = [==[ +cast rpc -r "%s" eth_getLogs \ + '[{"fromBlock": "earliest", "toBlock": "latest", "address": "%s", "topics": [%s]}]' -w 2>&1 +]==] + +function Reader:_read_logs(contract_address, sig, topics, data_sig) + topics = topics or { false, false, false } + local encoded_sig = eth_abi.encode_sig(sig) + table.insert(topics, 1, encoded_sig) + assert(#topics == 4, "topics doesn't have four elements") + + local topics_strs = {} + for _, v in ipairs(topics) do + local s + if v then + s = '"' .. v .. 
'"' + else + s = "null" + end + table.insert(topics_strs, s) + end + local topic_str = table.concat(topics_strs, ", ") + + local cmd = string.format( + cast_logs_template, + self.endpoint, + contract_address, + topic_str + ) + + local handle = io.popen(cmd) + assert(handle) + local logs = handle:read "*a" + handle:close() + + if logs:find "Error" then + error(string.format("Read logs `%s` failed:\n%s", sig, logs)) + end + + local ret = parse_logs(logs, data_sig) + return ret +end + +local cast_call_template = [==[ +cast call --rpc-url "%s" "%s" "%s" %s 2>&1 +]==] + +function Reader:_call(address, sig, args) + local quoted_args = {} + for _, v in ipairs(args) do + table.insert(quoted_args, '"' .. v .. '"') + end + local args_str = table.concat(quoted_args, " ") + + local cmd = string.format( + cast_call_template, + self.endpoint, + address, + sig, + args_str + ) + + local handle = io.popen(cmd) + assert(handle) + + local ret = {} + local str = handle:read() + while str do + if str:find "Error" or str:find "error" then + local err_str = handle:read "*a" + handle:close() + error(string.format("Call `%s` failed:\n%s%s", sig, str, err_str)) + end + + table.insert(ret, str) + str = handle:read() + end + handle:close() + + return ret +end + +function Reader:read_epochs_sealed(consensus_address) + local sig = "EpochSealed(uint256,uint256,uint256,bytes32,address)" + local data_sig = "(uint256,uint256,uint256,bytes32,address)" + + local logs = self:_read_logs(consensus_address, sig, { false, false, false }, data_sig) + + local ret = {} + for k, v in ipairs(logs) do + local log = {} + log.meta = v.meta + + log.epoch_number = tonumber(v.decoded_data[1]) + log.block_lower_bound = tonumber(v.decoded_data[2]) + log.block_upper_bound = tonumber(v.decoded_data[3]) + log.initial_machine_state_hash = v.decoded_data[4] + log.tournament = v.decoded_data[5] + + ret[k] = log + end + + return ret +end + +function Reader:read_inputs_added(input_box_address) + local sig = 
"InputAdded(address,uint256,bytes)" + local data_sig = "(bytes)" + + local logs = self:_read_logs(input_box_address, sig, { false, false, false }, data_sig) + + local ret = {} + for k, v in ipairs(logs) do + local log = {} + log.meta = v.meta + + log.app_contract = v.emited_topics[2] + log.index = tonumber(v.emited_topics[3]) + log.data = v.decoded_data[1] + + ret[k] = log + end + + return ret +end + +return Reader diff --git a/prt/tests/rollups/dave/sender.lua b/prt/tests/rollups/dave/sender.lua new file mode 100644 index 00000000..106be9ba --- /dev/null +++ b/prt/tests/rollups/dave/sender.lua @@ -0,0 +1,90 @@ +local Hash = require "cryptography.hash" +local MerkleTree = require "cryptography.merkle_tree" + +local function quote_args(args, not_quote) + local quoted_args = {} + for _, v in ipairs(args) do + if type(v) == "table" and (getmetatable(v) == Hash or getmetatable(v) == MerkleTree) then + if not_quote then + table.insert(quoted_args, v:hex_string()) + else + table.insert(quoted_args, '"' .. v:hex_string() .. '"') + end + elseif type(v) == "table" then + if v._tag == "tuple" then + local qa = quote_args(v, true) + local ca = table.concat(qa, ",") + local sb = "'(" .. ca .. ")'" + table.insert(quoted_args, sb) + else + local qa = quote_args(v, true) + local ca = table.concat(qa, ",") + local sb = "'[" .. ca .. "]'" + table.insert(quoted_args, sb) + end + elseif not_quote then + table.insert(quoted_args, tostring(v)) + else + table.insert(quoted_args, '"' .. v .. 
'"') + end + end + + return quoted_args +end + + +local Sender = {} +Sender.__index = Sender + +function Sender:new(pk, endpoint) + local sender = { + pk = pk, + endpoint = endpoint + } + + setmetatable(sender, self) + return sender +end + +local cast_send_template = [[ +cast send --private-key "%s" --rpc-url "%s" "%s" "%s" %s 2>&1 +]] + +function Sender:_send_tx(contract_address, sig, args) + local quoted_args = quote_args(args) + local args_str = table.concat(quoted_args, " ") + + local cmd = string.format( + cast_send_template, + self.pk, + self.endpoint, + contract_address, + sig, + args_str + ) + + local handle = io.popen(cmd) + assert(handle) + + local ret = handle:read "*a" + if ret:find "Error" then + handle:close() + error(string.format("Send transaction `%s` reverted:\n%s", cmd, ret)) + end + + self.tx_count = (self.tx_count or 0) + 1 + handle:close() +end + +function Sender:tx_add_input(input_box_address, app_contract_address, payload) + local sig = [[addInput(address,bytes)(bytes32)]] + return pcall( + self._send_tx, + self, + input_box_address, + sig, + { app_contract_address, payload } + ) +end + +return Sender diff --git a/prt/tests/rollups/program/.dockerignore b/prt/tests/rollups/program/.dockerignore new file mode 100644 index 00000000..643b3a57 --- /dev/null +++ b/prt/tests/rollups/program/.dockerignore @@ -0,0 +1 @@ +**/*tar.gz diff --git a/prt/tests/rollups/program/.gitignore b/prt/tests/rollups/program/.gitignore new file mode 100644 index 00000000..643b3a57 --- /dev/null +++ b/prt/tests/rollups/program/.gitignore @@ -0,0 +1 @@ +**/*tar.gz diff --git a/prt/tests/rollups/program/echo/.gitignore b/prt/tests/rollups/program/echo/.gitignore new file mode 100644 index 00000000..15336dbe --- /dev/null +++ b/prt/tests/rollups/program/echo/.gitignore @@ -0,0 +1 @@ +echo-program/ diff --git a/prt/tests/rollups/program/echo/Dockerfile b/prt/tests/rollups/program/echo/Dockerfile new file mode 100644 index 00000000..9361a52b --- /dev/null +++ 
b/prt/tests/rollups/program/echo/Dockerfile @@ -0,0 +1,13 @@ +FROM cartesi/machine-emulator:0.18.1 +USER root +RUN apt-get update && \ + apt-get install -y wget + +RUN wget https://github.com/cartesi/image-kernel/releases/download/v0.20.0/linux-6.5.13-ctsi-1-v0.20.0.bin \ + -O ./linux.bin +RUN wget https://github.com/cartesi/machine-emulator-tools/releases/download/v0.16.1/rootfs-tools-v0.16.1.ext2 \ + -O ./rootfs.ext2 + +COPY ./gen_machine_echo.sh . +RUN chmod +x gen_machine_echo.sh +RUN ./gen_machine_echo.sh && tar -zvcf /echo-program.tar.gz echo-program diff --git a/prt/tests/rollups/program/echo/README.md b/prt/tests/rollups/program/echo/README.md new file mode 100644 index 00000000..f726fb17 --- /dev/null +++ b/prt/tests/rollups/program/echo/README.md @@ -0,0 +1,10 @@ +# Simple echo program + +## Generate program + +From this directory, run the following: + +``` +docker build -t echo:test . +docker cp $(docker create echo:test):/echo-program.tar.gz . +``` diff --git a/prt/tests/rollups/program/echo/gen_machine_echo.sh b/prt/tests/rollups/program/echo/gen_machine_echo.sh new file mode 100755 index 00000000..99252fff --- /dev/null +++ b/prt/tests/rollups/program/echo/gen_machine_echo.sh @@ -0,0 +1,5 @@ +#!/bin/bash +cartesi-machine --ram-image=./linux.bin \ + --flash-drive=label:root,filename:./rootfs.ext2 \ + --no-rollback --store=./echo-program \ + -- "ioctl-echo-loop --vouchers=1 --notices=1 --reports=1 --verbose=1" diff --git a/prt/tests/rollups/prt_rollups.lua b/prt/tests/rollups/prt_rollups.lua new file mode 100755 index 00000000..cf91dec4 --- /dev/null +++ b/prt/tests/rollups/prt_rollups.lua @@ -0,0 +1,218 @@ +#!/usr/bin/lua +require "setup_path" + +-- amount of time sleep between each react +local SLEEP_TIME = 2 +-- amount of time to fastforward if `IDLE_LIMIT` is reached +local FAST_FORWARD_TIME = 20 +-- amount of time to fastforward to advance an epoch +-- local EPOCH_TIME = 60 * 60 * 24 * 7 +-- delay time for blockchain node to be ready +local 
NODE_DELAY = 3 +-- number of fake commitment to make +local FAKE_COMMITMENT_COUNT = 1 +-- number of idle players +local IDLE_PLAYER_COUNT = 0 +-- consensus contract address in anvil deployment +local CONSENSUS_ADDRESS = "0x5FC8d32690cc91D4c39d9d3abcBD16989F875707" +-- input contract address in anvil deployment +local INPUT_BOX_ADDRESS = "0x5FbDB2315678afecb367f032d93F642f64180aa3"; +-- app contract address in anvil deployment +local APP_ADDRESS = "0x0000000000000000000000000000000000000000"; +-- Hello from Dave! +local ECHO_MSG = "0x48656c6c6f2076726f6d204461766521" +-- Encoded Input blob +-- 31337 +-- 0x0000000000000000000000000000000000000000 +-- 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 +-- 1 +-- 0 +-- 1 +-- 0 +-- "0x48656c6c6f2076726f6d204461766521" +-- cast abi-encode "EvmAdvance(uint256,address,address,uint256,uint256,uint256,uint256,bytes)" 31337 "0x0000000000000000000000000000000000000000" "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 1 1622547800 1 0 "0x48656c6c6f2076726f6d204461766521" +local ENCODED_INPUT = +"0x0000000000000000000000000000000000000000000000000000000000007a690000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb9226600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000060b61d58000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000001048656c6c6f2076726f6d20446176652100000000000000000000000000000000" + +-- Required Modules +local new_scoped_require = require "utils.scoped_require" + +local helper = require "utils.helper" +local blockchain_utils = require "blockchain.utils" +local time = require "utils.time" +local blockchain_constants = require "blockchain.constants" +local Blockchain = require "blockchain.node" 
local Dave = require "dave.node"
local Hash = require "cryptography.hash"
local Machine = require "computation.machine"
local MerkleBuilder = require "cryptography.merkle_builder"
local Reader = require "dave.reader"
local Sender = require "dave.sender"

-- Query the dave node's sqlite database for the level-0 commitment leafs of
-- a root tournament. `2>&1` is required: sqlite3 reports errors on stderr,
-- which io.popen does not capture, so without it the "Error" check below
-- could never fire.
local ROOT_LEAFS_QUERY =
[[sqlite3 /compute_data/%s/db 'select level,base_cycle,compute_leaf_index,repetitions,HEX(compute_leaf)
from compute_leafs where level=0 ORDER BY compute_leaf_index ASC' 2>&1]]

--- Rebuild the root commitment of `root_tournament` from the node's database,
-- using the machine stored at `machine_path` for the initial state hash.
local function build_root_commitment_from_db(machine_path, root_tournament)
    local builder = MerkleBuilder:new()
    local machine = Machine:new_from_path(machine_path)
    local initial_state = machine:state()

    local handle = io.popen(string.format(ROOT_LEAFS_QUERY, root_tournament))
    assert(handle)
    local rows = handle:read "*a"
    handle:close()

    if rows:find "Error" then
        error(string.format("Read leafs failed:\n%s", rows))
    end

    -- Each row is `level|base_cycle|compute_leaf_index|repetitions|leaf_hex`;
    -- only the repetition count and the leaf hash are needed here.
    for line in rows:gmatch("[^\n]+") do
        local _, _, _, repetitions, compute_leaf = line:match(
            "([^|]+)|([^|]+)|([^|]+)|([^|]+)|([^|]+)")
        repetitions = tonumber(repetitions)
        compute_leaf = Hash:from_digest_hex("0x" .. compute_leaf)

        builder:add(compute_leaf, repetitions)
    end

    return builder:build(initial_state.root_hash)
end

-- All inputs of a tournament, ordered by input index (stderr folded in for
-- the same reason as ROOT_LEAFS_QUERY).
local INPUTS_QUERY =
[[sqlite3 /compute_data/%s/db 'select HEX(input)
from inputs ORDER BY input_index ASC' 2>&1]]

--- Read all inputs of `root_tournament` from the node's database.
-- @return array of 0x-prefixed hex strings, one per input.
local function get_inputs_from_db(root_tournament)
    local handle = io.popen(string.format(INPUTS_QUERY, root_tournament))
    assert(handle)
    local rows = handle:read "*a"
    handle:close()

    if rows:find "Error" then
        error(string.format("Read inputs failed:\n%s", rows))
    end

    local inputs = {}
    for line in rows:gmatch("[^\n]+") do
        local input = line:match("([^|]+)")
        table.insert(inputs, "0x" .. input)
    end

    return inputs
end

--- Build the array of player coroutines (one dishonest sybil plus optional
-- idle players) that will attack `root_tournament`.
local function setup_players(root_tournament, machine_path)
    local player_coroutines = {}
    local player_index = 1
    print("Calculating root commitment...")
    local root_commitment = build_root_commitment_from_db(machine_path, root_tournament)
    local inputs = get_inputs_from_db(root_tournament)

    if FAKE_COMMITMENT_COUNT > 0 then
        print(string.format("Setting up dishonest player with %d fake commitments", FAKE_COMMITMENT_COUNT))
        -- a scoped require keeps each player's module state isolated
        local scoped_require = new_scoped_require(_ENV)
        local start_sybil = scoped_require "runners.sybil_runner"
        player_coroutines[player_index] = start_sybil(player_index + 1, machine_path, root_commitment, root_tournament,
            FAKE_COMMITMENT_COUNT, inputs)
        player_index = player_index + 1
    end

    if IDLE_PLAYER_COUNT > 0 then
        print(string.format("Setting up %d idle players", IDLE_PLAYER_COUNT))
        local scoped_require = new_scoped_require(_ENV)
        local start_idle = scoped_require "runners.idle_runner"
        for _ = 1, IDLE_PLAYER_COUNT do
            player_coroutines[player_index] = start_idle(player_index + 1, machine_path, root_tournament)
            player_index = player_index + 1
        end
    end

    return player_coroutines
end

--- Resume every live player coroutine until all have finished, fast-forwarding
-- the blockchain whenever all remaining players report being idle.
local function run_players(player_coroutines)
    while true do
        local idle = true
        local has_live_coroutine = false
        for i, c in ipairs(player_coroutines) do
            if c then
                local success, ret = coroutine.resume(c)
                local status = coroutine.status(c)

                -- mark finished players with `false` (not nil, which would
                -- punch a hole in the array and break ipairs)
                if status == "dead" then
                    player_coroutines[i] = false
                end
                if not success then
                    print(string.format("coroutine %d fail to resume with error: %s", i, ret))
                elseif ret then
                    has_live_coroutine = true
                    idle = idle and ret.idle
                end
            end
        end

        if not has_live_coroutine then
            print("No active players, ending attack...")
            break
        end

        if idle then
            print(string.format("All players idle, fastforward blockchain for %d seconds...", FAST_FORWARD_TIME))
            blockchain_utils.advance_time(FAST_FORWARD_TIME, blockchain_constants.endpoint)
        end
        time.sleep(SLEEP_TIME)
    end
end

-- Main Execution
local rollups_machine_path = os.getenv("MACHINE_PATH")

-- keep references alive so the spawned node processes aren't collected
local blockchain_node = Blockchain:new()
time.sleep(NODE_DELAY)

blockchain_utils.deploy_contracts("../../../cartesi-rollups/contracts")
time.sleep(NODE_DELAY)

-- trace, debug, info, warn, error
local verbosity = os.getenv("VERBOSITY") or 'debug'
-- 0, 1, full
local trace_level = os.getenv("TRACE_LEVEL") or 'full'
local dave_node = Dave:new(rollups_machine_path, SLEEP_TIME, verbosity, trace_level)
time.sleep(NODE_DELAY)

local reader = Reader:new(blockchain_constants.endpoint)
local sender = Sender:new(blockchain_constants.pks[1], blockchain_constants.endpoint)

print("Hello from Dave rollups lua prototype!")

while true do
    local sealed_epochs = reader:read_epochs_sealed(CONSENSUS_ADDRESS)

    if #sealed_epochs > 0 then
        local last_sealed_epoch = sealed_epochs[#sealed_epochs]

        -- feed three more inputs so subsequent epochs have work to process
        -- (fix: the old `input_index` variable was never incremented and only
        -- served as a constant loop base for exactly three iterations)
        for _ = 1, 3 do
            sender:tx_add_input(INPUT_BOX_ADDRESS, APP_ADDRESS, ECHO_MSG)
        end

        -- react to the last sealed epoch, but only once the dave node has
        -- created the tournament's working directory
        local root_tournament = last_sealed_epoch.tournament
        local work_path = string.format("/compute_data/%s", root_tournament)
        if helper.exists(work_path) then
            print(string.format("sybil player attacking epoch %d",
                last_sealed_epoch.epoch_number))
            local epoch_machine_path = string.format("/rollups_data/%d/0", last_sealed_epoch.epoch_number)
            local player_coroutines = setup_players(root_tournament, epoch_machine_path)
            run_players(player_coroutines)
        end
    end
    -- blockchain_utils.advance_time(EPOCH_TIME, blockchain_constants.endpoint)
    time.sleep(SLEEP_TIME)
end

-- NOTE: unreachable — the loop above never breaks; kept for symmetry with
-- the greeting above.
print("Good-bye, world!")
-- Extend Lua's module search paths for the rollups test harness.

-- compute-test helpers, shared client-lua modules, and the cartesi machine
-- Lua bindings (appended in this order, matching the original setup)
package.path = table.concat({
    package.path,
    "../compute/?.lua",
    "../../client-lua/?.lua",
    "/opt/cartesi/lib/lua/5.4/?.lua",
}, ";")

-- cartesi machine native (C) modules
package.cpath = package.cpath .. ";/opt/cartesi/lib/lua/5.4/?.so"