diff --git a/.gitignore b/.gitignore index 207f4551..ce66f6cb 100644 --- a/.gitignore +++ b/.gitignore @@ -7,8 +7,8 @@ target/ snapshots/ common-rs/Cargo.lock prt/client-rs/Cargo.lock -prt/lua_poc/outputs/ -prt/lua_poc/pixels/ +prt/tests/compute/outputs/ +prt/tests/compute/pixels/ node_modules **/contract-bindings/src/contract **/contract-bindings/Cargo.lock diff --git a/cartesi-rollups/contracts/deploy_anvil.sh b/cartesi-rollups/contracts/deploy_anvil.sh new file mode 100755 index 00000000..2fc2e0cb --- /dev/null +++ b/cartesi-rollups/contracts/deploy_anvil.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -euo pipefail + +INITIAL_HASH=`xxd -p -c32 "${MACHINE_PATH}/hash"` + +export PRIVATE_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 + +forge script \ + script/DaveConsensus.s.sol \ + --fork-url "http://127.0.0.1:8545" \ + --broadcast \ + --sig "run(bytes32)" \ + "${INITIAL_HASH}" \ + -vvvv diff --git a/cartesi-rollups/contracts/foundry.toml b/cartesi-rollups/contracts/foundry.toml index f7298151..6c7b826f 100644 --- a/cartesi-rollups/contracts/foundry.toml +++ b/cartesi-rollups/contracts/foundry.toml @@ -14,4 +14,6 @@ allow_paths = [ '../../machine/step/', ] +solc-version = "0.8.27" + # See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options diff --git a/cartesi-rollups/contracts/script/DaveConsensus.s.sol b/cartesi-rollups/contracts/script/DaveConsensus.s.sol new file mode 100644 index 00000000..7d662f60 --- /dev/null +++ b/cartesi-rollups/contracts/script/DaveConsensus.s.sol @@ -0,0 +1,26 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +pragma solidity ^0.8.17; + +import {Script} from "forge-std/Script.sol"; + +import {Machine} from "prt-contracts/Machine.sol"; + +import "prt-contracts/tournament/factories/MultiLevelTournamentFactory.sol"; +import "rollups-contracts/inputs/InputBox.sol"; +import "src/DaveConsensus.sol"; + 
+contract DaveConcensusScript is Script { + function run(Machine.Hash initialHash) external { + vm.startBroadcast(vm.envUint("PRIVATE_KEY")); + + InputBox inputBox = new InputBox(); + MultiLevelTournamentFactory factory = new MultiLevelTournamentFactory( + new TopTournamentFactory(), new MiddleTournamentFactory(), new BottomTournamentFactory() + ); + new DaveConsensus(inputBox, address(0x0), factory, initialHash); + + vm.stopBroadcast(); + } +} diff --git a/cartesi-rollups/node/Cargo.toml b/cartesi-rollups/node/Cargo.toml index c462257d..c133ccaf 100644 --- a/cartesi-rollups/node/Cargo.toml +++ b/cartesi-rollups/node/Cargo.toml @@ -44,5 +44,6 @@ clap = { version = "4.5.7", features = ["derive", "env"] } clap_derive = "=4.5.13" futures = "0.3" log = "0.4" +num-traits = "0.2.19" thiserror = "1.0" tokio = { version = "1", features = ["full"] } diff --git a/cartesi-rollups/node/Dockerfile.test b/cartesi-rollups/node/Dockerfile.test index 9cfee6f5..946e4cc2 100644 --- a/cartesi-rollups/node/Dockerfile.test +++ b/cartesi-rollups/node/Dockerfile.test @@ -15,7 +15,7 @@ WORKDIR /app RUN wget https://github.com/cartesi/image-kernel/releases/download/v0.20.0/linux-6.5.13-ctsi-1-v0.20.0.bin \ -O ./linux.bin -RUN wget https://github.com/cartesi/machine-emulator-tools/releases/download/v0.15.0/rootfs-tools-v0.15.0.ext2 \ +RUN wget https://github.com/cartesi/machine-emulator-tools/releases/download/v0.16.1/rootfs-tools-v0.16.1.ext2 \ -O ./rootfs.ext2 RUN cartesi-machine --ram-image=./linux.bin \ diff --git a/cartesi-rollups/node/blockchain-reader/Cargo.toml b/cartesi-rollups/node/blockchain-reader/Cargo.toml index 0d18f90a..6ed1f8a5 100644 --- a/cartesi-rollups/node/blockchain-reader/Cargo.toml +++ b/cartesi-rollups/node/blockchain-reader/Cargo.toml @@ -12,14 +12,15 @@ repository = { workspace = true } [dependencies] rollups-state-manager = { workspace = true } -alloy = { workspace = true } -async-recursion = { workspace = true } cartesi-dave-contracts = { workspace = true } 
cartesi-rollups-contracts = { workspace = true } + +alloy = { workspace = true } +alloy-rpc-types-eth = "0.3.1" +async-recursion = { workspace = true } clap = { workspace = true } clap_derive = { workspace = true } +log = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } - -alloy-rpc-types-eth = "0.3.1" -num-traits = "0.2.19" +num-traits = { workspace = true } diff --git a/cartesi-rollups/node/blockchain-reader/src/lib.rs b/cartesi-rollups/node/blockchain-reader/src/lib.rs index 12a07492..897272d7 100644 --- a/cartesi-rollups/node/blockchain-reader/src/lib.rs +++ b/cartesi-rollups/node/blockchain-reader/src/lib.rs @@ -7,6 +7,7 @@ use crate::error::{ProviderErrors, Result}; use alloy::{ contract::{Error, Event}, eips::BlockNumberOrTag::Finalized, + hex::ToHexExt, providers::{ network::primitives::BlockTransactionsKind, Provider, ProviderBuilder, RootProvider, }, @@ -17,8 +18,10 @@ use alloy_rpc_types_eth::Topic; use async_recursion::async_recursion; use clap::Parser; use error::BlockchainReaderError; +use log::debug; use num_traits::cast::ToPrimitive; use std::{ + iter::Peekable, marker::{Send, Sync}, str::FromStr, sync::Arc, @@ -29,18 +32,21 @@ use cartesi_dave_contracts::daveconsensus::DaveConsensus::EpochSealed; use cartesi_rollups_contracts::inputbox::InputBox::InputAdded; use rollups_state_manager::{Epoch, Input, InputId, StateManager}; +const DEVNET_CONCENSUS_ADDRESS: &str = "0x5FC8d32690cc91D4c39d9d3abcBD16989F875707"; +const DEVNET_INPUT_BOX_ADDRESS: &str = "0x5FbDB2315678afecb367f032d93F642f64180aa3"; + #[derive(Debug, Clone, Parser)] #[command(name = "cartesi_rollups_config")] #[command(about = "Addresses of Cartesi Rollups")] pub struct AddressBook { /// address of app - #[arg(long, env)] + #[arg(long, env, default_value_t = Address::ZERO)] app: Address, /// address of Dave consensus - #[arg(long, env)] + #[arg(long, env, default_value = DEVNET_CONCENSUS_ADDRESS)] pub consensus: Address, /// address of input box - 
#[arg(long, env)] + #[arg(long, env, default_value = DEVNET_INPUT_BOX_ADDRESS)] input_box: Address, } @@ -85,9 +91,11 @@ where pub async fn start(&mut self) -> Result<(), SM> { loop { let current_block = self.provider.latest_finalized_block().await?; - self.advance(self.prev_block, current_block).await?; - self.prev_block = current_block; + if current_block > self.prev_block { + self.advance(self.prev_block, current_block).await?; + self.prev_block = current_block; + } tokio::time::sleep(self.sleep_duration).await; } } @@ -116,13 +124,23 @@ where .collect_sealed_epochs(prev_block, current_block) .await?; + let last_sealed_epoch_opt = self + .state_manager + .last_sealed_epoch() + .map_err(|e| BlockchainReaderError::StateManagerError(e))?; + let mut merged_sealed_epochs = Vec::new(); + if let Some(last_sealed_epoch) = last_sealed_epoch_opt { + merged_sealed_epochs.push(last_sealed_epoch); + } + merged_sealed_epochs.extend(sealed_epochs.clone()); + let merged_sealed_epochs_iter = merged_sealed_epochs + .iter() + .collect::>() + .into_iter(); + // read inputs from blockchain let inputs = self - .collect_inputs( - prev_block, - current_block, - sealed_epochs.iter().collect::>().into_iter(), - ) + .collect_inputs(prev_block, current_block, merged_sealed_epochs_iter) .await?; Ok((inputs, sealed_epochs)) @@ -144,18 +162,25 @@ where ) .await? 
.iter() - .map(|e| Epoch { - epoch_number: e - .0 - .epochNumber - .to_u64() - .expect("fail to convert epoch number"), - epoch_boundary: e - .0 - .blockNumberUpperBound - .to_u64() - .expect("fail to convert epoch boundary"), - root_tournament: e.0.tournament.to_string(), + .map(|e| { + let epoch = Epoch { + epoch_number: e + .0 + .epochNumber + .to_u64() + .expect("fail to convert epoch number"), + epoch_boundary: e + .0 + .blockNumberUpperBound + .to_u64() + .expect("fail to convert epoch boundary"), + root_tournament: e.0.tournament.to_string(), + }; + debug!( + "epoch received: epoch_number {}, epoch_boundary {}, root_tournament {}", + epoch.epoch_number, epoch.epoch_boundary, epoch.root_tournament + ); + epoch }) .collect()) } @@ -193,61 +218,64 @@ where }; let mut inputs = vec![]; - let mut input_events_iter = input_events.iter(); + let mut input_events_peekable = input_events.iter().peekable(); for epoch in sealed_epochs_iter { + if last_epoch_number > epoch.epoch_number { + continue; + } // iterate through newly sealed epochs, fill in the inputs accordingly - let inputs_of_epoch = self - .construct_input_ids( - epoch.epoch_number, - epoch.epoch_boundary, - &mut next_input_index_in_epoch, - &mut input_events_iter, - ) - .await; + let inputs_of_epoch = self.construct_input_ids( + epoch.epoch_number, + epoch.epoch_boundary, + &mut next_input_index_in_epoch, + &mut input_events_peekable, + ); inputs.extend(inputs_of_epoch); last_epoch_number = epoch.epoch_number + 1; } // all remaining inputs belong to an epoch that's not sealed yet - let inputs_of_epoch = self - .construct_input_ids( - last_epoch_number, - u64::MAX, - &mut next_input_index_in_epoch, - &mut input_events_iter, - ) - .await; + let inputs_of_epoch = self.construct_input_ids( + last_epoch_number, + u64::MAX, + &mut next_input_index_in_epoch, + &mut input_events_peekable, + ); inputs.extend(inputs_of_epoch); Ok(inputs) } - async fn construct_input_ids( + fn construct_input_ids<'a>( &self, 
epoch_number: u64, epoch_boundary: u64, next_input_index_in_epoch: &mut u64, - input_events_iter: &mut impl Iterator, + input_events_peekable: &mut Peekable>, ) -> Vec { let mut inputs = vec![]; - while input_events_iter - .peekable() - .peek() - .expect("fail to get peek next input") - .1 - < epoch_boundary - { + while let Some(input_added) = input_events_peekable.peek() { + if input_added.1 >= epoch_boundary { + break; + } let input = Input { id: InputId { epoch_number, input_index_in_epoch: *next_input_index_in_epoch, }, - data: input_events_iter.next().unwrap().0.input.to_vec(), + data: input_added.0.input.to_vec(), }; - + debug!( + "input received: epoch_number {}, input_index {}, data 0x{}", + input.id.epoch_number, + input.id.input_index_in_epoch, + input.data.encode_hex() + ); + + input_events_peekable.next(); *next_input_index_in_epoch += 1; inputs.push(input); } diff --git a/cartesi-rollups/node/compute-runner/Cargo.toml b/cartesi-rollups/node/compute-runner/Cargo.toml index 7b9c0122..5dacc99c 100644 --- a/cartesi-rollups/node/compute-runner/Cargo.toml +++ b/cartesi-rollups/node/compute-runner/Cargo.toml @@ -14,3 +14,4 @@ cartesi-prt-core = { workspace = true } rollups-state-manager = { workspace = true } alloy = { workspace = true } +log = { workspace = true } diff --git a/cartesi-rollups/node/compute-runner/src/lib.rs b/cartesi-rollups/node/compute-runner/src/lib.rs index b9a167ae..bb7060b9 100644 --- a/cartesi-rollups/node/compute-runner/src/lib.rs +++ b/cartesi-rollups/node/compute-runner/src/lib.rs @@ -1,4 +1,5 @@ use alloy::sol_types::private::Address; +use log::error; use std::result::Result; use std::{str::FromStr, sync::Arc, time::Duration}; @@ -10,8 +11,8 @@ use cartesi_prt_core::{ use rollups_state_manager::StateManager; pub struct ComputeRunner { + arena_sender: EthArenaSender, config: BlockchainConfig, - sender: EthArenaSender, sleep_duration: Duration, state_manager: Arc, } @@ -20,11 +21,15 @@ impl ComputeRunner where ::Error: Send + Sync + 
'static, { - pub fn new(config: &BlockchainConfig, state_manager: Arc, sleep_duration: u64) -> Self { - let sender = EthArenaSender::new(&config).expect("fail to initialize sender"); + pub fn new( + arena_sender: EthArenaSender, + config: &BlockchainConfig, + state_manager: Arc, + sleep_duration: u64, + ) -> Self { Self { + arena_sender, config: config.clone(), - sender, sleep_duration: Duration::from_secs(sleep_duration), state_manager, } @@ -32,36 +37,50 @@ where pub async fn start(&mut self) -> Result<(), ::Error> { loop { - if let Some(last_sealed_epoch) = self.state_manager.last_epoch()? { - if let Some(snapshot) = self + // participate in last sealed epoch tournament + if let Some(last_sealed_epoch) = self.state_manager.last_sealed_epoch()? { + match self .state_manager - .snapshot(last_sealed_epoch.epoch_number, 0)? + .computation_hash(last_sealed_epoch.epoch_number)? { - // TODO: make sure all snapshots are available to compute - let inputs = self.state_manager.inputs(last_sealed_epoch.epoch_number)?; - let leafs = self - .state_manager - .machine_state_hashes(last_sealed_epoch.epoch_number)?; - let mut player = Player::new( - Some(inputs.into_iter().map(|i| Input(i)).collect()), - leafs - .into_iter() - .map(|l| { - Leaf( - l.0.as_slice() - .try_into() - .expect("fail to convert leaf from machine state hash"), - l.1, - ) - }) - .collect(), - &self.config, - snapshot, - Address::from_str(&last_sealed_epoch.root_tournament) - .expect("fail to convert tournament address"), - ) - .expect("fail to initialize compute player"); - let _ = player.react_once(&self.sender).await; + Some(_) => { + if let Some(snapshot) = self + .state_manager + .snapshot(last_sealed_epoch.epoch_number, 0)? 
+ { + let inputs = + self.state_manager.inputs(last_sealed_epoch.epoch_number)?; + let leafs = self + .state_manager + .machine_state_hashes(last_sealed_epoch.epoch_number)?; + let mut player = Player::new( + Some(inputs.into_iter().map(|i| Input(i)).collect()), + leafs + .into_iter() + .map(|l| { + Leaf( + l.0.as_slice().try_into().expect( + "fail to convert leafs from machine state hash", + ), + l.1, + ) + }) + .collect(), + &self.config, + snapshot, + Address::from_str(&last_sealed_epoch.root_tournament) + .expect("fail to convert tournament address"), + ) + .expect("fail to initialize compute player"); + let _ = player + .react_once(&self.arena_sender) + .await + .inspect_err(|e| error!("{e}")); + } + } + None => { + // wait for the `machine-runner` to insert the value + } } } std::thread::sleep(self.sleep_duration); diff --git a/cartesi-rollups/node/dave-rollups/src/lib.rs b/cartesi-rollups/node/dave-rollups/src/lib.rs index 6991c6d9..40d7b153 100644 --- a/cartesi-rollups/node/dave-rollups/src/lib.rs +++ b/cartesi-rollups/node/dave-rollups/src/lib.rs @@ -1,4 +1,4 @@ -use cartesi_prt_core::arena::BlockchainConfig; +use cartesi_prt_core::arena::{BlockchainConfig, EthArenaSender, SenderFiller}; use clap::Parser; use log::error; @@ -57,6 +57,7 @@ pub fn create_blockchain_reader_task( } pub fn create_compute_runner_task( + arena_sender: EthArenaSender, state_manager: Arc, parameters: &DaveParameters, ) -> JoinHandle<()> { @@ -64,6 +65,7 @@ pub fn create_compute_runner_task( spawn(async move { let mut compute_runner = ComputeRunner::new( + arena_sender, ¶ms.blockchain_config, state_manager, params.sleep_duration, @@ -78,6 +80,7 @@ pub fn create_compute_runner_task( } pub fn create_epoch_manager_task( + client: Arc, state_manager: Arc, parameters: &DaveParameters, ) -> JoinHandle<()> { @@ -85,7 +88,7 @@ pub fn create_epoch_manager_task( spawn(async move { let epoch_manager = EpochManager::new( - ¶ms.blockchain_config, + client, params.address_book.consensus, 
state_manager, params.sleep_duration, diff --git a/cartesi-rollups/node/dave-rollups/src/main.rs b/cartesi-rollups/node/dave-rollups/src/main.rs index bc1a74b8..19b65d8a 100644 --- a/cartesi-rollups/node/dave-rollups/src/main.rs +++ b/cartesi-rollups/node/dave-rollups/src/main.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use cartesi_prt_core::arena::EthArenaSender; use clap::Parser; use dave_rollups::{ create_blockchain_reader_task, create_compute_runner_task, create_epoch_manager_task, @@ -20,10 +21,14 @@ async fn main() -> Result<()> { ¶meters.path_to_db, )?)?); + let arena_sender = EthArenaSender::new(¶meters.blockchain_config)?; + let client = arena_sender.client(); + let blockchain_reader_task = create_blockchain_reader_task(state_manager.clone(), ¶meters); - let epoch_manager_task = create_epoch_manager_task(state_manager.clone(), ¶meters); + let epoch_manager_task = create_epoch_manager_task(client, state_manager.clone(), ¶meters); let machine_runner_task = create_machine_runner_task(state_manager.clone(), ¶meters); - let compute_runner_task = create_compute_runner_task(state_manager.clone(), ¶meters); + let compute_runner_task = + create_compute_runner_task(arena_sender, state_manager.clone(), ¶meters); let (_blockchain_reader_res, _epoch_manager_res, _machine_runner_res, _compute_runner_res) = futures::join!( blockchain_reader_task, diff --git a/cartesi-rollups/node/epoch-manager/Cargo.toml b/cartesi-rollups/node/epoch-manager/Cargo.toml index c292bd0d..9ef015cc 100644 --- a/cartesi-rollups/node/epoch-manager/Cargo.toml +++ b/cartesi-rollups/node/epoch-manager/Cargo.toml @@ -13,6 +13,9 @@ repository.workspace = true cartesi-dave-contracts = { workspace = true } rollups-state-manager = { workspace = true } cartesi-prt-core = { workspace = true } + alloy = { workspace = true } anyhow = { workspace = true } +log = { workspace = true } +num-traits = { workspace = true } tokio = { workspace = true } diff --git a/cartesi-rollups/node/epoch-manager/src/lib.rs 
b/cartesi-rollups/node/epoch-manager/src/lib.rs index 2e810b6f..d2f39f75 100644 --- a/cartesi-rollups/node/epoch-manager/src/lib.rs +++ b/cartesi-rollups/node/epoch-manager/src/lib.rs @@ -1,19 +1,18 @@ -use alloy::{ - network::EthereumWallet, providers::ProviderBuilder, signers::local::PrivateKeySigner, - sol_types::private::Address, -}; +use alloy::{hex::ToHexExt, sol_types::private::Address}; use anyhow::Result; -use std::{str::FromStr, sync::Arc, time::Duration}; +use log::{error, info}; +use num_traits::cast::ToPrimitive; +use std::{sync::Arc, time::Duration}; use cartesi_dave_contracts::daveconsensus; -use cartesi_prt_core::arena::{BlockchainConfig, SenderFiller}; +use cartesi_prt_core::arena::SenderFiller; use rollups_state_manager::StateManager; pub struct EpochManager { + client: Arc, consensus: Address, sleep_duration: Duration, state_manager: Arc, - client: Arc, } impl EpochManager @@ -21,28 +20,11 @@ where ::Error: Send + Sync + 'static, { pub fn new( - config: &BlockchainConfig, + client: Arc, consensus_address: Address, state_manager: Arc, sleep_duration: u64, ) -> Self { - let signer = PrivateKeySigner::from_str(config.web3_private_key.as_str()) - .expect("fail to construct signer"); - let wallet = EthereumWallet::from(signer); - - let url = config.web3_rpc_url.parse().expect("fail to parse url"); - let provider = ProviderBuilder::new() - .with_recommended_fillers() - .wallet(wallet) - .with_chain( - config - .web3_chain_id - .try_into() - .expect("fail to convert chain id"), - ) - .on_http(url); - let client = Arc::new(provider); - Self { consensus: consensus_address, sleep_duration: Duration::from_secs(sleep_duration), @@ -57,14 +39,25 @@ where let can_settle = dave_consensus.canSettle().call().await?; if can_settle.isFinished { - match self.state_manager.computation_hash(0)? { + match self.state_manager.computation_hash( + can_settle + .epochNumber + .to_u64() + .expect("fail to convert epoch number to u64"), + )? 
{ Some(computation_hash) => { - dave_consensus - .settle(can_settle.epochNumber) - .send() - .await? - .watch() - .await?; + info!( + "settle epoch {} with claim 0x{}", + can_settle.epochNumber, + computation_hash.encode_hex() + ); + match dave_consensus.settle(can_settle.epochNumber).send().await { + Ok(tx_builder) => { + let _ = tx_builder.watch().await.inspect_err(|e| error!("{}", e)); + } + // allow retry when errors happen + Err(e) => error!("{e}"), + } // TODO: if claim doesn't match, that can be a serious problem, send out alert } None => { @@ -72,7 +65,6 @@ where } } } - tokio::time::sleep(self.sleep_duration).await; } } diff --git a/cartesi-rollups/node/machine-runner/src/error.rs b/cartesi-rollups/node/machine-runner/src/error.rs index ea887fe2..722ab12d 100644 --- a/cartesi-rollups/node/machine-runner/src/error.rs +++ b/cartesi-rollups/node/machine-runner/src/error.rs @@ -14,6 +14,13 @@ pub enum MachineRunnerError { #[from] source: DigestError, }, + + #[error(transparent)] + IO { + #[from] + source: std::io::Error, + }, + #[error(transparent)] Machine { #[from] diff --git a/cartesi-rollups/node/machine-runner/src/lib.rs b/cartesi-rollups/node/machine-runner/src/lib.rs index 96024233..40f9a3a1 100644 --- a/cartesi-rollups/node/machine-runner/src/lib.rs +++ b/cartesi-rollups/node/machine-runner/src/lib.rs @@ -5,12 +5,12 @@ mod error; use alloy::sol_types::private::U256; use error::{MachineRunnerError, Result}; use std::{ + fs, path::{Path, PathBuf}, sync::Arc, time::Duration, }; -use cartesi_dave_arithmetic::max_uint; use cartesi_dave_merkle::{Digest, MerkleBuilder}; use cartesi_machine::{break_reason, configuration::RuntimeConfig, htif, machine::Machine}; use cartesi_prt_core::machine::constants::{LOG2_EMULATOR_SPAN, LOG2_INPUT_SPAN, LOG2_UARCH_SPAN}; @@ -82,6 +82,8 @@ where .map_err(|e| MachineRunnerError::StateManagerError(e))?; if self.epoch_number == latest_epoch { + // all inputs processed in current epoch + // epoch may still be open, come back 
later break Ok(()); } else { assert!(self.epoch_number < latest_epoch); @@ -98,6 +100,9 @@ where } fn advance_epoch(&mut self) -> Result<(), SM> { + if self.next_input_index_in_epoch == 0 { + self.take_snapshot()?; + } loop { let next = self .state_manager @@ -118,41 +123,43 @@ where } /// calculate computation hash for `self.epoch_number` - fn build_commitment(&self) -> Result, SM> { + fn build_commitment(&mut self) -> Result, SM> { // get all state hashes with repetitions for `self.epoch_number` - let state_hashes = self + let mut state_hashes = self .state_manager .machine_state_hashes(self.epoch_number) .map_err(|e| MachineRunnerError::StateManagerError(e))?; + let stride_count_in_epoch = + 1 << (LOG2_INPUT_SPAN + LOG2_EMULATOR_SPAN + LOG2_UARCH_SPAN - LOG2_STRIDE); + if state_hashes.len() == 0 { + // no inputs in current epoch, add machine state hash repeatedly + self.add_state_hash(stride_count_in_epoch)?; + state_hashes.push(( + self.machine.get_root_hash()?.as_bytes().to_vec(), + stride_count_in_epoch, + )); + } let computation_hash = { - if state_hashes.len() == 0 { - // no inputs in current epoch, reuse claim from previous epoch - self.state_manager - .computation_hash(self.epoch_number - 1) - .map_err(|e| MachineRunnerError::StateManagerError(e))? 
- .unwrap() - } else { - let mut builder = MerkleBuilder::default(); - let mut total_repetitions = 0; - for state_hash in &state_hashes { - total_repetitions += state_hash.1; - builder.append_repeated( - Digest::from_digest(&state_hash.0)?, - U256::from(state_hash.1), - ); - } - - let stride_count_in_epoch = - max_uint(LOG2_INPUT_SPAN + LOG2_EMULATOR_SPAN + LOG2_UARCH_SPAN - LOG2_STRIDE); + let mut builder = MerkleBuilder::default(); + let mut total_repetitions = 0; + for state_hash in &state_hashes { + total_repetitions += state_hash.1; + builder.append_repeated( + Digest::from_digest(&state_hash.0)?, + U256::from(state_hash.1), + ); + } + if stride_count_in_epoch > total_repetitions { + self.add_state_hash(stride_count_in_epoch - total_repetitions)?; builder.append_repeated( Digest::from_digest(&state_hashes.last().unwrap().0)?, - U256::from(stride_count_in_epoch - total_repetitions + 1), + U256::from(stride_count_in_epoch - total_repetitions), ); - - let tree = builder.build(); - tree.root_hash().slice().to_vec() } + + let tree = builder.build(); + tree.root_hash().slice().to_vec() }; Ok(computation_hash) @@ -222,15 +229,27 @@ where } fn take_snapshot(&self) -> Result<(), SM> { - // TODO: make sure "/rollups_data/{epoch_number}" exists - let snapshot_path = PathBuf::from(format!( - "/rollups_data/{}/{}", - self.epoch_number, + let epoch_path = PathBuf::from(format!("/rollups_data/{}", self.epoch_number)); + let snapshot_path = epoch_path.join(format!( + "{}", self.next_input_index_in_epoch << LOG2_EMULATOR_SPAN )); + if !epoch_path.exists() { + fs::create_dir_all(&epoch_path)?; + } if !snapshot_path.exists() { + self.state_manager + .add_snapshot( + snapshot_path + .to_str() + .expect("fail to convert snapshot path"), + self.epoch_number, + self.next_input_index_in_epoch, + ) + .map_err(|e| MachineRunnerError::StateManagerError(e))?; self.machine.store(&snapshot_path)?; } + Ok(()) } } @@ -284,8 +303,8 @@ mod tests { Ok(self.inputs.len() as u64) } - fn 
last_epoch(&self) -> Result> { - panic!("last_epoch not implemented in mock version"); + fn last_sealed_epoch(&self) -> Result> { + panic!("last_sealed_epoch not implemented in mock version"); } fn input(&self, id: &InputId) -> Result> { diff --git a/cartesi-rollups/node/state-manager/src/lib.rs b/cartesi-rollups/node/state-manager/src/lib.rs index d1b9d978..5ec7eee6 100644 --- a/cartesi-rollups/node/state-manager/src/lib.rs +++ b/cartesi-rollups/node/state-manager/src/lib.rs @@ -83,7 +83,7 @@ pub trait StateManager { fn epoch(&self, epoch_number: u64) -> Result, Self::Error>; fn epoch_count(&self) -> Result; - fn last_epoch(&self) -> Result, Self::Error>; + fn last_sealed_epoch(&self) -> Result, Self::Error>; fn input(&self, id: &InputId) -> Result, Self::Error>; fn inputs(&self, epoch_number: u64) -> Result>, Self::Error>; fn input_count(&self, epoch_number: u64) -> Result; diff --git a/cartesi-rollups/node/state-manager/src/persistent_state_access.rs b/cartesi-rollups/node/state-manager/src/persistent_state_access.rs index 429c78f0..2d97c763 100644 --- a/cartesi-rollups/node/state-manager/src/persistent_state_access.rs +++ b/cartesi-rollups/node/state-manager/src/persistent_state_access.rs @@ -37,9 +37,9 @@ impl StateManager for PersistentStateAccess { consensus_data::epoch_count(&conn) } - fn last_epoch(&self) -> Result> { + fn last_sealed_epoch(&self) -> Result> { let conn = self.connection.lock().unwrap(); - consensus_data::last_epoch(&conn) + consensus_data::last_sealed_epoch(&conn) } fn input(&self, id: &InputId) -> Result> { @@ -279,12 +279,6 @@ impl StateManager for PersistentStateAccess { res.push(row?); } - if res.len() == 0 { - return Err(PersistentStateAccessError::DataNotFound { - description: "machine state hash doesn't exist".to_owned(), - }); - } - Ok(res) } diff --git a/cartesi-rollups/node/state-manager/src/sql/consensus_data.rs b/cartesi-rollups/node/state-manager/src/sql/consensus_data.rs index aa7c04de..7cff76b7 100644 --- 
a/cartesi-rollups/node/state-manager/src/sql/consensus_data.rs +++ b/cartesi-rollups/node/state-manager/src/sql/consensus_data.rs @@ -201,7 +201,7 @@ fn insert_epoch_statement<'a>(conn: &'a rusqlite::Connection) -> Result Result> { +pub fn last_sealed_epoch(conn: &rusqlite::Connection) -> Result> { let mut stmt = conn.prepare( "\ SELECT epoch_number, epoch_boundary, root_tournament FROM epochs diff --git a/cartesi-rollups/tests/rollups/Dockerfile b/cartesi-rollups/tests/rollups/Dockerfile new file mode 100644 index 00000000..6f33cffb --- /dev/null +++ b/cartesi-rollups/tests/rollups/Dockerfile @@ -0,0 +1,66 @@ +FROM rust:1.81.0-bookworm AS chef + +ENV CARGO_REGISTRIES_CARTESI_INDEX=https://github.com/cartesi/crates-index +RUN rustup component add rustfmt +RUN cargo install cargo-chef +RUN apt-get update && \ + apt-get install -y clang libslirp0 + +FROM chef AS planner +COPY ./machine/rust-bindings /app/machine/rust-bindings +COPY ./common-rs /app/common-rs +COPY ./prt/client-rs /app/prt/client-rs +COPY ./prt/contract-bindings /app/prt/contract-bindings +COPY ./cartesi-rollups/contract-bindings /app/cartesi-rollups/contract-bindings +COPY ./cartesi-rollups/node /app/cartesi-rollups/node + +WORKDIR /app/cartesi-rollups/node +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +COPY ./machine /app/machine +COPY ./common-rs /app/common-rs +COPY ./prt/client-rs /app/prt/client-rs +COPY ./prt/contract-bindings /app/prt/contract-bindings +COPY ./cartesi-rollups/contract-bindings /app/cartesi-rollups/contract-bindings +COPY ./.git /app/.git +COPY --from=planner /app/cartesi-rollups/node/recipe.json /app/cartesi-rollups/node/recipe.json + +# Build dependencies - this is the caching Docker layer! 
+WORKDIR /app/cartesi-rollups/node +RUN cargo chef cook --release --recipe-path recipe.json + +# Build application +COPY --from=ethereum/solc:0.8.27 /usr/bin/solc /usr/bin/solc +RUN chmod u+x /usr/bin/solc + +COPY ./cartesi-rollups /app/cartesi-rollups + +WORKDIR /app/cartesi-rollups/node +RUN cargo build --release --bin dave-rollups + +FROM --platform=linux/amd64 cartesi/machine-emulator:0.18.1 + +USER root +RUN apt-get update && \ + apt-get install -y procps curl xxd clang sqlite3 +ENV FOUNDRY_NIGHTLY nightly-805d7cee81e78e9163b8ce3d86a0c3beb39772d4 +RUN curl -sSL https://github.com/foundry-rs/foundry/releases/download/${FOUNDRY_NIGHTLY}/foundry_nightly_linux_$(dpkg --print-architecture).tar.gz | \ + tar -zx -C /usr/local/bin + +# prepare echo machine +WORKDIR /root/program/ +COPY ./cartesi-rollups/tests/rollups/program/echo/echo-program.tar.gz /root/program/ +RUN tar -zx -f /root/program/echo-program.tar.gz + +COPY ./machine/step /root/machine/step +COPY ./prt /root/prt +COPY ./cartesi-rollups /root/cartesi-rollups +COPY --from=builder /app/cartesi-rollups/node/target/release/dave-rollups /root/cartesi-rollups/tests/rollups/dave-rollups + +WORKDIR /root/cartesi-rollups/contracts +RUN forge --version +RUN forge build + +WORKDIR /root/cartesi-rollups/tests/rollups +ENTRYPOINT ["./prt_rollups.lua"] diff --git a/cartesi-rollups/tests/rollups/Makefile b/cartesi-rollups/tests/rollups/Makefile new file mode 100644 index 00000000..bf1d6b9e --- /dev/null +++ b/cartesi-rollups/tests/rollups/Makefile @@ -0,0 +1,18 @@ +ECHO_MACHINE_PATH := "/root/program/echo-program" + +help: + @echo ' create-image - create `prt-rollups:test` docker image' + @echo ' test-echo - run PRT echo test' + +create-image: + @docker build -t cartesi/prt-rollups:test -f Dockerfile ../../../ + +test-echo: create-image + @docker run --rm --name rollups-echo-test \ + --env MACHINE_PATH=$(ECHO_MACHINE_PATH) \ + cartesi/prt-rollups:test + + + + +.PHONY: help create-image test-echo diff --git 
a/cartesi-rollups/tests/rollups/README.md b/cartesi-rollups/tests/rollups/README.md new file mode 100644 index 00000000..296a1370 --- /dev/null +++ b/cartesi-rollups/tests/rollups/README.md @@ -0,0 +1,24 @@ +# PRT Rollups test + +This directory contains a rollups node written in Rust. +The node test will be conducted with a Lua orchestrator script spawning an honest rollups node in the background to advance the rollups states and to defend the application. The Lua orchestrator script also spawns multiple [dishonest nodes](../../../prt/tests/compute/README.md) trying to tamper with the rollups states. + +Remember to either clone the repository with the flag `--recurse-submodules`, or run `git submodule update --recursive --init` after cloning. +You need a docker installation to run the Dave Lua node. + +## Build test image + +In order to run tests in this directory, a docker image must be built to prepare the test environment. +Once the test image is built, the user can run all supported tests by swapping the `MACHINE_PATH` env variable. + +``` +make create-image +``` + +## Run echo test + +A simple [echo program](./program/echo/) is provided to test the rollups. 
+ +``` +make test-echo +``` diff --git a/cartesi-rollups/tests/rollups/dave/node.lua b/cartesi-rollups/tests/rollups/dave/node.lua new file mode 100644 index 00000000..2ae74d2b --- /dev/null +++ b/cartesi-rollups/tests/rollups/dave/node.lua @@ -0,0 +1,40 @@ +local helper = require "utils.helper" + +local function start_dave_node(machine_path, db_path) + local cmd = string.format( + [[sh -c "echo $$ ; exec env MACHINE_PATH='%s' PATH_TO_DB='%s' \ + SLEEP_DURATION=4 RUST_BACKTRACE=1 RUST_LOG='info' RUST_BACKTRACE=full \ + ./dave-rollups > dave.log 2>&1"]], + machine_path, db_path) + + local reader = io.popen(cmd) + assert(reader, "`popen` returned nil reader") + + local pid = tonumber(reader:read()) + + local handle = { reader = reader, pid = pid } + setmetatable(handle, { + __gc = function(t) + helper.stop_pid(t.reader, t.pid) + end + }) + + print(string.format("Dave node running with pid %d", pid)) + return handle +end + +local Dave = {} +Dave.__index = Dave + +function Dave:new(machine_path) + local n = {} + + local handle = start_dave_node(machine_path, "./dave.db") + + n._handle = handle + + setmetatable(n, self) + return n +end + +return Dave diff --git a/cartesi-rollups/tests/rollups/dave/reader.lua b/cartesi-rollups/tests/rollups/dave/reader.lua new file mode 100644 index 00000000..c277b722 --- /dev/null +++ b/cartesi-rollups/tests/rollups/dave/reader.lua @@ -0,0 +1,205 @@ +local Hash = require "cryptography.hash" +local eth_abi = require "utils.eth_abi" +local helper = require "utils.helper" + +local function parse_topics(json) + local _, _, topics = json:find( + [==["topics":%[([^%]]*)%]]==] + ) + + local t = {} + for k, _ in string.gmatch(topics, [["(0x%x+)"]]) do + table.insert(t, k) + end + + return t +end + +local function parse_data(json, sig) + local _, _, data = json:find( + [==["data":"(0x%x+)"]==] + ) + + local decoded_data = eth_abi.decode_event_data(sig, data) + return decoded_data +end + +local function parse_meta(json) + local _, _, block_hash = 
json:find( + [==["blockHash":"(0x%x+)"]==] + ) + + local _, _, block_number = json:find( + [==["blockNumber":"(0x%x+)"]==] + ) + + local _, _, log_index = json:find( + [==["logIndex":"(0x%x+)"]==] + ) + + local t = { + block_hash = block_hash, + block_number = tonumber(block_number), + log_index = tonumber(log_index), + } + + return t +end + + +local function parse_logs(logs, data_sig) + local ret = {} + for k, _ in string.gmatch(logs, [[{[^}]*}]]) do + local emited_topics = parse_topics(k) + local decoded_data = parse_data(k, data_sig) + local meta = parse_meta(k) + table.insert(ret, { emited_topics = emited_topics, decoded_data = decoded_data, meta = meta }) + end + + return ret +end + +local function sanitize_string(s) + -- remove spaces, scientific notations and color code + return s:gsub("%s+", ""):gsub("%b[]", ""):gsub("\27%[[%d;]*m", "") +end + +local Reader = {} +Reader.__index = Reader + +function Reader:new(endpoint) + local reader = { + endpoint = assert(endpoint) + } + + setmetatable(reader, self) + return reader +end + +local cast_logs_template = [==[ +cast rpc -r "%s" eth_getLogs \ + '[{"fromBlock": "earliest", "toBlock": "latest", "address": "%s", "topics": [%s]}]' -w 2>&1 +]==] + +function Reader:_read_logs(contract_address, sig, topics, data_sig) + topics = topics or { false, false, false } + local encoded_sig = eth_abi.encode_sig(sig) + table.insert(topics, 1, encoded_sig) + assert(#topics == 4, "topics doesn't have four elements") + + local topics_strs = {} + for _, v in ipairs(topics) do + local s + if v then + s = '"' .. v .. 
'"' + else + s = "null" + end + table.insert(topics_strs, s) + end + local topic_str = table.concat(topics_strs, ", ") + + local cmd = string.format( + cast_logs_template, + self.endpoint, + contract_address, + topic_str + ) + + local handle = io.popen(cmd) + assert(handle) + local logs = handle:read "*a" + handle:close() + + if logs:find "Error" then + error(string.format("Read logs `%s` failed:\n%s", sig, logs)) + end + + local ret = parse_logs(logs, data_sig) + return ret +end + +local cast_call_template = [==[ +cast call --rpc-url "%s" "%s" "%s" %s 2>&1 +]==] + +function Reader:_call(address, sig, args) + local quoted_args = {} + for _, v in ipairs(args) do + table.insert(quoted_args, '"' .. v .. '"') + end + local args_str = table.concat(quoted_args, " ") + + local cmd = string.format( + cast_call_template, + self.endpoint, + address, + sig, + args_str + ) + + local handle = io.popen(cmd) + assert(handle) + + local ret = {} + local str = handle:read() + while str do + if str:find "Error" or str:find "error" then + local err_str = handle:read "*a" + handle:close() + error(string.format("Call `%s` failed:\n%s%s", sig, str, err_str)) + end + + table.insert(ret, str) + str = handle:read() + end + handle:close() + + return ret +end + +function Reader:read_epochs_sealed(concensus_address) + local sig = "EpochSealed(uint256,uint256,uint256,bytes32,address)" + local data_sig = "(uint256,uint256,uint256,bytes32,address)" + + local logs = self:_read_logs(concensus_address, sig, { false, false, false }, data_sig) + + local ret = {} + for k, v in ipairs(logs) do + local log = {} + log.meta = v.meta + + log.epoch_number = tonumber(v.decoded_data[1]) + log.block_lower_bound = tonumber(v.decoded_data[2]) + log.block_upper_bound = tonumber(v.decoded_data[3]) + log.initial_machine_state_hash = v.decoded_data[4] + log.tournament = v.decoded_data[5] + + ret[k] = log + end + + return ret +end + +function Reader:read_inputs_added(input_box_address) + local sig = 
"InputAdded(address,uint256,bytes)" + local data_sig = "(bytes)" + + local logs = self:_read_logs(input_box_address, sig, { false, false, false }, data_sig) + + local ret = {} + for k, v in ipairs(logs) do + local log = {} + log.meta = v.meta + + log.app_contract = v.emited_topics[2] + log.index = tonumber(v.emited_topics[3]) + log.data = v.decoded_data[1] + + ret[k] = log + end + + return ret +end + +return Reader diff --git a/cartesi-rollups/tests/rollups/dave/sender.lua b/cartesi-rollups/tests/rollups/dave/sender.lua new file mode 100644 index 00000000..106be9ba --- /dev/null +++ b/cartesi-rollups/tests/rollups/dave/sender.lua @@ -0,0 +1,90 @@ +local Hash = require "cryptography.hash" +local MerkleTree = require "cryptography.merkle_tree" + +local function quote_args(args, not_quote) + local quoted_args = {} + for _, v in ipairs(args) do + if type(v) == "table" and (getmetatable(v) == Hash or getmetatable(v) == MerkleTree) then + if not_quote then + table.insert(quoted_args, v:hex_string()) + else + table.insert(quoted_args, '"' .. v:hex_string() .. '"') + end + elseif type(v) == "table" then + if v._tag == "tuple" then + local qa = quote_args(v, true) + local ca = table.concat(qa, ",") + local sb = "'(" .. ca .. ")'" + table.insert(quoted_args, sb) + else + local qa = quote_args(v, true) + local ca = table.concat(qa, ",") + local sb = "'[" .. ca .. "]'" + table.insert(quoted_args, sb) + end + elseif not_quote then + table.insert(quoted_args, tostring(v)) + else + table.insert(quoted_args, '"' .. v .. 
'"') + end + end + + return quoted_args +end + + +local Sender = {} +Sender.__index = Sender + +function Sender:new(pk, endpoint) + local sender = { + pk = pk, + endpoint = endpoint + } + + setmetatable(sender, self) + return sender +end + +local cast_send_template = [[ +cast send --private-key "%s" --rpc-url "%s" "%s" "%s" %s 2>&1 +]] + +function Sender:_send_tx(contract_address, sig, args) + local quoted_args = quote_args(args) + local args_str = table.concat(quoted_args, " ") + + local cmd = string.format( + cast_send_template, + self.pk, + self.endpoint, + contract_address, + sig, + args_str + ) + + local handle = io.popen(cmd) + assert(handle) + + local ret = handle:read "*a" + if ret:find "Error" then + handle:close() + error(string.format("Send transaction `%s` reverted:\n%s", cmd, ret)) + end + + self.tx_count = self.tx_count + 1 + handle:close() +end + +function Sender:tx_add_input(input_box_address, app_contract_address, payload) + local sig = [[addInput(address,bytes)(bytes32)]] + return pcall( + self._send_tx, + self, + input_box_address, + sig, + { app_contract_address, payload } + ) +end + +return Sender diff --git a/cartesi-rollups/tests/rollups/program/.dockerignore b/cartesi-rollups/tests/rollups/program/.dockerignore new file mode 100644 index 00000000..643b3a57 --- /dev/null +++ b/cartesi-rollups/tests/rollups/program/.dockerignore @@ -0,0 +1 @@ +**/*tar.gz diff --git a/cartesi-rollups/tests/rollups/program/.gitignore b/cartesi-rollups/tests/rollups/program/.gitignore new file mode 100644 index 00000000..643b3a57 --- /dev/null +++ b/cartesi-rollups/tests/rollups/program/.gitignore @@ -0,0 +1 @@ +**/*tar.gz diff --git a/cartesi-rollups/tests/rollups/program/echo/.gitignore b/cartesi-rollups/tests/rollups/program/echo/.gitignore new file mode 100644 index 00000000..6e9e4252 --- /dev/null +++ b/cartesi-rollups/tests/rollups/program/echo/.gitignore @@ -0,0 +1,2 @@ +simple-linux-program/ +simple-program/ diff --git 
a/cartesi-rollups/tests/rollups/program/echo/Dockerfile b/cartesi-rollups/tests/rollups/program/echo/Dockerfile new file mode 100644 index 00000000..9361a52b --- /dev/null +++ b/cartesi-rollups/tests/rollups/program/echo/Dockerfile @@ -0,0 +1,13 @@ +FROM cartesi/machine-emulator:0.18.1 +USER root +RUN apt-get update && \ + apt-get install -y wget + +RUN wget https://github.com/cartesi/image-kernel/releases/download/v0.20.0/linux-6.5.13-ctsi-1-v0.20.0.bin \ + -O ./linux.bin +RUN wget https://github.com/cartesi/machine-emulator-tools/releases/download/v0.16.1/rootfs-tools-v0.16.1.ext2 \ + -O ./rootfs.ext2 + +COPY ./gen_machine_echo.sh . +RUN chmod +x gen_machine_echo.sh +RUN ./gen_machine_echo.sh && tar -zvcf /echo-program.tar.gz echo-program diff --git a/cartesi-rollups/tests/rollups/program/echo/README.md b/cartesi-rollups/tests/rollups/program/echo/README.md new file mode 100644 index 00000000..f726fb17 --- /dev/null +++ b/cartesi-rollups/tests/rollups/program/echo/README.md @@ -0,0 +1,10 @@ +# Simple echo program + +## Generate program + +From this directory, run the following: + +``` +docker build -t echo:test . +docker cp $(docker create echo:test):/echo-program.tar.gz . 
+``` diff --git a/cartesi-rollups/tests/rollups/program/echo/gen_machine_echo.sh b/cartesi-rollups/tests/rollups/program/echo/gen_machine_echo.sh new file mode 100755 index 00000000..99252fff --- /dev/null +++ b/cartesi-rollups/tests/rollups/program/echo/gen_machine_echo.sh @@ -0,0 +1,5 @@ +#!/bin/bash +cartesi-machine --ram-image=./linux.bin \ + --flash-drive=label:root,filename:./rootfs.ext2 \ + --no-rollback --store=./echo-program \ + -- "ioctl-echo-loop --vouchers=1 --notices=1 --reports=1 --verbose=1" diff --git a/cartesi-rollups/tests/rollups/program/echo/hash b/cartesi-rollups/tests/rollups/program/echo/hash new file mode 100644 index 00000000..585dce09 --- /dev/null +++ b/cartesi-rollups/tests/rollups/program/echo/hash @@ -0,0 +1,2 @@ +-"B+y + ˁ \ No newline at end of file diff --git a/cartesi-rollups/tests/rollups/prt_rollups.lua b/cartesi-rollups/tests/rollups/prt_rollups.lua new file mode 100755 index 00000000..71de905d --- /dev/null +++ b/cartesi-rollups/tests/rollups/prt_rollups.lua @@ -0,0 +1,230 @@ +#!/usr/bin/lua +require "setup_path" + +-- amount of time sleep between each react +local SLEEP_TIME = 4 +-- amount of time to fastforward if `IDLE_LIMIT` is reached +local FAST_FORWARD_TIME = 90 +-- amount of time to fastforward to advance an epoch +local EPOCH_TIME = 60 * 60 * 24 * 7 +-- delay time for blockchain node to be ready +local NODE_DELAY = 3 +-- number of fake commitment to make +local FAKE_COMMITMENT_COUNT = 1 +-- number of idle players +local IDLE_PLAYER_COUNT = 0 +-- concensus contract address in anvil deployment +local CONCENSUS_ADDRESS = "0x5FC8d32690cc91D4c39d9d3abcBD16989F875707" +-- input contract address in anvil deployment +local INPUT_BOX_ADDRESS = "0x5FbDB2315678afecb367f032d93F642f64180aa3"; +-- app contract address in anvil deployment +local APP_ADDRESS = "0x0000000000000000000000000000000000000000"; +-- Hello from Dave! 
+local ECHO_MSG = "0x48656c6c6f2076726f6d204461766521" +-- Encoded Input blob +-- 31337 +-- 0x0000000000000000000000000000000000000000 +-- 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 +-- 1 +-- 0 +-- 1 +-- 0 +-- "0x48656c6c6f2076726f6d204461766521" +-- cast abi-encode "EvmAdvance(uint256,address,address,uint256,uint256,uint256,uint256,bytes)" 31337 "0x0000000000000000000000000000000000000000" "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" 1 1622547800 1 0 "0x48656c6c6f2076726f6d204461766521" +local ENCODED_INPUT = +"0x0000000000000000000000000000000000000000000000000000000000007a690000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb9226600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000060b61d58000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000001048656c6c6f2076726f6d20446176652100000000000000000000000000000000" + +-- Required Modules +local new_scoped_require = require "utils.scoped_require" + +local helper = require "utils.helper" +local blockchain_utils = require "blockchain.utils" +local time = require "utils.time" +local blockchain_constants = require "blockchain.constants" +local Blockchain = require "blockchain.node" +local CommitmentBuilder = require "computation.commitment" +local Dave = require "dave.node" +local Hash = require "cryptography.hash" +local Machine = require "computation.machine" +local MerkleBuilder = require "cryptography.merkle_builder" +local Reader = require "dave.reader" +local Sender = require "dave.sender" + +local function get_root_constants(root_tournament) + local TournamentReader = require "player.reader" + local reader = TournamentReader:new(blockchain_constants.endpoint) + local root_constants = 
reader:read_constants(root_tournament) + + return root_constants +end + +local ROOT_LEAFS_QUERY = +[[sqlite3 /compute_data/%s/db 'select level,base_cycle,compute_leaf_index,repetitions,HEX(compute_leaf) +from compute_leafs where level=0 ORDER BY compute_leaf_index ASC']] +local function build_root_commitment_from_db(machine_path, root_tournament) + local builder = MerkleBuilder:new() + local machine = Machine:new_from_path(machine_path) + local initial_state = machine:state() + + local handle = io.popen(string.format(ROOT_LEAFS_QUERY, root_tournament)) + assert(handle) + local rows = handle:read "*a" + handle:close() + + if rows:find "Error" then + error(string.format("Read leafs failed:\n%s", rows)) + end + + -- Iterate over each line in the input data + for line in rows:gmatch("[^\n]+") do + local level, base_cycle, compute_leafs_index, repetitions, compute_leaf = line:match( + "([^|]+)|([^|]+)|([^|]+)|([^|]+)|([^|]+)") + -- Convert values to appropriate types + repetitions = tonumber(repetitions) + compute_leaf = Hash:from_digest_hex("0x" .. compute_leaf) + + builder:add(compute_leaf, repetitions) + end + + return builder:build(initial_state.root_hash) +end + +local INPUTS_QUERY = +[[sqlite3 /compute_data/%s/db 'select HEX(input) +from inputs ORDER BY input_index ASC']] +local function get_inputs_from_db(root_tournament) + local handle = io.popen(string.format(INPUTS_QUERY, root_tournament)) + assert(handle) + local rows = handle:read "*a" + handle:close() + + if rows:find "Error" then + error(string.format("Read inputs failed:\n%s", rows)) + end + + local inputs = {} + -- Iterate over each line in the input data + for line in rows:gmatch("[^\n]+") do + local input = line:match("([^|]+)") + table.insert(inputs, "0x" .. 
input) + end + + return inputs +end + +-- Function to setup players +local function setup_players(root_tournament, machine_path) + local root_constants = get_root_constants(root_tournament) + + local player_coroutines = {} + local player_index = 1 + print("Calculating root commitment...") + -- local snapshot_dir = string.format("/compute_data/%s", root_tournament) + -- local builder = CommitmentBuilder:new(machine_path, snapshot_dir) + -- local root_commitment = builder:build(0, 0, root_constants.log2_step, root_constants.height, inputs) + local root_commitment = build_root_commitment_from_db(machine_path, root_tournament) + local inputs = get_inputs_from_db(root_tournament) + + if FAKE_COMMITMENT_COUNT > 0 then + print(string.format("Setting up dishonest player with %d fake commitments", FAKE_COMMITMENT_COUNT)) + local scoped_require = new_scoped_require(_ENV) + local start_sybil = scoped_require "runners.sybil_runner" + player_coroutines[player_index] = start_sybil(player_index + 1, machine_path, root_commitment, root_tournament, + FAKE_COMMITMENT_COUNT, inputs) + player_index = player_index + 1 + end + + if IDLE_PLAYER_COUNT > 0 then + print(string.format("Setting up %d idle players", IDLE_PLAYER_COUNT)) + local scoped_require = new_scoped_require(_ENV) + local start_idle = scoped_require "runners.idle_runner" + for _ = 1, IDLE_PLAYER_COUNT do + player_coroutines[player_index] = start_idle(player_index + 1, machine_path, root_tournament) + player_index = player_index + 1 + end + end + + return player_coroutines +end + +-- Function to run players +local function run_players(player_coroutines) + while true do + local idle = true + local has_live_coroutine = false + for i, c in ipairs(player_coroutines) do + if c then + local success, ret = coroutine.resume(c) + local status = coroutine.status(c) + + if status == "dead" then + player_coroutines[i] = false + end + if not success then + print(string.format("coroutine %d fail to resume with error: %s", i, ret)) + 
elseif ret then + has_live_coroutine = true + idle = idle and ret.idle + end + end + end + + if not has_live_coroutine then + print("No active players, ending program...") + break + end + + if idle then + print(string.format("All players idle, fastforward blockchain for %d seconds...", FAST_FORWARD_TIME)) + blockchain_utils.advance_time(FAST_FORWARD_TIME, blockchain_constants.endpoint) + end + time.sleep(SLEEP_TIME) + end +end + +-- Main Execution +local rollups_machine_path = os.getenv("MACHINE_PATH") + +local blockchain_node = Blockchain:new() +time.sleep(NODE_DELAY) + +blockchain_utils.deploy_contracts() +time.sleep(NODE_DELAY) + +local dave_node = Dave:new(rollups_machine_path) +time.sleep(NODE_DELAY) + +local reader = Reader:new(blockchain_constants.endpoint) +local sender = Sender:new(blockchain_constants.pks[1], blockchain_constants.endpoint) + +print("Hello from Dave rollups lua prototype!") + +local input_index = 1 + +while true do + local sealed_epochs = reader:read_epochs_sealed(CONCENSUS_ADDRESS) + + if #sealed_epochs > 0 then + local last_sealed_epoch = sealed_epochs[#sealed_epochs] + local inputs = {} + for _ = input_index, input_index + 2 do + sender:tx_add_input(INPUT_BOX_ADDRESS, APP_ADDRESS, ECHO_MSG) + end + + -- react to last sealed epoch + local root_tournament = sealed_epochs[#sealed_epochs].tournament + local work_path = string.format("/compute_data/%s", root_tournament) + if helper.exists(work_path) then + print(string.format("sybil player attacking epoch %d", + last_sealed_epoch.epoch_number)) + local epoch_machine_path = string.format("/rollups_data/%d/0", last_sealed_epoch.epoch_number) + local player_coroutines = setup_players(root_tournament, epoch_machine_path) + run_players(player_coroutines) + end + end + -- TODO: send input + -- blockchain_utils.advance_time(EPOCH_TIME, blockchain_constants.endpoint) + time.sleep(SLEEP_TIME) +end + +print("Good-bye, world!") diff --git a/cartesi-rollups/tests/rollups/setup_path.lua 
b/cartesi-rollups/tests/rollups/setup_path.lua new file mode 100644 index 00000000..7286bfaf --- /dev/null +++ b/cartesi-rollups/tests/rollups/setup_path.lua @@ -0,0 +1,7 @@ +-- setup client-lua path +package.path = package.path .. ";../../../prt/client-lua/?.lua" +package.path = package.path .. ";../../../prt/tests/compute/?.lua" + +-- setup cartesi machine path +package.path = package.path .. ";/opt/cartesi/lib/lua/5.4/?.lua" +package.cpath = package.cpath .. ";/opt/cartesi/lib/lua/5.4/?.so" diff --git a/prt/client-lua/computation/commitment.lua b/prt/client-lua/computation/commitment.lua index 8bd1574f..257d48f8 100644 --- a/prt/client-lua/computation/commitment.lua +++ b/prt/client-lua/computation/commitment.lua @@ -7,6 +7,7 @@ local consts = require "computation.constants" local ulte = arithmetic.ulte local save_snapshot = true +local handle_rollups = false local function run_uarch_span(machine) assert(machine.ucycle == 0) @@ -38,7 +39,7 @@ local function build_small_machine_commitment(base_cycle, log2_stride_count, mac local machine_state = machine:state() if save_snapshot then -- taking snapshot for leafs to save time in next level - machine:take_snapshot(snapshot_dir, base_cycle) + machine:take_snapshot(snapshot_dir, base_cycle, handle_rollups) end local initial_state = machine_state.root_hash @@ -63,7 +64,7 @@ local function build_big_machine_commitment(base_cycle, log2_stride, log2_stride local machine_state = machine:state() if save_snapshot then -- taking snapshot for leafs to save time in next level - machine:take_snapshot(snapshot_dir, base_cycle) + machine:take_snapshot(snapshot_dir, base_cycle, handle_rollups) end local initial_state = machine_state.root_hash @@ -75,13 +76,13 @@ local function build_big_machine_commitment(base_cycle, log2_stride, log2_stride local cycle = ((instruction + 1) << (log2_stride - consts.log2_uarch_span)) machine_state = machine:run(base_cycle + cycle) - if not machine_state.halted then - 
builder:add(machine_state.root_hash) - instruction = instruction + 1 - else + if machine_state.halted or machine_state.yielded then -- add this loop plus all remainings builder:add(machine_state.root_hash, instruction_count - instruction + 1) break + else + builder:add(machine_state.root_hash) + instruction = instruction + 1 end end @@ -93,9 +94,11 @@ local function build_commitment(base_cycle, log2_stride, log2_stride_count, mach machine:load_snapshot(snapshot_dir, base_cycle) if inputs then -- treat it as rollups + handle_rollups = true machine:run_with_inputs(base_cycle, inputs) else -- treat it as compute + handle_rollups = false machine:run(base_cycle) end diff --git a/prt/client-lua/computation/machine.lua b/prt/client-lua/computation/machine.lua index efc37a28..d31d3c11 100644 --- a/prt/client-lua/computation/machine.lua +++ b/prt/client-lua/computation/machine.lua @@ -7,10 +7,11 @@ local helper = require "utils.helper" local ComputationState = {} ComputationState.__index = ComputationState -function ComputationState:new(root_hash, halted, uhalted) +function ComputationState:new(root_hash, halted, yielded, uhalted) local r = { root_hash = root_hash, halted = halted, + yielded = yielded, uhalted = uhalted } setmetatable(r, self) @@ -22,15 +23,17 @@ function ComputationState.from_current_machine_state(machine) return ComputationState:new( hash, machine:read_iflags_H(), + machine:read_iflags_Y(), machine:read_uarch_halt_flag() ) end ComputationState.__tostring = function(x) return string.format( - "{root_hash = %s, halted = %s, uhalted = %s}", + "{root_hash = %s, halted = %s, yielded = %s, uhalted = %s}", x.root_hash, x.halted, + x.yielded, x.uhalted ) end @@ -93,6 +96,7 @@ local function find_closest_snapshot(path, current_cycle, cycle) -- Binary search for the closest number smaller than target cycle local closest_dir = nil + local closest_cycle = nil local low, high = 1, #directories while low <= high do @@ -101,16 +105,23 @@ local function 
find_closest_snapshot(path, current_cycle, cycle) if mid_number < cycle and mid_number > current_cycle then closest_dir = directories[mid].path + closest_cycle = directories[mid].number low = mid + 1 -- Search in the larger half else high = mid - 1 -- Search in the smaller half end end - return closest_dir + return closest_cycle, closest_dir end -function Machine:take_snapshot(snapshot_dir, cycle) +function Machine:take_snapshot(snapshot_dir, cycle, handle_rollups) + local input_mask = arithmetic.max_uint(consts.log2_emulator_span) + if handle_rollups and cycle & input_mask == 0 and not self.yielded then + -- dont snapshot a machine state that's freshly fed with input without advance + return + end + if helper.exists(snapshot_dir) then local snapshot_path = snapshot_dir .. "/" .. tostring(cycle) @@ -122,15 +133,16 @@ function Machine:take_snapshot(snapshot_dir, cycle) end function Machine:load_snapshot(snapshot_dir, cycle) + local snapshot_cycle = cycle local snapshot_path = snapshot_dir .. "/" .. 
tostring(cycle) if not helper.exists(snapshot_path) then -- find closest snapshot if direct snapshot doesn't exists - snapshot_path = find_closest_snapshot(snapshot_dir, self.cycle, cycle) + snapshot_cycle, snapshot_path = find_closest_snapshot(snapshot_dir, self.cycle, cycle) end if snapshot_path then local machine = cartesi.machine(snapshot_path, machine_settings) - self.cycle = machine:read_mcycle() - self.start_cycle + self.cycle = snapshot_cycle self.machine = machine end end @@ -148,7 +160,7 @@ function Machine:run(cycle) local physical_cycle = add_and_clamp(self.start_cycle, cycle) -- TODO reconsider for lambda local machine = self.machine - while not (machine:read_iflags_H() or machine:read_mcycle() == physical_cycle) do + while not (machine:read_iflags_H() or machine:read_iflags_Y() or machine:read_mcycle() == physical_cycle) do machine:run(physical_cycle) end @@ -176,11 +188,15 @@ function Machine:run_with_inputs(cycle, inputs) end local next_input_cycle = next_input_index << consts.log2_emulator_span - while next_input_cycle < cycle do + while next_input_cycle <= cycle do self:run(next_input_cycle) - local input = inputs[next_input_index] + local input = inputs[next_input_index + 1] if input then - self.machine:send_cmio_response(cartesi.machine.HTIF_YIELD_REASON_ADVANCE_STATE, input); + local h = assert(input:match("0x(%x+)"), input) + local data_hex = (h:gsub('..', function(cc) + return string.char(tonumber(cc, 16)) + end)) + self.machine:send_cmio_response(cartesi.machine.HTIF_YIELD_REASON_ADVANCE_STATE, data_hex); end next_input_index = next_input_index + 1 @@ -266,20 +282,23 @@ function Machine.get_logs(path, snapshot_dir, cycle, ucycle, inputs) machine:load_snapshot(snapshot_dir, cycle) local logs = {} local log_type = { annotations = true, proofs = true } - local input = Hash.zero if inputs then -- treat it as rollups - machine:run_with_inputs(cycle, inputs) + machine:run_with_inputs(cycle - 1, inputs) + machine:run(cycle) local mask = 
arithmetic.max_uint(consts.log2_emulator_span); - local try_input = inputs[cycle >> consts.log2_emulator_span] - if cycle & mask == 0 and try_input then - input = try_input + local input = inputs[cycle >> consts.log2_emulator_span] + if cycle & mask == 0 and input then + local h = assert(input:match("0x(%x+)"), input) + local data_hex = (h:gsub('..', function(cc) + return string.char(tonumber(cc, 16)) + end)) -- need to process input if ucycle == 0 then -- need to log cmio table.insert(logs, - machine.machine:log_send_cmio_response(cartesi.machine.HTIF_YIELD_REASON_ADVANCE_STATE, input, + machine.machine:log_send_cmio_response(cartesi.machine.HTIF_YIELD_REASON_ADVANCE_STATE, data_hex, log_type )) table.insert(logs, machine.machine:log_uarch_step(log_type)) diff --git a/prt/client-lua/player/reader.lua b/prt/client-lua/player/reader.lua index b3beb935..bd2a0321 100644 --- a/prt/client-lua/player/reader.lua +++ b/prt/client-lua/player/reader.lua @@ -272,6 +272,24 @@ function Reader:read_commitment_joined(tournament_address) return ret end +function Reader:read_tournament_created(tournament_address, match_id_hash) + local sig = "newInnerTournament(bytes32,address)" + local data_sig = "(address)" + + local logs = self:_read_logs(tournament_address, sig, { match_id_hash:hex_string(), false, false }, data_sig) + assert(#logs <= 1) + + if #logs == 0 then return false end + local log = logs[1] + + local ret = { + parent_match = match_id_hash, + new_tournament = log.decoded_data[1], + } + + return ret +end + function Reader:read_commitment(tournament_address, commitment_hash) local sig = "getCommitment(bytes32)((uint64,uint64),bytes32)" @@ -310,24 +328,6 @@ function Reader:read_constants(tournament_address) return constants end -function Reader:read_tournament_created(tournament_address, match_id_hash) - local sig = "newInnerTournament(bytes32,address)" - local data_sig = "(address)" - - local logs = self:_read_logs(tournament_address, sig, { match_id_hash:hex_string(), 
false, false }, data_sig) - assert(#logs <= 1) - - if #logs == 0 then return false end - local log = logs[1] - - local ret = { - parent_match = match_id_hash, - new_tournament = log.decoded_data[1], - } - - return ret -end - function Reader:read_cycle(address, match_id_hash) local sig = "getMatchCycle(bytes32)(uint256)" local ret = self:_call(address, sig, { match_id_hash:hex_string() }) diff --git a/prt/client-lua/utils/helper.lua b/prt/client-lua/utils/helper.lua index 28394ee1..bb681c83 100644 --- a/prt/client-lua/utils/helper.lua +++ b/prt/client-lua/utils/helper.lua @@ -137,4 +137,18 @@ function helper.is_pid_alive(pid) return false -- Returns false if the process is not alive end +-- Function to create a directory and its parents using os.execute +function helper.mkdir_p(path) + -- Use os.execute to call the mkdir command with -p option + local command = "mkdir -p " .. path + local result = os.execute(command) + + -- Check if the command was successful + if result then + print("Directory created successfully: " .. path) + else + print("Failed to create directory: " .. 
path) + end +end + return helper diff --git a/prt/client-rs/src/arena/sender.rs b/prt/client-rs/src/arena/sender.rs index b89b386b..86a25a9c 100644 --- a/prt/client-rs/src/arena/sender.rs +++ b/prt/client-rs/src/arena/sender.rs @@ -75,6 +75,10 @@ impl EthArenaSender { }) } + pub fn client(&self) -> Arc { + self.client.clone() + } + pub async fn nonce(&self) -> std::result::Result> { Ok(self .client diff --git a/prt/client-rs/src/db/compute_state_access.rs b/prt/client-rs/src/db/compute_state_access.rs index 2d4be861..cefae18c 100644 --- a/prt/client-rs/src/db/compute_state_access.rs +++ b/prt/client-rs/src/db/compute_state_access.rs @@ -57,6 +57,9 @@ impl ComputeStateAccess { // the json file should be "/compute_data/0x_root_tournament_address/inputs_and_leafs.json" let work_dir = format!("{compute_data_path}/{root_tournament}"); let work_path = PathBuf::from(work_dir); + if !work_path.exists() { + fs::create_dir_all(&work_path)?; + } let db_path = work_path.join("db"); let no_create_flags = OpenFlags::default() & !OpenFlags::SQLITE_OPEN_CREATE; let handle_rollups; @@ -71,8 +74,7 @@ impl ComputeStateAccess { }); } Err(_) => { - // create new database - info!("create new database"); + info!("create new database for dispute"); let mut connection = Connection::open(&db_path)?; migrations::migrate_to_latest(&mut connection).unwrap(); @@ -164,7 +166,7 @@ impl ComputeStateAccess { compute_data::insert_compute_tree(&conn, tree_root, tree_leafs) } - pub fn closest_snapshot(&self, base_cycle: u64) -> Result> { + pub fn closest_snapshot(&self, base_cycle: u64) -> Result> { let mut snapshots = Vec::new(); // iterate through the snapshot directory, find the one whose cycle number is closest to the base_cycle @@ -188,7 +190,7 @@ impl ComputeStateAccess { .binary_search_by_key(&base_cycle, |k| k.0) .unwrap_or_else(|x| if x > 0 { x - 1 } else { x }); - Ok(snapshots.get(pos).map(|t| t.1.clone())) + Ok(snapshots.get(pos).map(|t| t.clone())) } } @@ -237,37 +239,37 @@ mod 
compute_state_access_tests { assert_eq!( access.closest_snapshot(100).unwrap(), - Some(access.work_path.join(format!("99"))) + Some((99, access.work_path.join(format!("99")))) ); assert_eq!( access.closest_snapshot(150).unwrap(), - Some(access.work_path.join(format!("150"))) + Some((150, access.work_path.join(format!("150")))) ); assert_eq!( access.closest_snapshot(200).unwrap(), - Some(access.work_path.join(format!("200"))) + Some((200, access.work_path.join(format!("200")))) ); assert_eq!( access.closest_snapshot(300).unwrap(), - Some(access.work_path.join(format!("300"))) + Some((300, access.work_path.join(format!("300")))) ); assert_eq!( access.closest_snapshot(7).unwrap(), - Some(access.work_path.join(format!("5"))) + Some((5, access.work_path.join(format!("5")))) ); assert_eq!( access.closest_snapshot(10000).unwrap(), - Some(access.work_path.join(format!("300"))) + Some((300, access.work_path.join(format!("300")))) ); assert_eq!( access.closest_snapshot(100000).unwrap(), - Some(access.work_path.join(format!("99999"))) + Some((9999, access.work_path.join(format!("99999")))) ); } diff --git a/prt/client-rs/src/machine/commitment.rs b/prt/client-rs/src/machine/commitment.rs index 9056e29d..0b03b0d4 100644 --- a/prt/client-rs/src/machine/commitment.rs +++ b/prt/client-rs/src/machine/commitment.rs @@ -123,7 +123,7 @@ fn advance_instruction( let cycle = (instruction + 1) << (log2_stride - constants::LOG2_UARCH_SPAN); machine.run(base_cycle + cycle)?; let state = machine.machine_state()?; - let control_flow = if state.halted { + let control_flow = if state.halted | state.yielded { leafs.push((state.root_hash, instruction_count - instruction + 1)); builder.append_repeated(state.root_hash, instruction_count - instruction + 1); ControlFlow::Break(()) @@ -182,6 +182,12 @@ fn snapshot_base_cycle( base_cycle: u64, db: &ComputeStateAccess, ) -> Result<()> { + let mask = arithmetic::max_uint(constants::LOG2_EMULATOR_SPAN); + if db.handle_rollups && base_cycle & mask == 0 && 
!machine.machine_state()?.yielded { + // don't snapshot a machine state that's freshly fed with input without advance + return Ok(()); + } + let snapshot_path = db.work_path.join(format!("{}", base_cycle)); machine.snapshot(&snapshot_path)?; Ok(()) diff --git a/prt/client-rs/src/machine/commitment_builder.rs b/prt/client-rs/src/machine/commitment_builder.rs index 44e0b839..15b5c413 100644 --- a/prt/client-rs/src/machine/commitment_builder.rs +++ b/prt/client-rs/src/machine/commitment_builder.rs @@ -10,6 +10,7 @@ use crate::{ }; use anyhow::Result; +use log::debug; use std::{ collections::{hash_map::Entry, HashMap}, path::PathBuf, @@ -38,21 +39,14 @@ impl CachingMachineCommitmentBuilder { ) -> Result { if let Entry::Vacant(e) = self.commitments.entry(level) { e.insert(HashMap::new()); - } else if self.commitments[&level].contains_key(&base_cycle) { - return Ok(self.commitments[&level][&base_cycle].clone()); + } else if let Some(commitment) = self.commitments[&level].get(&base_cycle) { + return Ok(commitment.clone()); } let mut machine = MachineInstance::new(&self.machine_path)?; - if let Some(snapshot_path) = db.closest_snapshot(base_cycle)? { - machine.load_snapshot(&PathBuf::from(snapshot_path))?; + if let Some(snapshot) = db.closest_snapshot(base_cycle)? { + machine.load_snapshot(&snapshot.1, snapshot.0)?; }; - if db.handle_rollups { - // treat it as rollups - machine.run_with_inputs(base_cycle, &db.inputs()?)?; - } else { - // treat it as compute - machine.run(base_cycle)?; - } let commitment = { let leafs = db.compute_leafs(level, base_cycle)?; @@ -60,6 +54,15 @@ impl CachingMachineCommitmentBuilder { if leafs.len() > 0 { build_machine_commitment_from_leafs(&mut machine, leafs)? 
} else { + if db.handle_rollups { + debug!("run with inputs"); + // treat it as rollups + machine.run_with_inputs(base_cycle, &db.inputs()?)?; + } else { + debug!("run without inputs"); + // treat it as compute + machine.run(base_cycle)?; + } // leafs are not cached, build merkle by running the machine build_machine_commitment( &mut machine, diff --git a/prt/client-rs/src/machine/instance.rs b/prt/client-rs/src/machine/instance.rs index c457ad7e..8b960169 100644 --- a/prt/client-rs/src/machine/instance.rs +++ b/prt/client-rs/src/machine/instance.rs @@ -8,7 +8,9 @@ use cartesi_machine::{ log::{AccessLog, AccessLogType}, machine::Machine, }; +use log::debug; +use alloy::hex::ToHexExt; use anyhow::Result; use ruint::aliases::U256; use std::path::Path; @@ -17,6 +19,7 @@ use std::path::Path; pub struct MachineState { pub root_hash: Digest, pub halted: bool, + pub yielded: bool, pub uhalted: bool, } @@ -24,8 +27,10 @@ impl std::fmt::Display for MachineState { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, - "{{root_hash = {:?}, halted = {}, uhalted = {}}}", - self.root_hash, self.halted, self.uhalted + "{{root_hash = {}, halted = {}, uhalted = {}}}", + self.root_hash.to_hex(), + self.halted, + self.uhalted ) } } @@ -61,19 +66,22 @@ impl MachineInstance { } // load inner machine with snapshot, update cycle, keep everything else the same - pub fn load_snapshot(&mut self, snapshot_path: &Path) -> Result<()> { + pub fn load_snapshot(&mut self, snapshot_path: &Path, snapshot_cycle: u64) -> Result<()> { let machine = Machine::load(&Path::new(snapshot_path), RuntimeConfig::default())?; let cycle = machine.read_mcycle()?; // Machine can not go backward behind the initial machine assert!(cycle >= self.start_cycle); - self.cycle = cycle - self.start_cycle; + self.cycle = snapshot_cycle; assert_eq!(machine.read_uarch_cycle()?, 0); self.machine = machine; + debug!("load from {}", snapshot_path.display()); + debug!("loaded machine {}", 
self.machine_state()?); + Ok(()) } @@ -104,7 +112,8 @@ impl MachineInstance { let mut logs = Vec::new(); if handle_rollups { // treat it as rollups - self.run_with_inputs(cycle, &inputs)?; + self.run_with_inputs(cycle - 1, &inputs)?; + self.run(cycle)?; let mask = arithmetic::max_uint(constants::LOG2_EMULATOR_SPAN); let input = inputs.get((cycle >> constants::LOG2_EMULATOR_SPAN) as usize); @@ -146,6 +155,7 @@ impl MachineInstance { } pub fn run(&mut self, cycle: u64) -> Result<()> { + debug!("self cycle: {}, target cycle: {}", self.cycle, cycle); assert!(self.cycle <= cycle); let physical_cycle = arithmetic::add_and_clamp(self.start_cycle, cycle); @@ -156,6 +166,11 @@ impl MachineInstance { break; } + let yielded = self.machine.read_iflags_y()?; + if yielded { + break; + } + let mcycle = self.machine.read_mcycle()?; if mcycle == physical_cycle { break; @@ -183,6 +198,9 @@ impl MachineInstance { } pub fn run_with_inputs(&mut self, cycle: u64, inputs: &Vec>) -> Result<()> { + debug!("current cycle: {}", self.cycle); + debug!("target cycle: {}", cycle); + let input_mask = arithmetic::max_uint(constants::LOG2_EMULATOR_SPAN); let current_input_index = self.cycle >> constants::LOG2_EMULATOR_SPAN; @@ -193,20 +211,31 @@ impl MachineInstance { } else { next_input_index = current_input_index + 1; } - let mut next_input_cycle = next_input_index << constants::LOG2_EMULATOR_SPAN; - while next_input_cycle < cycle { + while next_input_cycle <= cycle { + debug!("next input index: {}", next_input_index); + debug!("run to next input cycle: {}", next_input_cycle); self.run(next_input_cycle)?; let input = inputs.get(next_input_index as usize); if let Some(data) = input { + debug!( + "before input, machine state: {}", + self.machine.get_root_hash()? + ); + debug!("input: 0x{}", data.encode_hex()); self.machine .send_cmio_response(htif::fromhost::ADVANCE_STATE, data)?; + debug!( + "after input, machine state: {}", + self.machine.get_root_hash()? 
+ ); } next_input_index += 1; next_input_cycle = next_input_index << constants::LOG2_EMULATOR_SPAN; } + debug!("run to target cycle: {}", cycle); self.run(cycle)?; Ok(()) @@ -228,11 +257,13 @@ impl MachineInstance { pub fn machine_state(&mut self) -> Result { let root_hash = self.machine.get_root_hash()?; let halted = self.machine.read_iflags_h()?; + let yielded = self.machine.read_iflags_y()?; let uhalted = self.machine.read_uarch_halt_flag()?; Ok(MachineState { root_hash: Digest::from_digest(root_hash.as_bytes())?, halted, + yielded, uhalted, }) } diff --git a/prt/client-rs/src/strategy/player.rs b/prt/client-rs/src/strategy/player.rs index bd16aeec..3629566a 100644 --- a/prt/client-rs/src/strategy/player.rs +++ b/prt/client-rs/src/strategy/player.rs @@ -359,8 +359,8 @@ impl Player { let proof = { let mut machine = MachineInstance::new(&self.machine_path)?; - if let Some(snapshot_path) = self.db.closest_snapshot(cycle)? { - machine.load_snapshot(&PathBuf::from(snapshot_path))?; + if let Some(snapshot) = self.db.closest_snapshot(cycle)? { + machine.load_snapshot(&snapshot.1, snapshot.0)?; }; let inputs = self.db.inputs()?; machine.get_logs(cycle, ucycle, inputs, self.db.handle_rollups)? 
diff --git a/prt/tests/compute-rs/Dockerfile b/prt/tests/compute-rs/Dockerfile index cb765f24..71854c30 100644 --- a/prt/tests/compute-rs/Dockerfile +++ b/prt/tests/compute-rs/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.79.0-bookworm AS chef +FROM rust:1.81.0-bookworm AS chef ENV CARGO_REGISTRIES_CARTESI_INDEX=https://github.com/cartesi/crates-index RUN rustup component add rustfmt @@ -9,8 +9,8 @@ RUN apt-get update && \ FROM chef AS planner COPY ./machine/rust-bindings /app/machine/rust-bindings COPY ./common-rs /app/common-rs -COPY ./prt/contract-bindings /app/prt/contract-bindings COPY ./prt/client-rs /app/prt/client-rs +COPY ./prt/contract-bindings /app/prt/contract-bindings COPY ./prt/tests/compute-rs /app/prt/tests/compute-rs WORKDIR /app/prt/tests/compute-rs @@ -42,7 +42,7 @@ FROM --platform=linux/amd64 cartesi/machine-emulator:0.18.1 USER root RUN apt-get update && \ apt-get install -y procps curl xxd clang -ENV FOUNDRY_NIGHTLY nightly-5b7e4cb3c882b28f3c32ba580de27ce7381f415a +ENV FOUNDRY_NIGHTLY nightly-805d7cee81e78e9163b8ce3d86a0c3beb39772d4 RUN curl -sSL https://github.com/foundry-rs/foundry/releases/download/${FOUNDRY_NIGHTLY}/foundry_nightly_linux_$(dpkg --print-architecture).tar.gz | \ tar -zx -C /usr/local/bin @@ -67,7 +67,5 @@ WORKDIR /root/prt/contracts RUN forge --version RUN forge build -RUN mkdir -p /compute_data/0xa16E02E87b7454126E5E10d957A927A7F5B5d2be - WORKDIR /root/prt/tests/compute -ENTRYPOINT ["./compute-test-entrypoint.sh"] +ENTRYPOINT ["./entrypoint.sh"] diff --git a/prt/tests/compute-rs/Makefile b/prt/tests/compute-rs/Makefile index e9ac7736..22a6f45d 100644 --- a/prt/tests/compute-rs/Makefile +++ b/prt/tests/compute-rs/Makefile @@ -16,19 +16,19 @@ create-image: @docker build -t cartesi/prt-compute:rs -f Dockerfile ../../../ test-simple: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test \ --env MACHINE_PATH=$(SIMPLE_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:rs test-stress: create-image - 
@docker run --rm \ + @docker run --rm --name prt-compute-test \ --env MACHINE_PATH=$(STRESS_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:rs test-doom: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test \ --env MACHINE_PATH=$(DOOM_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:rs diff --git a/prt/tests/compute/Dockerfile b/prt/tests/compute/Dockerfile index c612d3a6..5437334d 100644 --- a/prt/tests/compute/Dockerfile +++ b/prt/tests/compute/Dockerfile @@ -3,7 +3,7 @@ FROM cartesi/machine-emulator:0.18.1 USER 0 RUN apt-get -y update && \ apt-get -y install curl gcc imagemagick make procps xxd pkg-config -ENV FOUNDRY_NIGHTLY nightly-5b7e4cb3c882b28f3c32ba580de27ce7381f415a +ENV FOUNDRY_NIGHTLY nightly-805d7cee81e78e9163b8ce3d86a0c3beb39772d4 RUN curl -sSL https://github.com/foundry-rs/foundry/releases/download/${FOUNDRY_NIGHTLY}/foundry_nightly_linux_$(dpkg --print-architecture).tar.gz | \ tar -zx -C /usr/local/bin @@ -39,14 +39,13 @@ COPY ./prt/client-lua/ . WORKDIR "/app/tests/compute" COPY ./prt/tests/compute/ . 
-RUN chmod +x compute-test-entrypoint.sh +RUN chmod +x entrypoint.sh RUN chmod +x prt_compute.lua RUN chmod +x doom_showcase/process_doom_graphics.lua WORKDIR "/app" RUN mkdir -p pixels RUN mkdir -p outputs -RUN mkdir -p /compute_data/0xa16E02E87b7454126E5E10d957A927A7F5B5d2be WORKDIR "/app/tests/compute" -ENTRYPOINT ["./compute-test-entrypoint.sh"] +ENTRYPOINT ["./entrypoint.sh"] diff --git a/prt/tests/compute/Makefile b/prt/tests/compute/Makefile index 3ceca61d..e69dd2e1 100644 --- a/prt/tests/compute/Makefile +++ b/prt/tests/compute/Makefile @@ -16,19 +16,19 @@ create-image: @docker build -t cartesi/prt-compute:lua -f Dockerfile ../../../ test-simple: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test \ --env MACHINE_PATH=$(SIMPLE_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:lua test-stress: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test \ --env MACHINE_PATH=$(STRESS_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:lua test-doom: create-image - @docker run --rm \ + @docker run --rm --name prt-compute-test \ --env MACHINE_PATH=$(DOOM_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ cartesi/prt-compute:lua @@ -41,7 +41,7 @@ clean-graphics: @rm -r pixels outputs test-doom-with-graphics: create-image create-doom-dirs - @docker run --rm \ + @docker run --rm --name prt-compute-test \ --env MACHINE_PATH=$(DOOM_MACHINE_PATH) \ --env LUA_NODE=$(LUA_NODE) \ --mount type=bind,source="$(shell pwd)/pixels",target=/app/pixels \ diff --git a/prt/tests/compute/README.md b/prt/tests/compute/README.md index 580c4e67..c01935d6 100644 --- a/prt/tests/compute/README.md +++ b/prt/tests/compute/README.md @@ -2,7 +2,7 @@ This directory contains a prototype node written in Lua. The purpose of this Lua node is testing and prototyping only; the real production node is written in Rust. -Furthermore, this node implements only compute (_i.e._ a one-shot computation, like a rollups with no inputs). 
+Furthermore, this node implements only compute (_i.e._ a one-shot computation, the machine doesn't yield for inputs). Remember to either clone the repository with the flag `--recurse-submodules`, or run `git submodule update --recursive --init` after cloning. You need a docker installation to run the Dave Lua node. @@ -38,7 +38,7 @@ These players come in multiple flavours: If no other player is actively defending this claim, it will lose by timeout. To add more players of different kinds, you can edit the [`prt_compute.lua`](prt_compute.lua) file. -To run the full example, execute one of the following commands from the current path path (_i.e._ [`prt/lua_poc`](.)): +To run the full example, execute one of the following commands from the current path: ``` make test-simple diff --git a/prt/tests/compute/blockchain/node.lua b/prt/tests/compute/blockchain/node.lua index 1b315ef0..c7d5ba94 100644 --- a/prt/tests/compute/blockchain/node.lua +++ b/prt/tests/compute/blockchain/node.lua @@ -2,10 +2,12 @@ local helper = require "utils.helper" local default_account_number = 40 +-- spawn an anvil node with 40 accounts, auto-mine, and finalize block at height N-2 local function start_blockchain() print(string.format("Starting blockchain with %d accounts...", default_account_number)) - local cmd = string.format([[sh -c "echo $$ ; exec anvil --block-time 1 -a %d > anvil.log 2>&1"]], + local cmd = string.format( + [[sh -c "echo $$ ; exec anvil --block-time 1 --slots-in-an-epoch 1 -a %d > anvil.log 2>&1"]], default_account_number) local reader = io.popen(cmd) diff --git a/prt/tests/compute/compute-test-entrypoint.sh b/prt/tests/compute/entrypoint.sh similarity index 100% rename from prt/tests/compute/compute-test-entrypoint.sh rename to prt/tests/compute/entrypoint.sh diff --git a/prt/tests/compute/prt_compute.lua b/prt/tests/compute/prt_compute.lua index 6b21c1c5..00f1fd7b 100755 --- a/prt/tests/compute/prt_compute.lua +++ b/prt/tests/compute/prt_compute.lua @@ -36,27 +36,42 @@ 
local function write_json_file(leafs, root_tournament) local flat = require "utils.flat" local json = require "utils.json" - local file_path = string.format("/compute_data/%s/inputs_and_leafs.json", root_tournament) + local work_path = string.format("/compute_data/%s", root_tournament) + if not helper.exists(work_path) then + helper.mkdir_p(work_path) + end + local file_path = string.format("%s/inputs_and_leafs.json", work_path) local file = assert(io.open(file_path, "w")) file:write(json.encode(flat.flatten(inputs_and_leafs).flat_object)) assert(file:close()) end +local function get_root_constants(root_tournament) + local Reader = require "player.reader" + local reader = Reader:new(blockchain_constants.endpoint) + local root_constants = reader:read_constants(root_tournament) + + return root_constants +end + -- Function to setup players -local function setup_players(use_lua_node, extra_data, root_constants, root_tournament, machine_path) +local function setup_players(use_lua_node, extra_data, root_tournament, machine_path) + local root_constants = get_root_constants(root_tournament) + + local inputs = nil local player_coroutines = {} local player_index = 1 print("Calculating root commitment...") local snapshot_dir = string.format("/compute_data/%s", root_tournament) local builder = CommitmentBuilder:new(machine_path, snapshot_dir) - local root_commitment = builder:build(0, 0, root_constants.log2_step, root_constants.height, nil) + local root_commitment = builder:build(0, 0, root_constants.log2_step, root_constants.height, inputs) if use_lua_node then -- use Lua node to defend print("Setting up Lua honest player") local start_hero = require "runners.hero_runner" player_coroutines[player_index] = start_hero(player_index, machine_path, root_commitment, root_tournament, - extra_data) + extra_data, inputs) else -- use Rust node to defend print("Setting up Rust honest player") @@ -72,7 +87,7 @@ local function setup_players(use_lua_node, extra_data, root_constants, 
root_tour local scoped_require = new_scoped_require(_ENV) local start_sybil = scoped_require "runners.sybil_runner" player_coroutines[player_index] = start_sybil(player_index, machine_path, root_commitment, root_tournament, - FAKE_COMMITMENT_COUNT) + FAKE_COMMITMENT_COUNT, inputs) player_index = player_index + 1 end @@ -89,12 +104,38 @@ local function setup_players(use_lua_node, extra_data, root_constants, root_tour return player_coroutines end -local function get_root_constants(root_tournament) - local Reader = require "player.reader" - local reader = Reader:new(blockchain_constants.endpoint) - local root_constants = reader:read_constants(root_tournament) +-- Function to run players +local function run_players(player_coroutines) + while true do + local idle = true + local has_live_coroutine = false + for i, c in ipairs(player_coroutines) do + if c then + local success, ret = coroutine.resume(c) + local status = coroutine.status(c) + + if status == "dead" then + player_coroutines[i] = false + end + if not success then + print(string.format("coroutine %d fail to resume with error: %s", i, ret)) + elseif ret then + has_live_coroutine = true + idle = idle and ret.idle + end + end + end - return root_constants + if not has_live_coroutine then + print("No active players, ending program...") + break + end + + if idle then + print(string.format("All players idle, fastforward blockchain for %d seconds...", FAST_FORWARD_TIME)) + blockchain_utils.advance_time(FAST_FORWARD_TIME, blockchain_constants.endpoint) + end + end end -- Main Execution @@ -109,39 +150,9 @@ time.sleep(NODE_DELAY) blockchain_utils.deploy_contracts() time.sleep(NODE_DELAY) -local root_constants = get_root_constants(root_tournament) -local player_coroutines = setup_players(use_lua_node, extra_data, root_constants, root_tournament, machine_path) -print("Hello from Dave lua prototype!") - -while true do - local idle = true - local has_live_coroutine = false - for i, c in ipairs(player_coroutines) do - if c 
then - local success, ret = coroutine.resume(c) - local status = coroutine.status(c) +local player_coroutines = setup_players(use_lua_node, extra_data, root_tournament, machine_path) +print("Hello from Dave compute lua prototype!") - if status == "dead" then - player_coroutines[i] = false - end - if not success then - print(string.format("coroutine %d fail to resume with error: %s", i, ret)) - elseif ret then - has_live_coroutine = true - idle = idle and ret.idle - end - end - end - - if not has_live_coroutine then - print("No active players, ending program...") - break - end - - if idle then - print(string.format("All players idle, fastforward blockchain for %d seconds...", FAST_FORWARD_TIME)) - blockchain_utils.advance_time(FAST_FORWARD_TIME, blockchain_constants.endpoint) - end -end +run_players(player_coroutines) print("Good-bye, world!") diff --git a/prt/tests/compute/runners/helpers/fake_commitment.lua b/prt/tests/compute/runners/helpers/fake_commitment.lua index 49eaeaf4..8e702a90 100644 --- a/prt/tests/compute/runners/helpers/fake_commitment.lua +++ b/prt/tests/compute/runners/helpers/fake_commitment.lua @@ -143,10 +143,11 @@ function FakeCommitmentBuilder:build(base_cycle, level, log2_stride, log2_stride log2_stride, log2_stride_count, inputs) + print("honest commitment", commitment) local fake_commitment = build_fake_commitment(commitment, self.fake_index, log2_stride) self.fake_commitments[level][base_cycle][self.fake_index] = fake_commitment - self.fake_index = false + -- self.fake_index = false return fake_commitment end diff --git a/prt/tests/compute/runners/hero_runner.lua b/prt/tests/compute/runners/hero_runner.lua index 9b50c476..9b78f2b8 100755 --- a/prt/tests/compute/runners/hero_runner.lua +++ b/prt/tests/compute/runners/hero_runner.lua @@ -5,7 +5,7 @@ local HonestStrategy = require "player.strategy" local Sender = require "player.sender" local Player = require "player.player" -local function hero_runner(player_id, machine_path, root_commitment, 
root_tournament, extra_data) +local function hero_runner(player_id, machine_path, root_commitment, root_tournament, extra_data, inputs) local hook if extra_data then @@ -18,7 +18,7 @@ local function hero_runner(player_id, machine_path, root_commitment, root_tourna local snapshot_dir = string.format("/compute_data/%s", root_tournament) local strategy = HonestStrategy:new( CommitmentBuilder:new(machine_path, snapshot_dir, root_commitment), - nil, + inputs, machine_path, Sender:new(blockchain_consts.pks[player_id], player_id, blockchain_consts.endpoint) ) diff --git a/prt/tests/compute/runners/rust_hero_runner.lua b/prt/tests/compute/runners/rust_hero_runner.lua index 43ff5207..ebe4f6a0 100644 --- a/prt/tests/compute/runners/rust_hero_runner.lua +++ b/prt/tests/compute/runners/rust_hero_runner.lua @@ -28,7 +28,7 @@ end local function create_react_once_runner(player_id, machine_path) local rust_compute_cmd = string.format( [[sh -c "echo $$ ; exec env MACHINE_PATH='%s' RUST_LOG='info' \ - ./cartesi-prt-compute 2>&1 | tee -a honest.log"]], + ./cartesi-prt-compute 2>&1 | tee -a honest.log"]], machine_path) return coroutine.create(function() diff --git a/prt/tests/compute/runners/sybil_runner.lua b/prt/tests/compute/runners/sybil_runner.lua index e16fea9f..f84941ea 100755 --- a/prt/tests/compute/runners/sybil_runner.lua +++ b/prt/tests/compute/runners/sybil_runner.lua @@ -35,11 +35,11 @@ local function sybil_player(root_tournament, strategy, blockchain_endpoint, fake end -local function sybil_runner(player_id, machine_path, root_commitment, root_tournament, fake_commitment_count) +local function sybil_runner(player_id, machine_path, root_commitment, root_tournament, fake_commitment_count, inputs) local snapshot_dir = string.format("/compute_data/%s", root_tournament) local strategy = HonestStrategy:new( FakeCommitmentBuilder:new(machine_path, root_commitment, snapshot_dir), - nil, + inputs, machine_path, Sender:new(blockchain_consts.pks[player_id], player_id, 
blockchain_consts.endpoint) )