diff --git a/CHANGELOG.md b/CHANGELOG.md index a17c47c..bc54d5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,11 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). ## Unreleased + +### Changed +* `bonsol` cli option requirements and error messages updated for added clarity + +### Fixed * **Breaking**: `execute_v1` interface instruction now uses the new `InputRef` to improve CU usage. * Adds a callback struct to use the input_hash and committed_outputs from the callback program ergonomically. * Fixes requester/payer mismatch in the node account selection @@ -12,12 +17,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. * **Breaking**: Changes flatbuffer `Account` struct to have 8 byte alignment due a possible bug in the flatbufers compiler. [https://github.com/google/flatbuffers/pull/8398](Bug Here) * **Breaking**: Flatbuffers was upgraded to `24.3.25` * `risc0-groth16-prover` binaries (rapidsnark & stark-verify) are available to the nix store, partially unblocking NixOS support. -* Fixed alignment of `Account` struct in the schemas. * `flatbuffers` code is now dynamically generated at build time * Fixed alignment of `Account` struct in the schemas. 
- - ## [0.2.1] - 2024-10-13 ### Changed diff --git a/Cargo.lock b/Cargo.lock index c5ad408..6e8898c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1214,6 +1214,7 @@ dependencies = [ "solana-rpc-client", "solana-sdk", "tera", + "thiserror", "tokio", ] @@ -7978,18 +7979,18 @@ checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 4d1df42..76f6823 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -22,7 +22,7 @@ hex = "0.4.3" byte-unit = "4.0.19" bytes = "1.4.0" cargo_toml = "0.20.3" -clap = { version = "4.4.2", features = ["derive"] } +clap = { version = "4.4.2", features = ["derive", "env"] } indicatif = "0.17.8" num-traits = "0.2.15" object_store = { version = "0.9.1", features = ["aws"] } @@ -43,6 +43,7 @@ solana-cli-config = { workspace = true } solana-rpc-client = { workspace = true } solana-sdk = { workspace = true } tera = "1.17.1" +thiserror = "1.0.65" tokio = { version = "1.38.0", features = ["full"] } bonsol-interface.workspace = true diff --git a/cli/README.md b/cli/README.md index 4010eda..56b2206 100644 --- a/cli/README.md +++ b/cli/README.md @@ -43,12 +43,12 @@ The output of the build command is a manifest.json file which is placed in the r You can deploy a bonsol program with the following command ``` -bonsol -k 
./keypair.json -u http://localhost:8899 deploy -m {path to manifest.json} -t {type of deployment} -y {auto confirm} ... {upload type specific options} +bonsol -k ./keypair.json -u http://localhost:8899 deploy -m {path to manifest.json} -y {auto confirm} -t {s3|shadow-drive|url} ... {upload type specific options} ``` There will be many options for how to upload the program, the default is s3. Here is an example of how to deploy a program to s3 ``` -bonsol -k ./keypair.json -u http://localhost:8899 deploy -m program/manifest.json -t s3 --bucket bonsol-public-images --region us-east-1 --access-key {your key} --secret-key {your secret key} +bonsol -k ./keypair.json -u http://localhost:8899 deploy -m program/manifest.json -t s3 --bucket bonsol-public-images --region us-east-1 --access-key {your key} --secret-key {your secret key} ``` In the above example the manifest.json file is the file that was created by the build command. This will try to upload the binary to the s3 bucket and create a deployment account for the program. Programs are indexed by the image id, which is a kind of checksum of the program elf file. This means that if you change the elf file, the image id will change and the program will be deployed again under a new deployment account. Programs are immutable and can only be changed by redeploying the program. When a node downloads a program it will check the image id and if it doesnt match the deployment account it will reject the program. Furthermore when bonsol checks the proof, it will check the image id and if it doesnt match the deployment account and desired image id from execution request it will reject the proof. 
diff --git a/cli/src/build.rs b/cli/src/build.rs index df6a492..b91147f 100644 --- a/cli/src/build.rs +++ b/cli/src/build.rs @@ -3,109 +3,202 @@ use std::path::Path; use std::process::Command; use std::time::Duration; -use crate::common::*; use anyhow::Result; +use cargo_toml::Manifest; use indicatif::ProgressBar; use risc0_zkvm::compute_image_id; use solana_sdk::signer::Signer; +use crate::common::*; +use crate::error::{BonsolCliError, ZkManifestError}; + pub fn build(keypair: &impl Signer, zk_program_path: String) -> Result<()> { + validate_build_dependencies()?; + let bar = ProgressBar::new_spinner(); bar.enable_steady_tick(Duration::from_millis(100)); + let image_path = Path::new(&zk_program_path); - // ensure cargo risc0 is installed and has the plugin - if !cargo_has_plugin("risczero") || !cargo_has_plugin("binstall") || !has_executable("docker") { - bar.finish_and_clear(); - return Err(anyhow::anyhow!( - "Please install cargo-risczero and cargo-binstall and docker" - )); - } + let (cargo_package_name, input_order) = parse_cargo_manifest(image_path)?; + let build_result = + build_zkprogram_manifest(image_path, &keypair, cargo_package_name, input_order); + let manifest_path = image_path.join(MANIFEST_JSON); - let build_result = build_maifest(image_path, &keypair); - let manifest_path = image_path.join("manifest.json"); match build_result { Err(e) => { - bar.finish_with_message(format!("Error building image: {:?}", e)); + bar.finish_with_message(format!( + "Build failed for program '{}': {:?}", + image_path.to_string_lossy(), + e + )); Ok(()) } Ok(manifest) => { - serde_json::to_writer_pretty(File::create(&manifest_path).unwrap(), &manifest).unwrap(); + serde_json::to_writer_pretty(File::create(&manifest_path)?, &manifest)?; bar.finish_and_clear(); + println!("Build complete"); Ok(()) } } } -fn build_maifest( - image_path: &Path, - keypair: &impl Signer, -) -> Result { - let manifest_path = image_path.join("Cargo.toml"); - let manifest = 
cargo_toml::Manifest::from_path(&manifest_path).unwrap(); - let package = manifest +fn validate_build_dependencies() -> Result<(), BonsolCliError> { + const CARGO_RISCZERO: &str = "risczero"; + const DOCKER: &str = "docker"; + + let mut missing_deps = Vec::with_capacity(2); + + if !cargo_has_plugin(CARGO_RISCZERO) { + missing_deps.push(format!("cargo-{}", CARGO_RISCZERO)); + } + if !has_executable(DOCKER) { + missing_deps.push(DOCKER.into()); + } + + if !missing_deps.is_empty() { + return Err(BonsolCliError::MissingBuildDependencies { missing_deps }); + } + + Ok(()) +} + +fn parse_cargo_manifest_inputs( + manifest: &Manifest, + manifest_path_str: String, +) -> Result> { + const METADATA: &str = "metadata"; + const ZKPROGRAM: &str = "zkprogram"; + const INPUT_ORDER: &str = "input_order"; + + let meta = manifest .package .as_ref() - .map(|p| &p.name) - .ok_or(std::io::Error::new( - std::io::ErrorKind::Other, - "Invalid Cargo.toml", + .and_then(|p| p.metadata.as_ref()) + .ok_or(ZkManifestError::MissingPackageMetadata( + manifest_path_str.clone(), ))?; - let meta = manifest.package.as_ref().and_then(|p| p.metadata.as_ref()); - if meta.is_none() { - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "Invalid Cargo.toml, missing package metadata", - )); + let meta_table = meta.as_table().ok_or(ZkManifestError::ExpectedTable { + manifest_path: manifest_path_str.clone(), + name: METADATA.into(), + })?; + let zkprogram = meta_table + .get(ZKPROGRAM) + .ok_or(ZkManifestError::MissingProgramMetadata { + manifest_path: manifest_path_str.clone(), + meta: meta.to_owned(), + })?; + let zkprogram_table = zkprogram.as_table().ok_or(ZkManifestError::ExpectedTable { + manifest_path: manifest_path_str.clone(), + name: ZKPROGRAM.into(), + })?; + let input_order = + zkprogram_table + .get(INPUT_ORDER) + .ok_or(ZkManifestError::MissingInputOrder { + manifest_path: manifest_path_str.clone(), + zkprogram: zkprogram.to_owned(), + })?; + let inputs = input_order + .as_array() + 
.ok_or(ZkManifestError::ExpectedArray { + manifest_path: manifest_path_str.clone(), + name: INPUT_ORDER.into(), + })?; + + let (input_order, errs): ( + Vec>, + Vec>, + ) = inputs + .iter() + .map(|i| -> Result { + i.as_str() + .map(|s| s.to_string()) + .ok_or(ZkManifestError::InvalidInput(i.to_owned())) + }) + .partition(|res| res.is_ok()); + if !errs.is_empty() { + let errs: Vec = errs + .into_iter() + .map(|r| format!("Error: {:?}\n", r.unwrap_err())) + .collect(); + return Err(ZkManifestError::InvalidInputs { + manifest_path: manifest_path_str, + errs, + } + .into()); } - let inputs = meta - .unwrap() - .as_table() - .and_then(|m| m.get("zkprogram")) - .and_then(|m| m.as_table()) - .and_then(|m| m.get("input_order")) - .and_then(|m| m.as_array()) - .ok_or(std::io::Error::new( - std::io::ErrorKind::Other, - "Invalid Cargo.toml, missing zkprogram metadata", + Ok(input_order.into_iter().map(Result::unwrap).collect()) +} + +fn parse_cargo_manifest(image_path: &Path) -> Result<(String, Vec)> { + let cargo_manifest_path = image_path.join(CARGO_TOML); + let cargo_manifest_path_str = cargo_manifest_path.to_string_lossy().to_string(); + if !cargo_manifest_path.exists() { + return Err( + ZkManifestError::MissingManifest(image_path.to_string_lossy().to_string()).into(), + ); + } + let cargo_manifest = cargo_toml::Manifest::from_path(&cargo_manifest_path).map_err(|err| { + ZkManifestError::FailedToLoadManifest { + manifest_path: cargo_manifest_path_str.clone(), + err, + } + })?; + let cargo_package_name = cargo_manifest + .package + .as_ref() + .map(|p| p.name.clone()) + .ok_or(ZkManifestError::MissingPackageName( + cargo_manifest_path_str.clone(), ))?; + let input_order = parse_cargo_manifest_inputs(&cargo_manifest, cargo_manifest_path_str)?; + + Ok((cargo_package_name, input_order)) +} + +fn build_zkprogram_manifest( + image_path: &Path, + keypair: &impl Signer, + cargo_package_name: String, + input_order: Vec, +) -> Result { + const RISCV_DOCKER_PATH: &str = 
"target/riscv-guest/riscv32im-risc0-zkvm-elf/docker"; + const CARGO_RISCZERO_BUILD_ARGS: &[&str; 4] = + &["risczero", "build", "--manifest-path", "Cargo.toml"]; let binary_path = image_path - .join("target/riscv-guest/riscv32im-risc0-zkvm-elf/docker") - .join(package) - .join(package); - let output = Command::new("cargo") + .join(RISCV_DOCKER_PATH) + .join(&cargo_package_name) + .join(&cargo_package_name); + let output = Command::new(CARGO_COMMAND) .current_dir(image_path) - .arg("risczero") - .arg("build") - .arg("--manifest-path") - .arg("Cargo.toml") - .env("CARGO_TARGET_DIR", image_path.join("target")) + .args(CARGO_RISCZERO_BUILD_ARGS) + .env("CARGO_TARGET_DIR", image_path.join(TARGET_DIR)) .output()?; if output.status.success() { let elf_contents = fs::read(&binary_path)?; - let image_id = compute_image_id(&elf_contents) - .map_err(|_| std::io::Error::new(std::io::ErrorKind::Other, "Invalid image"))?; + let image_id = compute_image_id(&elf_contents).map_err(|err| { + BonsolCliError::FailedToComputeImageId { + binary_path: binary_path.to_string_lossy().to_string(), + err, + } + })?; let signature = keypair.sign_message(elf_contents.as_slice()); - let manifest = ZkProgramManifest { - name: package.to_string(), - binary_path: binary_path.to_str().unwrap().to_string(), - input_order: inputs - .iter() - .map(|i| i.as_str().unwrap().to_string()) - .collect(), + let zkprogram_manifest = ZkProgramManifest { + name: cargo_package_name, + binary_path: binary_path + .to_str() + .ok_or(ZkManifestError::InvalidBinaryPath)? 
+ .to_string(), + input_order, image_id: image_id.to_string(), size: elf_contents.len() as u64, signature: signature.to_string(), }; - Ok(manifest) - } else { - let error = String::from_utf8_lossy(&output.stderr); - println!("Build failed: {}", error); - Err(std::io::Error::new( - std::io::ErrorKind::Other, - "Build failed", - )) + return Ok(zkprogram_manifest); } + + Err(BonsolCliError::BuildFailure(String::from_utf8_lossy(&output.stderr).to_string()).into()) } diff --git a/cli/src/command.rs b/cli/src/command.rs index 14b6371..45ec694 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -1,44 +1,141 @@ -use clap::{command, Args, Parser, Subcommand, ValueEnum}; +use clap::{command, ArgGroup, Args, Parser, Subcommand, ValueEnum}; + #[derive(Parser, Debug)] #[command(version)] +#[command(group( + // Ensures mutual exclusivity of config, or keypair and rpc_url + ArgGroup::new("config_group") + .required(false) + .args(&["config"]) + .conflicts_with("rpc_url") + .conflicts_with("keypair") + .multiple(false) +))] pub struct BonsolCli { - #[arg(short = 'c', long)] + #[arg( + help = "The path to a Solana CLI config [Default: '~/.config/solana/cli/config.yml']", + short = 'c', + long + )] pub config: Option, - #[arg(short = 'k', long)] + + #[arg( + help = "The path to a Solana keypair file [Default: '~/.config/solana/id.json']", + short = 'k', + long, + requires = "rpc_url" + )] pub keypair: Option, - #[arg(short = 'u', long)] + + #[arg( + help = "The Solana cluster the Solana CLI will make requests to", + short = 'u', + long, + requires = "keypair" + )] pub rpc_url: Option, + #[command(subcommand)] - pub command: Commands, + pub command: Command, +} + +pub struct ParsedBonsolCli { + pub config: Option, + + pub keypair: Option, + + pub rpc_url: Option, + + pub command: ParsedCommand, +} + +impl TryFrom for ParsedBonsolCli { + type Error = anyhow::Error; + + fn try_from(value: BonsolCli) -> Result { + Ok(Self { + config: value.config, + keypair: value.keypair, + 
rpc_url: value.rpc_url, + command: value.command.try_into()?, + }) + } } #[derive(Debug, Clone, Args)] -pub struct S3UploadDestination { - #[arg(long)] - pub bucket: Option, - #[arg(long)] - pub access_key: Option, - #[arg(long)] - pub secret_key: Option, - #[arg(long)] - pub region: Option, +pub struct S3UploadArgs { + #[arg( + help = "Specify the S3 bucket name", + long, + required = true, + value_parser = |s: &str| { + if s.trim().is_empty() { + anyhow::bail!("expected a non-empty string representation of an S3 bucket name") + } + Ok(s.to_string()) + } + )] + pub bucket: String, + + #[arg( + help = "Specify the AWS access key ID", + long, + required = true, + env = "AWS_ACCESS_KEY_ID" + )] + pub access_key: String, + + #[arg( + help = "Specify the AWS secret access key", + long, + required = true, + env = "AWS_SECRET_ACCESS_KEY" + )] + pub secret_key: String, + + #[arg( + help = "Specify the AWS region", + long, + required = true, + env = "AWS_REGION" + )] + pub region: String, } #[derive(Debug, Clone, Args)] -pub struct ShadowDriveUpload { - #[arg(long)] +#[command(alias = "sd", group( + // If creating a new account, there's no reason to pass an already existing pubkey + ArgGroup::new("create_group") + .required(true) // Ensures that either `create` or `storage_account` is specified + .args(&["create", "storage_account"]) + .multiple(false) +))] +pub struct ShadowDriveUploadArgs { + #[arg(help = "Specify a Shadow Drive storage account public key", long)] pub storage_account: Option, - #[arg(long)] + + #[arg( + help = "Specify the size of the Shadow Drive storage account in MB", + long + )] pub storage_account_size_mb: Option, - #[arg(long)] + + #[arg(help = "Specify the name of the Shadow Drive storage account", long)] pub storage_account_name: Option, - #[arg(long)] - pub alternate_keypair: Option, // for testing on devnet but deploying to shadow drive + + #[arg( + help = "Specify an alternate keypair for testing on devnet, but deploying to Shadow Drive", + 
long + )] + pub alternate_keypair: Option, + + #[arg(help = "Create a new Shadow Drive storage account", long)] + pub create: bool, } #[derive(Debug, Clone, Args)] -pub struct UrlUploadDestination { - #[arg(long)] +pub struct UrlUploadArgs { + #[arg(help = "Specify a URL endpoint to deploy to", long, required = true)] pub url: String, } @@ -49,43 +146,119 @@ pub enum DeployType { Url, } +#[derive(Debug, Clone)] +pub enum DeployDestination { + S3(S3UploadArgs), + ShadowDrive(ShadowDriveUploadArgs), + Url(UrlUploadArgs), +} +impl DeployDestination { + pub fn try_parse( + deploy_type: DeployType, + s3: Option, + sd: Option, + url: Option, + ) -> anyhow::Result { + match deploy_type { + // Because we are not supporting a direct mapping (eg, subcommand), + // it's possible for a user to specify a deployment type and provide the wrong + // arguments. If we support subcommands in the future this will be + // much clearer, otherwise we would need to do more validation here + // to provide better error messages when the wrong args are present. 
+ DeployType::S3 if s3.is_some() => Ok(Self::S3(s3.unwrap())), + DeployType::ShadowDrive if sd.is_some() => Ok(Self::ShadowDrive(sd.unwrap())), + DeployType::Url if url.is_some() => Ok(Self::Url(url.unwrap())), + _ => anyhow::bail!("The deployment type and its corresponding args do not match, expected args for deployment type '{:?}'", deploy_type), + } + } +} + +#[derive(Debug, Clone)] +pub struct DeployArgs { + pub dest: DeployDestination, + pub manifest_path: String, + pub auto_confirm: bool, +} +impl DeployArgs { + pub fn parse(dest: DeployDestination, manifest_path: String, auto_confirm: bool) -> Self { + Self { + dest, + manifest_path, + auto_confirm, + } + } +} + #[derive(Subcommand, Debug)] -pub enum Commands { +pub enum Command { + #[command( + about = "Deploy a program with various storage options, such as S3, ShadowDrive, or manually with a URL" + )] Deploy { - #[arg(short = 'm', long)] + #[arg( + help = "Specify the deployment type", + short = 't', + long, + value_enum, + required = true + )] + deploy_type: DeployType, + + #[command(flatten)] + s3: Option, + + #[command(flatten)] + shadow_drive: Option, + + #[command(flatten)] + url: Option, + + #[arg( + help = "The path to the program's manifest file (manifest.json)", + short = 'm', + long + )] manifest_path: String, - #[arg(short = 't', long)] - deploy_type: Option, - #[clap(flatten)] - s3_upload: S3UploadDestination, - #[clap(flatten)] - shadow_drive_upload: ShadowDriveUpload, - #[clap(flatten)] - url_upload: UrlUploadDestination, - #[arg(short = 'y', long)] + + #[arg( + help = "Whether to automatically confirm deployment", + short = 'y', + long + )] auto_confirm: bool, }, Build { - #[arg(short = 'z', long)] + #[arg( + help = "The path to a ZK program folder containing a Cargo.toml", + short = 'z', + long + )] zk_program_path: String, }, Execute { #[arg(short = 'f', long)] execution_request_file: Option, + // overridable settings #[arg(short = 'p', long)] program_id: Option, + #[arg(short = 'e', 
long)] execution_id: Option, + #[arg(short = 'x', long)] expiry: Option, + #[arg(short = 'm', long)] tip: Option, + #[arg(short = 'i')] input_file: Option, // overrides inputs in execution request file + /// wait for execution to be proven #[arg(short = 'w', long)] wait: bool, + /// timeout in seconds #[arg(short = 't', long)] timeout: Option, @@ -93,19 +266,124 @@ pub enum Commands { Prove { #[arg(short = 'm', long)] manifest_path: Option, + #[arg(short = 'p', long)] program_id: Option, + #[arg(short = 'i')] input_file: Option, + #[arg(short = 'e', long)] execution_id: String, + #[arg(short = 'o')] output_location: Option, }, Init { #[arg(short = 'd', long)] dir: Option, + #[arg(short = 'n', long)] project_name: String, }, } + +#[derive(Debug)] +pub enum ParsedCommand { + Deploy { + deploy_args: DeployArgs, + }, + Build { + zk_program_path: String, + }, + Execute { + execution_request_file: Option, + + program_id: Option, + + execution_id: Option, + + expiry: Option, + + tip: Option, + + input_file: Option, + + wait: bool, + + timeout: Option, + }, + Prove { + manifest_path: Option, + + program_id: Option, + + input_file: Option, + + execution_id: String, + + output_location: Option, + }, + Init { + dir: Option, + + project_name: String, + }, +} + +impl TryFrom for ParsedCommand { + type Error = anyhow::Error; + + fn try_from(value: Command) -> Result { + match value { + Command::Deploy { + deploy_type, + s3, + shadow_drive, + url, + manifest_path, + auto_confirm, + } => Ok(ParsedCommand::Deploy { + deploy_args: DeployArgs::parse( + DeployDestination::try_parse(deploy_type, s3, shadow_drive, url)?, + manifest_path, + auto_confirm, + ), + }), + Command::Build { zk_program_path } => Ok(ParsedCommand::Build { zk_program_path }), + Command::Execute { + execution_request_file, + program_id, + execution_id, + expiry, + tip, + input_file, + wait, + timeout, + } => Ok(ParsedCommand::Execute { + execution_request_file, + program_id, + execution_id, + expiry, + tip, + 
input_file, + wait, + timeout, + }), + Command::Prove { + manifest_path, + program_id, + input_file, + execution_id, + output_location, + } => Ok(ParsedCommand::Prove { + manifest_path, + program_id, + input_file, + execution_id, + output_location, + }), + Command::Init { dir, project_name } => Ok(ParsedCommand::Init { dir, project_name }), + } + } +} diff --git a/cli/src/common.rs b/cli/src/common.rs index ca6ab9e..625726e 100644 --- a/cli/src/common.rs +++ b/cli/src/common.rs @@ -1,4 +1,9 @@ -use anyhow::Result; +use std::fs::File; +use std::path::PathBuf; +use std::process::Command; +use std::str::FromStr; + +use anyhow::{Context, Result}; use bonsol_prover::input_resolver::{ProgramInput, ResolvedInput}; use bonsol_sdk::instructions::CallbackConfig; use bonsol_sdk::{InputT, InputType, ProgramInputType}; @@ -9,9 +14,13 @@ use serde::{Deserialize, Serialize}; use solana_rpc_client::nonblocking::rpc_client; use solana_sdk::instruction::AccountMeta; use solana_sdk::pubkey::Pubkey; -use std::fs::File; -use std::process::Command; -use std::str::FromStr; + +use crate::error::{BonsolCliError, ParseConfigError}; + +pub(crate) const MANIFEST_JSON: &str = "manifest.json"; +pub(crate) const CARGO_COMMAND: &str = "cargo"; +pub(crate) const CARGO_TOML: &str = "Cargo.toml"; +pub(crate) const TARGET_DIR: &str = "target"; pub fn cargo_has_plugin(plugin_name: &str) -> bool { Command::new("cargo") @@ -152,6 +161,60 @@ pub struct InputFile { pub inputs: Vec, } +/// Attempt to load the RPC URL and keypair file from a solana `config.yaml`. 
+pub(crate) fn try_load_from_config(config: Option) -> anyhow::Result<(String, String)> { + let whoami = String::from_utf8_lossy(&std::process::Command::new("whoami").output()?.stdout) + .trim_end() + .to_string(); + let default_config_path = solana_cli_config::CONFIG_FILE.as_ref(); + + let config_file = config.as_ref().map_or_else( + || -> anyhow::Result<&String> { + let inner_err = ParseConfigError::DefaultConfigNotFound { + whoami: whoami.clone(), + }; + let context = inner_err.context(None); + + // If no config is given, try to find it at the default location. + default_config_path + .and_then(|s| PathBuf::from_str(s).is_ok_and(|p| p.exists()).then_some(s)) + .ok_or(BonsolCliError::ParseConfigError(inner_err)) + .context(context) + }, + |config| -> anyhow::Result<&String> { + // Here we throw an error if the user provided a path to a config that does not exist. + // Instead of using the default location, it's better to show the user the path they + // expected to use was not valid. + if !PathBuf::from_str(config)?.exists() { + let inner_err = ParseConfigError::ConfigNotFound { + path: config.into(), + }; + let context = inner_err.context(None); + let err: anyhow::Error = BonsolCliError::ParseConfigError(inner_err).into(); + return Err(err.context(context)); + } + Ok(config) + }, + )?; + let config = { + let mut inner_err = ParseConfigError::Uninitialized; + + let mut maybe_config = solana_cli_config::Config::load(config_file).map_err(|err| { + let err = ParseConfigError::FailedToLoad { + path: config.unwrap_or(default_config_path.cloned().unwrap()), + err: format!("{err:?}"), + }; + inner_err = err.clone(); + BonsolCliError::ParseConfigError(err).into() + }); + if maybe_config.is_err() { + maybe_config = maybe_config.context(inner_err.context(Some(whoami))); + } + maybe_config + }?; + Ok((config.json_rpc_url, config.keypair_path)) +} + pub async fn sol_check(rpc_client: String, pubkey: Pubkey) -> bool { let rpc_client = rpc_client::RpcClient::new(rpc_client); 
if let Ok(account) = rpc_client.get_account(&pubkey).await { diff --git a/cli/src/deploy.rs b/cli/src/deploy.rs index ac9981d..e2dc7db 100644 --- a/cli/src/deploy.rs +++ b/cli/src/deploy.rs @@ -1,4 +1,3 @@ -use std::env; use std::fs::{self, File}; use std::path::Path; use std::str::FromStr; @@ -10,198 +9,227 @@ use indicatif::ProgressBar; use object_store::aws::AmazonS3Builder; use object_store::ObjectStore; use shadow_drive_sdk::models::ShadowFile; -use shadow_drive_sdk::ShadowDriveClient; +use shadow_drive_sdk::{Keypair, ShadowDriveClient, Signer}; use solana_rpc_client::nonblocking::rpc_client::RpcClient; use solana_sdk::commitment_config::CommitmentConfig; use solana_sdk::pubkey::Pubkey; use solana_sdk::signature::read_keypair_file; -use crate::command::{DeployType, S3UploadDestination, ShadowDriveUpload, UrlUploadDestination}; +use crate::command::{DeployArgs, DeployDestination, S3UploadArgs, ShadowDriveUploadArgs}; use crate::common::ZkProgramManifest; +use crate::error::{BonsolCliError, S3ClientError, ShadowDriveClientError, ZkManifestError}; -pub async fn deploy( - rpc: String, - signer: &impl shadow_drive_sdk::Signer, - manifest_path: String, - s3_upload: S3UploadDestination, - shadow_drive_upload: ShadowDriveUpload, - url_upload: UrlUploadDestination, - auto_confirm: bool, - deploy_type: Option, -) -> Result<()> { +pub async fn deploy(rpc_url: String, signer: Keypair, deploy_args: DeployArgs) -> Result<()> { let bar = ProgressBar::new_spinner(); - let rpc_url = rpc.clone(); - let rpc_client = RpcClient::new_with_commitment(rpc, CommitmentConfig::confirmed()); - let manifest_path = Path::new(&manifest_path); - let manifest_file = File::open(manifest_path) - .map_err(|e| anyhow::anyhow!("Error opening manifest file: {:?}", e))?; - - let manifest: ZkProgramManifest = serde_json::from_reader(manifest_file) - .map_err(|e| anyhow::anyhow!("Error parsing manifest file: {:?}", e))?; - let loaded_binary = fs::read(&manifest.binary_path) - .map_err(|e| 
anyhow::anyhow!("Error loading binary: {:?}", e))?; - let url: String = match deploy_type { - Some(DeployType::S3) => { - let bucket = s3_upload - .bucket - .ok_or(anyhow::anyhow!("Please provide a bucket name"))?; - let region = s3_upload.region.or_else(|| env::var("AWS_REGION").ok()); - let access_key = s3_upload - .access_key - .or_else(|| env::var("AWS_ACCESS_KEY_ID").ok()); - let secret_key = s3_upload - .secret_key - .or_else(|| env::var("AWS_SECRET_ACCESS_KEY").ok()); - if region.is_none() || access_key.is_none() || secret_key.is_none() { - bar.finish_and_clear(); - return Err(anyhow::anyhow!("Invalid AWS credentials")); - } - let region = region.unwrap(); - let access_key = access_key.unwrap(); - let secret_key = secret_key.unwrap(); - if bucket.is_empty() { - bar.finish_and_clear(); - return Err(anyhow::anyhow!("Please provide a bucket name")); - } + let rpc_client = RpcClient::new_with_commitment(rpc_url.clone(), CommitmentConfig::confirmed()); + let DeployArgs { + dest, + manifest_path, + auto_confirm, + } = deploy_args; + + let manifest_file = File::open(Path::new(&manifest_path)).map_err(|err| { + BonsolCliError::ZkManifestError(ZkManifestError::FailedToOpen { + manifest_path: manifest_path.clone(), + err, + }) + })?; + let manifest: ZkProgramManifest = serde_json::from_reader(manifest_file).map_err(|err| { + BonsolCliError::ZkManifestError(ZkManifestError::FailedDeserialization { + manifest_path, + err, + }) + })?; + let loaded_binary = fs::read(&manifest.binary_path).map_err(|err| { + BonsolCliError::ZkManifestError(ZkManifestError::FailedToLoadBinary { + binary_path: manifest.binary_path.clone(), + err, + }) + })?; + let url: String = match dest { + DeployDestination::S3(s3_upload) => { + let S3UploadArgs { + bucket, + access_key, + secret_key, + region, + .. 
+ } = s3_upload; + let s3_client = AmazonS3Builder::new() - .with_bucket_name(bucket.clone()) + .with_bucket_name(&bucket) .with_region(®ion) .with_access_key_id(&access_key) .with_secret_access_key(&secret_key) .build() - .map_err(|e| anyhow::anyhow!("Error creating S3 client: {:?}", e))?; + .map_err(|err| { + BonsolCliError::S3ClientError(S3ClientError::FailedToBuildClient { + args: vec![ + format!("bucket: {bucket}"), + format!("access_key: {access_key}"), + format!( + "secret_key: {}..{}", + &secret_key[..4], + &secret_key[secret_key.len() - 4..] + ), + format!("region: {region}"), + ], + err, + }) + })?; + let dest = object_store::path::Path::from(format!("{}-{}", manifest.name, manifest.image_id)); - let destc = dest.clone(); - //get the file to see if it exists - let exists = s3_client.head(&destc).await.is_ok(); let url = format!("https://{}.s3.{}.amazonaws.com/{}", bucket, region, dest); - if exists { + // get the file to see if it exists + if s3_client.head(&dest).await.is_ok() { bar.set_message("File already exists, skipping upload"); - Ok::<_, anyhow::Error>(url) } else { - let upload = s3_client.put(&destc, loaded_binary.into()).await; - match upload { - Ok(_) => { - bar.finish_and_clear(); - Ok::<_, anyhow::Error>(url) - } - Err(e) => { - bar.finish_and_clear(); - anyhow::bail!("Error uploading to {} {:?}", dest.to_string(), e) - } - } + s3_client + .put(&dest, loaded_binary.into()) + .await + .map_err(|err| { + BonsolCliError::S3ClientError(S3ClientError::UploadFailed { dest, err }) + })?; } + + bar.finish_and_clear(); + url } - Some(DeployType::ShadowDrive) => { - let storage_account = shadow_drive_upload - .storage_account - .ok_or(anyhow::anyhow!("Please provide a storage account"))?; - let shadow_drive = ShadowDriveClient::new(signer, &rpc_url); - let alt_client = if let Some(alt_keypair) = shadow_drive_upload.alternate_keypair { - Some(ShadowDriveClient::new( - read_keypair_file(Path::new(&alt_keypair)) - .map_err(|e| anyhow::anyhow!("Invalid 
keypair file: {:?}", e))?, - &rpc_url, - )) - } else { - None - }; - let sa = if storage_account == "create" { - let name = shadow_drive_upload - .storage_account_name - .unwrap_or(manifest.name.clone()); + DeployDestination::ShadowDrive(shadow_drive_upload) => { + let ShadowDriveUploadArgs { + storage_account, + storage_account_size_mb, + storage_account_name, + alternate_keypair, + create, + .. + } = shadow_drive_upload; + + let alternate_keypair = alternate_keypair + .map(|alt_keypair| -> anyhow::Result { + read_keypair_file(Path::new(&alt_keypair)).map_err(|err| { + BonsolCliError::FailedToReadKeypair { + file: alt_keypair, + err: format!("{err:?}"), + } + .into() + }) + }) + .transpose()?; + let wallet = alternate_keypair.as_ref().unwrap_or(&signer); + let wallet_pubkey = wallet.pubkey(); + + let client = ShadowDriveClient::new(wallet, &rpc_url); + + let storage_account = if create { + let name = storage_account_name.unwrap_or(manifest.name.clone()); let min = std::cmp::max(((loaded_binary.len() as u64) / 1024 / 1024) * 2, 1); - let size = shadow_drive_upload.storage_account_size_mb.unwrap_or(min); - let res = if let Some(alt_client) = &alt_client { - alt_client - .create_storage_account( - &name, - Byte::from_unit(size as f64, ByteUnit::MB) - .map_err(|e| anyhow::anyhow!("Invalid size: {:?}", e))?, - shadow_drive_sdk::StorageAccountVersion::V2, - ) - .await - .map_err(|e| anyhow::anyhow!("Error creating storage account: {:?}", e))? 
- } else { - println!( - "Creating storage account with {}MB under the name {} with {}", - size, + let size = storage_account_size_mb.unwrap_or(min); + + println!( + "Creating storage account with {}MB under the name '{}' with signer pubkey {}", + size, &name, wallet_pubkey + ); + let storage_account = client + .create_storage_account( &name, - signer.pubkey() - ); - - shadow_drive - .create_storage_account( - &name, - Byte::from_unit(size as f64, ByteUnit::MB) - .map_err(|e| anyhow::anyhow!("Invalid size: {:?}", e))?, - shadow_drive_sdk::StorageAccountVersion::V2, + Byte::from_unit(size as f64, ByteUnit::MB).map_err(|err| { + BonsolCliError::ShadowDriveClientError( + ShadowDriveClientError::ByteError { + size: size as f64, + err, + }, + ) + })?, + shadow_drive_sdk::StorageAccountVersion::V2, + ) + .await + .map_err(|err| { + BonsolCliError::ShadowDriveClientError( + ShadowDriveClientError::StorageAccountCreationFailed { + name: name.clone(), + signer: wallet_pubkey, + size, + err, + }, ) - .await - .map_err(|e| anyhow::anyhow!("Error creating storage account: {:?}", e))? - }; - res.shdw_bucket - .ok_or(anyhow::anyhow!("Invalid storage account"))? + })? + .shdw_bucket + .ok_or(BonsolCliError::ShadowDriveClientError( + ShadowDriveClientError::InvalidStorageAccount { + name, + signer: wallet_pubkey, + size, + }, + ))?; + + println!("Created new storage account with public key: {storage_account}"); + storage_account } else { - storage_account.to_string() + // cli parsing prevents both `create` and `storage_account` to be passed simultaneously + // and require at least one or the other is passed, making this unwrap safe. + storage_account.unwrap().to_string() }; let name = format!("{}-{}", manifest.name, manifest.image_id); - let resp = if let Some(alt_client) = alt_client { - alt_client - .store_files( - &Pubkey::from_str(&sa)?, - vec![ShadowFile::bytes(name, loaded_binary)], - ) - .await - .map_err(|e| anyhow::anyhow!("Error uploading to shadow drive: {:?}", e))? 
- } else { - shadow_drive - .store_files( - &Pubkey::from_str(&sa)?, - vec![ShadowFile::bytes(name, loaded_binary)], - ) - .await - .map_err(|e| anyhow::anyhow!("Error uploading to shadow drive: {:?}", e))? - }; + let resp = client + .store_files( + &Pubkey::from_str(&storage_account)?, + vec![ShadowFile::bytes(name.clone(), loaded_binary)], + ) + .await + .map_err(|err| { + BonsolCliError::ShadowDriveClientError(ShadowDriveClientError::UploadFailed { + storage_account, + name: manifest.name.clone(), + binary_path: manifest.binary_path, + err, + }) + })?; + bar.finish_and_clear(); println!("Uploaded to shadow drive"); - Ok::<_, anyhow::Error>(resp.message) + resp.message } - Some(DeployType::Url) => { + DeployDestination::Url(url_upload) => { let req = reqwest::get(&url_upload.url).await?; let bytes = req.bytes().await?; if bytes != loaded_binary { - return Err(anyhow::anyhow!("The binary uploaded does not match the local binary, check that the url is correct")); + return Err(BonsolCliError::OriginBinaryMismatch { + url: url_upload.url, + binary_path: manifest.binary_path, + } + .into()); } + bar.finish_and_clear(); - Ok(url_upload.url) - } - _ => { - bar.finish_and_clear(); - return Err(anyhow::anyhow!("Please provide an upload config")); + url_upload.url } - }?; + }; if !auto_confirm { bar.finish_and_clear(); println!("Deploying to Solana, which will cost real money. Are you sure you want to continue? 
(y/n)"); let mut input = String::new(); std::io::stdin().read_line(&mut input).unwrap(); - if input.trim() != "y" { + let response = input.trim(); + if response != "y" { bar.finish_and_clear(); - println!("Aborting"); + println!("Response: {response}\nAborting..."); return Ok(()); } } let bonsol_client = BonsolClient::with_rpc_client(rpc_client); - let image_id = manifest.image_id.clone(); + let image_id = manifest.image_id; let deploy = bonsol_client.get_deployment(&image_id).await; match deploy { - Ok(Some(_)) => { + Ok(Some(account)) => { bar.finish_and_clear(); - println!("Deployment already exists, deployments are immutable"); + println!( + "Deployment for account '{}' already exists, deployments are immutable", + account.owner + ); Ok(()) } Ok(None) => { @@ -223,16 +251,13 @@ pub async fn deploy( .collect(), ) .await?; - match bonsol_client.send_txn_standard(signer, deploy_txn).await { - Ok(_) => { - bar.finish_and_clear(); - println!("{} deployed", image_id); - } - Err(e) => { - bar.finish_and_clear(); - anyhow::bail!(e); - } - }; + if let Err(err) = bonsol_client.send_txn_standard(signer, deploy_txn).await { + bar.finish_and_clear(); + anyhow::bail!(err) + } + + bar.finish_and_clear(); + println!("{} deployed", image_id); Ok(()) } Err(e) => { diff --git a/cli/src/error.rs b/cli/src/error.rs new file mode 100644 index 0000000..345d709 --- /dev/null +++ b/cli/src/error.rs @@ -0,0 +1,212 @@ +use std::io::Error as IoError; + +use byte_unit::ByteError; +use cargo_toml::Error as CargoManifestError; +use object_store::Error as S3Error; +use serde_json::Error as SerdeJsonError; +use shadow_drive_sdk::error::Error as ShdwDriveError; +use shadow_drive_sdk::Pubkey; +use thiserror::Error as DeriveError; + +pub(crate) const DEFAULT_SOLANA_CONFIG_PATH: &str = ".config/solana/cli/config.yml"; +pub(crate) const SOLANA_CONFIG_DOCS_URL: &str = + "https://solana.com/docs/intro/installation#solana-config"; + +#[derive(Debug, DeriveError)] +pub enum BonsolCliError { + 
#[error(transparent)] + ParseConfigError(#[from] ParseConfigError), + + #[error("Failed to read keypair from file '{file}': {err}")] + FailedToReadKeypair { file: String, err: String }, + + #[error("Account '{0}' does not have any SOL to pay for the transaction(s)")] + InsufficientFundsForTransactions(String), + + #[error(transparent)] + ZkManifestError(#[from] ZkManifestError), + + #[error("Build failed: the following errors were captured from stderr:\n\n{0}")] + BuildFailure(String), + + #[error("Failed to compute an image ID from binary at path '{binary_path}': {err:?}")] + FailedToComputeImageId { + binary_path: String, + err: anyhow::Error, + }, + + #[error(transparent)] + S3ClientError(#[from] S3ClientError), + + #[error(transparent)] + ShadowDriveClientError(#[from] ShadowDriveClientError), + + #[error("The binary uploaded does not match the local binary at path '{binary_path}', is the URL correct?\nupload_url: {url}")] + OriginBinaryMismatch { url: String, binary_path: String }, + + #[error("The following build dependencies are missing: {}", missing_deps.join(", "))] + MissingBuildDependencies { missing_deps: Vec }, +} + +#[derive(Debug, DeriveError, Clone)] +pub enum ParseConfigError { + #[error("")] + Uninitialized, + + #[error("The provided solana cli config path '{path:?}' does not exist")] + ConfigNotFound { path: String }, + + #[error("The default solana cli config path '/home/{whoami}/{DEFAULT_SOLANA_CONFIG_PATH}' does not exist.")] + DefaultConfigNotFound { whoami: String }, + + #[error("Failed to load solana cli config at '{path}': {err}")] + FailedToLoad { path: String, err: String }, +} +impl ParseConfigError { + pub(crate) fn context(&self, whoami: Option) -> String { + match self { + Self::ConfigNotFound { .. } => format!("The solana cli config path was invalid, please double check that the path is correct and try again.\nTip: Try using an absolute path."), + Self::DefaultConfigNotFound { .. 
} => format!( +"The default solana cli config path is used when no other options for deriving the RPC URL and keypair file path are provided, ie. '--rpc_url' and '--keypair', or a path to a config that isn't at the default location, ie '--config'. +Tip: Try running 'solana config get'. If you have a custom config path set, double check that the default path also exists. A custom config path can be passed to bonsol with the '--config' option, eg. 'bonsol --config /path/to/config.yml'. + +For more information on the solana cli config see: {}", + SOLANA_CONFIG_DOCS_URL + ), + Self::FailedToLoad { path, .. } => { + if let Some(whoami) = whoami { + let default_path = format!("/home/{}/{}", whoami, DEFAULT_SOLANA_CONFIG_PATH); + if path == &default_path { + return format!( +"The default solana cli config path is used when no other options for deriving the RPC URL and keypair file path are provided, ie. '--rpc_url' and '--keypair', or a path to a config that isn't at the default location, ie '--config'. +Tip: Try running 'solana config get'. This will give you information about your current config. If for whatever reason the keypair or RPC URL are missing, please follow the instructions below and try again. + +- To generate a new keypair at the default path: 'solana-keygen new' +- To set the RPC URL, select a cluster. For instance, 'mainnet-beta': 'solana config set --url mainnet-beta' + +For more information on the solana cli config see: {}", + SOLANA_CONFIG_DOCS_URL + ); + } + } + format!( +"The config at '{}' exists, but there was a problem parsing it into what bonsol needs, ie. a keypair file and RPC URL. +Tip: Try running 'solana config get'. This will give you information about your current config. If for whatever reason the keypair or RPC URL are missing, please follow the instructions below and try again. + +- To generate a new keypair at the default path: 'solana-keygen new' +- To set the RPC URL, select a cluster. 
For instance, 'mainnet-beta': 'solana config set --url mainnet-beta' + +For more information on the solana cli config see: {}", + path, + SOLANA_CONFIG_DOCS_URL + ) + }, + Self::Uninitialized => unreachable!(), + } + } +} + +#[derive(Debug, DeriveError)] +pub enum ZkManifestError { + #[error("Failed to open manifest at '{manifest_path}': {err:?}")] + FailedToOpen { manifest_path: String, err: IoError }, + + #[error("Failed to deserialize json manifest at '{manifest_path}': {err:?}")] + FailedDeserialization { + manifest_path: String, + err: SerdeJsonError, + }, + + #[error( + "Failed to produce zkprogram image binary path: Image binary path contains non-UTF8 encoded characters" + )] + InvalidBinaryPath, + + #[error("Failed to load binary from manifest at '{binary_path}': {err:?}")] + FailedToLoadBinary { binary_path: String, err: IoError }, + + #[error("Program path {0} does not contain a Cargo.toml")] + MissingManifest(String), + + #[error("Failed to load manifest at '{manifest_path}': {err:?}")] + FailedToLoadManifest { + manifest_path: String, + err: CargoManifestError, + }, + + #[error("Expected '{name}' to be a table at '{manifest_path}'")] + ExpectedTable { manifest_path: String, name: String }, + + #[error("Expected '{name}' to be an array at '{manifest_path}'")] + ExpectedArray { manifest_path: String, name: String }, + + #[error("Manifest at '{0}' does not contain a package name")] + MissingPackageName(String), + + #[error("Manifest at '{0}' does not contain a package metadata field")] + MissingPackageMetadata(String), + + #[error("Manifest at '{manifest_path}' has a metadata table that is missing a zkprogram metadata key: meta: {meta:?}")] + MissingProgramMetadata { + manifest_path: String, + meta: cargo_toml::Value, + }, + + #[error("Manifest at '{manifest_path}' has a zkprogram metadata table that is missing a input_order key: zkprogram: {zkprogram:?}")] + MissingInputOrder { + manifest_path: String, + zkprogram: cargo_toml::Value, + }, + + 
#[error("Failed to parse input: Input contains non-UTF8 encoded characters: {0}")] + InvalidInput(cargo_toml::Value), + + #[error("Failed to parse the following inputs at '{manifest_path}': {}", errs.join("\n"))] + InvalidInputs { + manifest_path: String, + errs: Vec, + }, +} + +#[derive(Debug, DeriveError)] +pub enum S3ClientError { + #[error("Failed to build S3 client with the following args:\n{}\n\n{err:?}", args.join(",\n"))] + FailedToBuildClient { args: Vec, err: S3Error }, + + #[error("Failed to upload to '{dest}': {err:?}")] + UploadFailed { + dest: object_store::path::Path, + err: S3Error, + }, +} + +#[derive(Debug, DeriveError)] +pub enum ShadowDriveClientError { + #[error( + "Failed to produce a valid byte representation for the given size ({size}_f64): {err:?}" + )] + ByteError { size: f64, err: ByteError }, + + #[error("Shadow Drive storage account creation failed for account with {size}MB under the name '{name}' with signer pubkey {signer}: {err:?}")] + StorageAccountCreationFailed { + name: String, + signer: Pubkey, + size: u64, + err: ShdwDriveError, + }, + + #[error("A Shadow Drive storage account was created without a valid bucket:\n\nsize: {size}MB\nname: {name}\nsigner_pubkey: {signer}")] + InvalidStorageAccount { + name: String, + signer: Pubkey, + size: u64, + }, + + #[error("Failed to upload binary at '{binary_path}' to Shadow Drive account '{storage_account}' under the name '{name}': {err:?}")] + UploadFailed { + storage_account: String, + name: String, + binary_path: String, + err: ShdwDriveError, + }, +} diff --git a/cli/src/main.rs b/cli/src/main.rs index bdf9052..f5819d3 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,101 +1,65 @@ +use std::io::{self, Read}; +use std::path::Path; + +use atty::Stream; +use bonsol_sdk::BonsolClient; +use clap::Parser; +use solana_sdk::signature::read_keypair_file; +use solana_sdk::signer::Signer; + +use crate::command::{BonsolCli, ParsedBonsolCli, ParsedCommand}; +use crate::common::{sol_check, 
try_load_from_config}; +use crate::error::BonsolCliError; + mod build; mod deploy; mod execute; mod init; mod prove; -// mod execute; pub mod command; pub mod common; -use anyhow::anyhow; -use atty::Stream; -use bonsol_sdk::BonsolClient; -use clap::Parser; -use command::{BonsolCli, Commands}; -use common::sol_check; -use solana_cli_config::{Config, CONFIG_FILE}; -use solana_sdk::signature::read_keypair_file; -use solana_sdk::signer::Signer; -use std::io::{self, Read}; -use std::path::Path; +pub(crate) mod error; -const SOL_CHECK_MESSAGE: &str = "Your account needs to have some SOL to pay for the transactions"; #[tokio::main] async fn main() -> anyhow::Result<()> { - let cli = BonsolCli::parse(); - let keypair = cli.keypair; - let config = cli.config; - let rpc_url = cli.rpc_url; - let (rpc, kpp) = match (rpc_url, keypair, config) { - (Some(rpc_url), Some(keypair), None) => (rpc_url, keypair), - (None, None, config) => { - let config_location = CONFIG_FILE - .clone() - .ok_or(anyhow!("Please provide a config file"))?; - let config = Config::load(&config.unwrap_or(config_location.clone())); - match config { - Ok(config) => (config.json_rpc_url, config.keypair_path), - Err(e) => { - anyhow::bail!("Error loading config [{}]: {:?}", config_location, e); - } - } - } - _ => { - anyhow::bail!("Please provide a keypair and rpc or a solana config file"); - } - }; + let ParsedBonsolCli { + config, + keypair, + rpc_url, + command, + } = BonsolCli::parse().try_into()?; - let keypair = read_keypair_file(Path::new(&kpp)); - if keypair.is_err() { - anyhow::bail!("Invalid keypair"); - } - let command = cli.command; - let keypair = keypair.unwrap(); - let stdin = if atty::isnt(Stream::Stdin) { - let mut buffer = String::new(); - io::stdin().read_to_string(&mut buffer)?; - if buffer.trim().is_empty() { - None - } else { - Some(buffer) - } - } else { - None + let (rpc, kpp) = match rpc_url.zip(keypair) { + Some(conf) => conf, + None => try_load_from_config(config)?, }; + let keypair = 
+ read_keypair_file(Path::new(&kpp)).map_err(|err| BonsolCliError::FailedToReadKeypair { + file: kpp, + err: format!("{err:?}"), + })?; + let stdin = atty::isnt(Stream::Stdin) + .then(|| { + let mut buffer = String::new(); + io::stdin().read_to_string(&mut buffer).ok()?; + (!buffer.trim().is_empty()).then_some(buffer) + }) + .flatten(); let sdk = BonsolClient::new(rpc.clone()); + match command { - Commands::Build { zk_program_path } => match build::build(&keypair, zk_program_path) { - Err(e) => { - anyhow::bail!(e); - } - Ok(_) => { - println!("Build complete"); - } - }, - Commands::Deploy { - manifest_path, - s3_upload, - shadow_drive_upload, - auto_confirm, - deploy_type, - url_upload, - } => { + ParsedCommand::Build { zk_program_path } => build::build(&keypair, zk_program_path), + ParsedCommand::Deploy { deploy_args } => { if !sol_check(rpc.clone(), keypair.pubkey()).await { - anyhow::bail!(SOL_CHECK_MESSAGE); + return Err(BonsolCliError::InsufficientFundsForTransactions( + keypair.pubkey().to_string(), + ) + .into()); } - deploy::deploy( - rpc, - &keypair, - manifest_path, - s3_upload, - shadow_drive_upload, - url_upload, - auto_confirm, - deploy_type, - ) - .await?; + deploy::deploy(rpc, keypair, deploy_args).await } - Commands::Execute { + ParsedCommand::Execute { execution_request_file, program_id, execution_id, @@ -106,7 +70,10 @@ async fn main() -> anyhow::Result<()> { timeout, } => { if !sol_check(rpc.clone(), keypair.pubkey()).await { - anyhow::bail!(SOL_CHECK_MESSAGE); + return Err(BonsolCliError::InsufficientFundsForTransactions( + keypair.pubkey().to_string(), + ) + .into()); } execute::execute( &sdk, @@ -122,9 +89,9 @@ async fn main() -> anyhow::Result<()> { stdin, wait, ) - .await?; + .await } - Commands::Prove { + ParsedCommand::Prove { manifest_path, program_id, input_file, @@ -140,11 +107,8 @@ async fn main() -> anyhow::Result<()> { output_location, stdin, ) - .await?; - } - Commands::Init { project_name, dir } => { - 
init::init_project(&project_name, dir)?; + .await } - }; - Ok(()) + ParsedCommand::Init { project_name, dir } => init::init_project(&project_name, dir), + } } diff --git a/docs/docs/shared/deploy.mdx b/docs/docs/shared/deploy.mdx index 267d62b..e3d78ba 100644 --- a/docs/docs/shared/deploy.mdx +++ b/docs/docs/shared/deploy.mdx @@ -16,7 +16,7 @@ Manual deployment can be a cause of bugs and mismatches in this regard so we don To deploy manually you can use the following command. ```bash -bonsol deploy -m ./path-to-your-manifest.json -t {s3|shadowdrive|manual} +bonsol deploy -m ./path-to-your-manifest.json -t {s3|shadow-drive|url} ``` #### S3 @@ -31,9 +31,9 @@ ShadowDrive is a decentralized storage network that allows you to upload your pr If you have not already created a storage account you can create and upload in one command. ```bash -bonsol deploy -m ./path-to-your-manifest.json -t shadowdrive --storage-account-name {your storage account} --storage-account-size-mb {your storage account size in mb} --storage-account-name {your storage account name} --alternate-keypair {path to your alternate keypair} +bonsol deploy -m ./path-to-your-manifest.json -t shadow-drive --storage-account-name {your storage account} --storage-account-size-mb {your storage account size in mb} --storage-account-name {your storage account name} --alternate-keypair {path to your alternate keypair} ``` Once you have created your storage account you can upload your program to it for the future versions of your program. 
```bash -bonsol deploy -m ./path-to-your-manifest.json -t shadowdrive --storage-account {your storage account} -``` \ No newline at end of file +bonsol deploy -m ./path-to-your-manifest.json -t shadow-drive --storage-account {your storage account} +``` diff --git a/flake.nix b/flake.nix index 78631c4..b85fa85 100644 --- a/flake.nix +++ b/flake.nix @@ -43,6 +43,16 @@ ./rust-toolchain.toml "sha256-VZZnlyP69+Y3crrLHQyJirqlHrTtGTsyiSnZB8jEvVo="; craneLib = (crane.mkLib pkgs).overrideToolchain rustToolchain.fenix-pkgs; + flatc = with pkgs; + (flatbuffers.overrideAttrs (old: rec { + version = "24.3.25"; + src = fetchFromGitHub { + owner = "google"; + repo = "flatbuffers"; + rev = "v${version}"; + hash = "sha256-uE9CQnhzVgOweYLhWPn2hvzXHyBbFiFVESJ1AEM3BmA="; + }; + })); workspace = rec { root = ./.; src = craneLib.cleanCargoSource root; @@ -92,6 +102,7 @@ pkg-config perl autoPatchelfHook + flatc ]; buildInputs = with pkgs; [ @@ -120,6 +131,7 @@ fileset = lib.fileset.unions ([ ./Cargo.toml ./Cargo.lock + ./schemas (workspace.canonicalizePath crate) ] ++ (workspace.canonicalizePaths deps)); }; @@ -156,12 +168,13 @@ }); # The root Cargo.toml requires all of the workspace crates, otherwise this would be a bit neater. 
- bonsol-cli = mkCrateDrv "bonsol" "cli" [ "sdk" "onchain" "schemas-rust" "iop" "node" "prover" ]; - bonsol-node = mkCrateDrv "bonsol-node" "node" [ "sdk" "onchain" "schemas-rust" "iop" "cli" "prover" ]; + bonsol-cli = mkCrateDrv "bonsol" "cli" [ "sdk" "onchain" "schemas-rust" "iop" "node" "prover" "tester" ]; + bonsol-node = mkCrateDrv "bonsol-node" "node" [ "sdk" "onchain" "schemas-rust" "iop" "cli" "prover" "tester" ]; + node_toml = pkgs.callPackage ./nixos/pkgs/bonsol/Node.toml.nix { inherit risc0-groth16-prover; }; setup = pkgs.callPackage ./nixos/pkgs/bonsol/setup.nix { }; validator = pkgs.callPackage ./nixos/pkgs/bonsol/validator.nix { }; - run-node = pkgs.callPackage ./nixos/pkgs/bonsol/run-node.nix { inherit bonsol-node; }; + run-node = pkgs.callPackage ./nixos/pkgs/bonsol/run-node.nix { inherit bonsol-node node_toml; }; # Internally managed versions of risc0 binaries that are pinned to # the version that bonsol relies on. @@ -264,7 +277,6 @@ setup validator - run-node cargo-risczero r0vm @@ -272,6 +284,10 @@ solana-cli solana-platform-tools; + run-node = (run-node.override { + use-nix = true; + }); + simple-e2e-script = pkgs.writeShellApplication { name = "simple-e2e-test"; @@ -285,10 +301,10 @@ ] ++ [ r0vm cargo-risczero + risc0-groth16-prover solana-cli bonsol-cli bonsol-node - setup validator (run-node.override { use-nix = true; @@ -296,7 +312,6 @@ ]; text = '' - ${setup}/bin/setup.sh ${bonsol-cli}/bin/bonsol --keypair $HOME/.config/solana/id.json --rpc-url http://localhost:8899 build -z images/simple echo "building validator" ${validator}/bin/validator.sh > /dev/null 2>&1 & @@ -308,7 +323,7 @@ node_pid=$! 
sleep 30 echo "node is running: PID $node_pid" - ${bonsol-cli}/bin/bonsol --keypair $HOME/.config/solana/id.json --rpc-url http://localhost:8899 deploy -m images/simple/manifest.json -t url --url https://bonsol-public-images.s3.amazonaws.com/simple-7cb4887749266c099ad1793e8a7d486a27ff1426d614ec0cc9ff50e686d17699 -y + ${bonsol-cli}/bin/bonsol --keypair $HOME/.config/solana/id.json --rpc-url http://localhost:8899 deploy url https://bonsol-public-images.s3.amazonaws.com/simple-68f4b0c5f9ce034aa60ceb264a18d6c410a3af68fafd931bcfd9ebe7c1e42960 -m images/simple/manifest.json -y sleep 20 resp=$(${bonsol-cli}/bin/bonsol --keypair $HOME/.config/solana/id.json --rpc-url http://localhost:8899 execute -f testing-examples/example-execution-request.json -x 2000 -m 2000 -w) echo "execution response was: $resp" @@ -335,6 +350,7 @@ nil # nix lsp nixpkgs-fmt # nix formatter rustup + flatc # `setup.sh` dependencies docker @@ -342,9 +358,6 @@ nodejs_22 python3 udev - - # checked for at runtime but never used - cargo-binstall ] ++ [ setup validator diff --git a/nixos/pkgs/bonsol/Node.toml.nix b/nixos/pkgs/bonsol/Node.toml.nix new file mode 100644 index 0000000..95465b7 --- /dev/null +++ b/nixos/pkgs/bonsol/Node.toml.nix @@ -0,0 +1,10 @@ +{ writeTextFile +, risc0-groth16-prover +}: +let + name = "Node.toml"; + text = (builtins.replaceStrings [ "./stark/" ] [ "${risc0-groth16-prover}/stark/" ] (builtins.readFile ../../../${name})); +in +writeTextFile { + inherit name text; +} diff --git a/nixos/pkgs/bonsol/run-node.nix b/nixos/pkgs/bonsol/run-node.nix index cabb309..40bf4da 100644 --- a/nixos/pkgs/bonsol/run-node.nix +++ b/nixos/pkgs/bonsol/run-node.nix @@ -1,5 +1,6 @@ { writeShellScriptBin , bonsol-node +, node_toml , use-nix ? false # whether or not to use the nix pre-built bonsol-node binary }: let @@ -10,7 +11,9 @@ let # This also avoids unnecessary build times. 
from = [ "cargo run --release -p bonsol-node --" "cargo run --release -p bonsol-node --features metal --" ]; to = [ node_path node_path ]; - contents = (builtins.readFile ../../../${name}); + + # Override the path to the Node.toml to reference the groth16 tools in the nix store + contents = (builtins.replaceStrings [ "./Node.toml" ] [ "${node_toml}" ] (builtins.readFile ../../../bin/${name})); in writeShellScriptBin name ( if use-nix then diff --git a/nixos/pkgs/bonsol/setup.nix b/nixos/pkgs/bonsol/setup.nix index 70460a5..a919f65 100644 --- a/nixos/pkgs/bonsol/setup.nix +++ b/nixos/pkgs/bonsol/setup.nix @@ -6,4 +6,4 @@ let name = "setup.sh"; in -writeShellScriptBin name (builtins.readFile ../../../${name}) +writeShellScriptBin name (builtins.readFile ../../../bin/${name}) diff --git a/nixos/pkgs/bonsol/validator.nix b/nixos/pkgs/bonsol/validator.nix index 7ebe66d..8c70aa3 100644 --- a/nixos/pkgs/bonsol/validator.nix +++ b/nixos/pkgs/bonsol/validator.nix @@ -2,4 +2,4 @@ let name = "validator.sh"; in -writeShellScriptBin name (builtins.readFile ../../../${name}) +writeShellScriptBin name (builtins.readFile ../../../bin/${name})